[
  {
    "path": ".gitattributes",
    "content": "###############################################################################\n# Set default behavior to automatically normalize line endings.\n###############################################################################\n* text=auto\n\n###############################################################################\n# Set default behavior for command prompt diff.\n#\n# This is need for earlier builds of msysgit that does not have it on by\n# default for csharp files.\n# Note: This is only used by command line\n###############################################################################\n#*.cs     diff=csharp\n\n###############################################################################\n# Set the merge driver for project and solution files\n#\n# Merging from the command prompt will add diff markers to the files if there\n# are conflicts (Merging from VS is not affected by the settings below, in VS\n# the diff markers are never inserted). Diff markers may cause the following \n# file extensions to fail to load in VS. An alternative would be to treat\n# these files as binary and thus will always conflict and require user\n# intervention with every merge. To do so, just uncomment the entries below\n###############################################################################\n#*.sln       merge=binary\n#*.csproj    merge=binary\n#*.vbproj    merge=binary\n#*.vcxproj   merge=binary\n#*.vcproj    merge=binary\n#*.dbproj    merge=binary\n#*.fsproj    merge=binary\n#*.lsproj    merge=binary\n#*.wixproj   merge=binary\n#*.modelproj merge=binary\n#*.sqlproj   merge=binary\n#*.wwaproj   merge=binary\n\n###############################################################################\n# behavior for image files\n#\n# image files are treated as binary by default.\n###############################################################################\n#*.jpg   binary\n#*.png   binary\n#*.gif   binary\n\n###############################################################################\n# diff behavior for common document formats\n# \n# Convert binary document formats to text before diffing them. This feature\n# is only available from the command line. Turn it on by uncommenting the \n# entries below.\n###############################################################################\n#*.doc   diff=astextplain\n#*.DOC   diff=astextplain\n#*.docx  diff=astextplain\n#*.DOCX  diff=astextplain\n#*.dot   diff=astextplain\n#*.DOT   diff=astextplain\n#*.pdf   diff=astextplain\n#*.PDF   diff=astextplain\n#*.rtf   diff=astextplain\n#*.RTF   diff=astextplain\n"
  },
  {
    "path": ".gitignore",
    "content": "DSC/DSC.zip\n\ncompiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*/.py[cod]\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\neggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# Editor\n*~\n\n# PyCharm\n.idea/\n.idea_modules/\n\n# PyInstaller\n#  Usually these files are written by a python script from a template \n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.cache\nnosetests.xml\ncoverage.xml\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\n!OmsAgent/extension-test/omsfiles/*.log\noms*.zip\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/*\n\n\n# mac osx specific files\n.DS_Store\n\n### VirtualEnv template\n# Virtualenv\n# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/\n.Python\npyvenv.cfg\n.venv\npip-selfcheck.json\n\n# virtualenv\nvenv/\nENV/\n\n# dotenv\n.env\n\n# pyenv\n.python-version\n\n# VMBackup package ignors\nVMBackup/dist\nVMBackup/dist/*\nVMBackup/azure-sdk\nVMBackup/azure-sdk/*\nVMBackup/main/azure/*\nVMBackup/MANIFEST\n#VMBackup/*.pyproj\nVMBackup/*.pyproj.user\nVMBackup/*.suo\nVMBackup/main/safefreeze/bin/*\n\n# CustomScript ignors\nCustomScript/test/download/0/stdout\nCustomScript/test/download/0/errout\n*node_modules/\n\n# VMEncryption ignores\nVMEncryption/main/azure/*\n\n# Common\nCommon/psutil/build/*\nCommon/psutil/dist/*\nCommon/psutil/psutil.egg-info/*\nVMBackup/.vs/VMBackup/v14/.suo\nRDMAUpdate/RDMAUpdate.pyproj.user\n*.sln\nRDMAUpdate/VMBackup.pyproj.user\nVMBackup/.vs/config/applicationhost.config\nRDMAUpdate/.vs/VMBackup/v14/.suo\n\n# Handler Registration ignores\n*.pem\nRDMAUpdate/.vs/RDMAUpdate/v14/.suo\n\n# Visual Studio directory\n.vs/\n\n# Ignore HandlerManifest updates\nVMEncryption/HandlerManifest.json\nVMEncryption/AzureDiskEncryptionForLinux*.xml\nVMEncryption/ADEForLinux*.xml\nVMEncryption/MANIFEST\n\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"Common/azure-sdk-for-python\"]\n\tpath = Common/azure-sdk-for-python\n\turl = https://github.com/Azure/azure-sdk-for-python.git\n[submodule \"Common/psutil\"]\n\tpath = Common/psutil\n\turl = https://github.com/giampaolo/psutil.git\n[submodule \"VMEncryption/transitions\"]\n\tpath = VMEncryption/transitions\n\turl = https://github.com/tyarkoni/transitions.git\n"
  },
  {
    "path": ".vscode/launch.json",
    "content": "{\n    // Use IntelliSense to learn about possible attributes.\n    // Hover to view descriptions of existing attributes.\n    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n    \"version\": \"0.2.0\",\n    \"configurations\": [\n        {\n            \"name\": \"Python: test_encode.py\",\n            \"type\": \"python\",\n            \"request\": \"launch\",\n            \"program\": \"test_encode.py\",\n            \"console\": \"integratedTerminal\",\n            \"justMyCode\": true,\n            \"cwd\": \"${workspaceFolder}/Utils/test\",\n            \"env\" : {\n                \"PYTHONPATH\": \"${workspaceFolder}\"\n            }\n        }\n    ]\n}"
  },
  {
    "path": "AzureEnhancedMonitor/README.md",
    "content": "# How to enable Azure Enhanced Monitoring on Linux VM\n\nThis is an instruction about how to enable Azure Enhanced Monitoring(AEM) on Azure Linux VM. \n\n## Install Azure CLI\n\nFirst of all, you need to to install [Azure CLI][azure-cli]\n\n**NOTE** This feature is currently on developing. You need to install it from github by running the following command.\n```\nnpm -g install git+https://github.com/yuezh/azure-xplat-cli.git#dev\n```\n\n## Configure Azure Enhanced Monitoring\n\n1. Login with your Azure account\n\n    ```\n    azure login\n    ```\n2. Switch to azure resource management mode\n\n    ```\n    azure config mode arm\n    ```\n3. Enable Azure Enhanced Monitoring\n\n    ```\n    azure vm enable-aem <resource-group-name> <vm-name>\n    ```  \n4. Verify that the Azure Enhanced Monitoring is active on the Azure Linux VM. Check if the file  /var/lib/AzureEnhancedMonitor/PerfCounters exists. If exists, display information collected by AEM with:\n\n    ```\n    cat /var/lib/AzureEnhancedMonitor/PerfCounters\n    ```\n    Then you will get output like:\n    \n    ```\n    2;cpu;Current Hw Frequency;;0;2194.659;MHz;60;1444036656;saplnxmon;\n    2;cpu;Max Hw Frequency;;0;2194.659;MHz;0;1444036656;saplnxmon;\n    …\n    …\n    ```\n\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n"
  },
  {
    "path": "AzureEnhancedMonitor/bin/pack.sh",
    "content": "#!/bin/bash\nproj_name=\"aem\"\nproj_version=\"1.0\"\n\nproj_full_name=\"$proj_name-$proj_version\"\n\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\n\nbuild_dir=$root/build\ntarget_dir=$build_dir/$proj_full_name\n\nmkdir -p $build_dir\nmkdir -p $target_dir\n\ncd $root/clib\nmake clean\n\ncp -r $root/nodejs $build_dir\ncd $build_dir/nodejs\nnpm pack\n\ncp -r $root/clib $target_dir\ncp $build_dir/nodejs/*.tgz $target_dir\ncp $root/bin/setup.sh $target_dir\nchmod +x $root/bin/setup.sh\n\n#install.sh is a self-extracting script.\n#The begin of this file is sh script while the end is a tar\necho \"#!/bin/bash\"                              >  $build_dir/install.sh\necho \"#\"                                        >> $build_dir/install.sh\necho \"#Auto-generated. Do NOT edit this file.\"  >> $build_dir/install.sh\necho \"#\"                                        >> $build_dir/install.sh\necho \"root=\\$(dirname \\$0)\"                     >> $build_dir/install.sh\necho \"cd \\$root\"                                >> $build_dir/install.sh\necho \"root=\\`pwd\\`\"                             >> $build_dir/install.sh\necho \"if [ -d $proj_full_name ]; then\"          >> $build_dir/install.sh\necho \"    echo \\\"[INFO]Remove old package...\\\"\" >> $build_dir/install.sh\necho \"    rm $proj_full_name -rf\"               >> $build_dir/install.sh\necho \"fi\"                                       >> $build_dir/install.sh\necho \"echo \\\"[INFO]Unpacking...\\\"\"              >> $build_dir/install.sh\necho \"sed -e '1,/^exit$/d' \"\\$0\" | tar xzf -\"   >> $build_dir/install.sh\necho \"$proj_full_name/setup.sh\"                 >> $build_dir/install.sh\necho \"exit\"                                     >> $build_dir/install.sh\ncd $build_dir\ntar czf - $proj_full_name                       >> $build_dir/install.sh\nchmod +x $build_dir/install.sh\n\n\ncp -r $root/clib $build_dir\ncd $build_dir\ntar czf clib.tar.gz clib/\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/bin/setup.sh",
    "content": "#!/bin/bash\n\ninstall_log=`pwd`/install.log\nroot=$(dirname $0)\ncd $root\nroot=`pwd`\n\nif [[ $EUID -ne 0 ]]; then\n    echo \"[ERROR]This script must be run as root\" 1>&2\n    exit 1\nfi\n\nfunction install_nodejs_tarball()\n{\n    version=\"v0.10.37\"\n    node_version=\"node-$version-linux-x64\"\n    src=\"$root/$node_version\"\n    target=\"/usr/local\"\n\n    echo \"[INFO]Installing nodejs from http://nodejs.org/dist/$version/${node_version}.tar.gz\"\n    if [ -f ${src}.tar.gz ]; then\n        rm ${src}.tar.gz -f\n    fi\n    if [ -d ${src} ]; then\n        rm ${src} -rf\n    fi\n    wget http://nodejs.org/dist/$version/${node_version}.tar.gz 1>>$install_log 2>&1\n    tar -zxf ${node_version}.tar.gz  1>>$install_log 2>&1\n\n    echo \"[INFO]Install nodejs to $target\"\n    if [ -f $target/bin/node ]; then\n        rm $target/bin/node -f\n    fi\n    cp $src/bin/node $target/bin/node\n    \n    echo \"[INFO]Create link to $target/bin/node\"\n    if [ -f /usr/bin/node ]; then\n        rm /usr/bin/node -f\n    fi\n    ln -s $target/bin/node /usr/bin/node\n   \n    echo \"[INFO]Install npm\"\n    curl -sL https://www.npmjs.org/install.sh | sh 1>>$install_log 2>&1\n\n}\n\nfunction install_nodejs()\n{\n    echo \"[INFO]Installing nodejs and npm\"\n    if [ \"$(type apt-get 2>/dev/null)\" != \"\" ] ; then\n        curl -sL https://deb.nodesource.com/setup | bash - 1>>$install_log 2>&1\n        apt-get -y install nodejs 1>>$install_log 2>&1\n    elif [ \"$(type yum 2>/dev/null)\" != \"\" ] ; then\n        curl -sL https://rpm.nodesource.com/setup | bash - 1>>$install_log 2>&1\n        yum -y install nodejs 1>>$install_log 2>&1\n    else\n        install_nodejs_tarball \n    fi\n    if [ ! $? ]; then\n        echo \"[ERROR]Install nodejs and npm failed. See $install_log.\"\n        exit 1\n    fi\n}\n\necho \"[INFO]Checking dependency...\"\necho \"\" > $install_log\nif [ \"$(type node 2>/dev/null)\" == \"\" ]; then\n    install_nodejs\nfi\necho \"[INFO]  nodejs version: $(node --version)\"\n\nif [ \"$(type npm 2>/dev/null)\" == \"\" ]; then\n    install_nodejs\nfi\necho \"[INFO]  npm version: $(npm -version)\"\n\nif [ \"$(type azure 2> /dev/null)\" == \"\" ]; then\n    echo \"[INFO]Installing azure-cli\"\n    npm install -g azure-cli 1>>$install_log 2>&1\n    if [ ! $? ]; then\n        echo \"[ERROR]Install azure-cli failed. See $install_log.\"\n        exit 1\n    fi\nfi\necho \"[INFO]  azure-cli version: $(azure --version)\"\n\nnpm_pkg=\"azure-linux-tools-1.0.0.tgz\"\necho \"[INFO]Installing Azure Enhanced Monitor tools...\"\nif [ -f ./$npm_pkg ]; then\n    npm install -g ./$npm_pkg 1>>$install_log 2>&1\n    if [ ! $? ]; then\n        echo \"[ERROR]Install Azure Enhanced Monitor tools failed. See $install_log.\"\n        exit 1\n    fi\nelse\n    echo \"[ERROR] Couldn't find npm package $npm_pkg\"\n    exit 1\nfi\n\necho \"[INFO]Finished.\"\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/.gitignore",
    "content": "bin/*\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/Makefile",
    "content": "CC := gcc\nSRCDIR := src\nLIBDIR := lib\nINCDIR := include\nBUILDDIR := build\nTARGET := $(LIBDIR)/libazureperf.so\n\nSRCEXT := c\nSOURCES := $(shell find $(SRCDIR) -type f -name *.$(SRCEXT))\nOBJECTS := $(patsubst $(SRCDIR)/%,$(BUILDDIR)/%,$(SOURCES:.$(SRCEXT)=.o))\nCFLAGS := -g -fPIC\nLDFLAGS := -shared\nINC := -I $(INCDIR)\nLIB := -L $(LIBDIR)\n\nall : $(TARGET)\n\n$(TARGET): $(OBJECTS)\n\t@echo \"Linking...\"\n\t$(CC) $^ $(LDFLAGS) -o $(TARGET) $(LIB)\n\n$(BUILDDIR)/%.o: $(SRCDIR)/%.$(SRCEXT)\n\t@mkdir -p $(BUILDDIR)\n\t@echo \"Compiling...\"\n\t$(CC) $(CFLAGS) $(INC) -c -o $@ $<\n\nclean:\n\t@echo \"Cleaning...\"\n\t$(RM) -r $(BUILDDIR) $(TARGET)\n\ntest: $(OBJECTS)\n\t@echo \"Run test\"\n\t$(CC) test/runtest.c $^ $(INC) -L $(LIBDIR) -lazureperf -o bin/runtest\n\tbin/runtest\n\ninstall:\n\tmkdir -p /usr/lib/azureperf\n\tcp $(TARGET) /usr/lib/azureperf\n\techo \"/usr/lib/azureperf\" > /etc/ld.so.conf.d/azureperf.conf\n\tldconfig\n\tcp $(INCDIR)/azureperf.h /usr/include\n\n.PHONY: clean test\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/include/azureperf.h",
    "content": "//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n#ifndef AZURE_PERF\n#define AZURE_PERF\n\n/*All the strings are utf-8 encoded*/\n\n/*The max buf size for all string*/\n#define STR_BUF_MAX         (256)\n\n#define TYPE_NAME_MAX       (64)\n#define PROPERTY_NAME_MAX   (128)\n#define INSTANCE_NAME_MAX   (256)\n#define STRING_VALUE_MAX    (256)\n#define UNIT_NAME_MAX       (64)\n#define MACHINE_NAME_MAX    (128)\n\n#define PERF_COUNT_MAX      (128)\n\n#define PERF_COUNTER_TYPE_INVALID\t(0)\n#define PERF_COUNTER_TYPE_INT\t\t(1)\n#define PERF_COUNTER_TYPE_DOUBLE\t(2)\n#define PERF_COUNTER_TYPE_LARGE\t    (3)\n#define PERF_COUNTER_TYPE_STRING\t(4)\n\n#define AP_ERR_PC_NOT_FOUND                 (-1)\n#define AP_ERR_PC_BUF_OVERFLOW              (-2) \n#define AP_ERR_INVALID_COUNTER_TYPE         (-11)\n#define AP_ERR_INVALID_TYPE_NAME            (-12)\n#define AP_ERR_INVALID_PROPERTY_NAME        (-13)\n#define AP_ERR_INVALID_INSTANCE_NAME        (-14)\n#define AP_ERR_INVALID_IS_EMPTY_FLAG        (-15)\n#define AP_ERR_INVALID_VALUE                (-15)\n#define AP_ERR_INVALID_UNIT_NAME            (-16)\n#define AP_ERR_INVALID_REFRESH_INTERVAL     (-17)\n#define AP_ERR_INVALID_TIMESTAMP            (-18)\n#define AP_ERR_INVALID_MACHINE_NAME         (-19)\n\n\ntypedef struct \n{\n\tint\t\t\t    counter_typer;\n\tchar\t\t\ttype_name[TYPE_NAME_MAX];\n\tchar\t\t\tproperty_name[PROPERTY_NAME_MAX];\n\tchar\t\t\tinstance_name[STRING_VALUE_MAX];\t\n    int             is_empty;\n    union {\n        int         val_int;\n        long long   val_large;\n        double      val_double;\n        char        val_str[STRING_VALUE_MAX];\n    };\n\tchar\t\t\tunit_name[UNIT_NAME_MAX];\n    unsigned int\trefresh_interval;\n\tlong long\t\ttimestamp;\n\tchar\t\t\tmachine_name[MACHINE_NAME_MAX];\t\n    \n} perf_counter;\n\ntypedef struct\n{\n    perf_counter    buf[PERF_COUNT_MAX]; \n    int             len; \n    int             err;\n    char            *ap_file;\n} ap_handler;\n\nap_handler* ap_open();\n\nextern void ap_close(ap_handler* handler);\n\nextern void ap_refresh(ap_handler* handler);\n\nextern int ap_metric_all(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Cloud Provider\nextern int ap_metric_config_cloud_provider(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\CPU Over-Provisioning\nextern int ap_metric_config_cpu_over_provisioning(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Memory Over-Provisioning\nextern int ap_metric_config_memory_over_provisioning(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Data Provider Version\nextern int ap_metric_config_data_provider_version(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Data Sources\nextern int ap_metric_config_data_sources(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Instance Type\nextern int ap_metric_config_instance_type(ap_handler *handler, perf_counter *pc, 
size_t size);\n\n//config\\Virtualization Solution\nextern int ap_metric_config_virtualization_solution(ap_handler *handler, perf_counter *pc, size_t size);\n\n//config\\Virtualization Solution Version\nextern int ap_metric_config_virtualization_solution_version(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Current Hw Frequency\nextern int ap_metric_cpu_current_hw_frequency(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Max Hw Frequency\nextern int ap_metric_cpu_max_hw_frequency(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Current VM Processing Power\nextern int ap_metric_cpu_current_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Guaranteed VM Processing Power\nextern int ap_metric_cpu_guaranteed_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Max. VM Processing Power\nextern int ap_metric_cpu_max_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Number of Cores per CPU\nextern int ap_metric_cpu_number_of_cores_per_cpu(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Number of Threads per Core\nextern int ap_metric_cpu_number_of_threads_per_core(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Phys. Processing Power per vCPU\nextern int ap_metric_cpu_phys_processing_power_per_vcpu(ap_handler *handler, perf_counter *pc, size_t size);\n\n//cpu\\Processor Type\nextern int ap_metric_cpu_processor_type(ap_handler *handler, perf_counter *pc, size_t size);\n\n#endif\n"
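\n/*\n * Usage sketch (a minimal example, following clib/test/runtest.c):\n *\n *     ap_handler *handler = ap_open();\n *     perf_counter pc;\n *     ap_refresh(handler);               //parse the PerfCounters file\n *     if(handler->err == 0 &&\n *             ap_metric_cpu_max_hw_frequency(handler, &pc, 1) == 1)\n *     {\n *         //pc.val_double now holds the frequency (unit_name is \"MHz\")\n *     }\n *     ap_close(handler);\n */\n\n#endif\n"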
  },
  {
    "path": "AzureEnhancedMonitor/clib/src/apmetric.c",
    "content": "//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n//\n// This file is auto-generated, don't modify it directly.\n//\n\n#include <stdlib.h> \n#include <azureperf.h> \n\nint ap_metric_config_cloud_provider(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Cloud Provider\", size);\n}\n\nint ap_metric_config_cpu_over_provisioning(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"CPU Over-Provisioning\", size);\n}\n\nint ap_metric_config_memory_over_provisioning(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Memory Over-Provisioning\", size);\n}\n\nint ap_metric_config_data_provider_version(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Data Provider Version\", size);\n}\n\nint ap_metric_config_data_sources(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Data Sources\", size);\n}\n\nint ap_metric_config_instance_type(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Instance Type\", size);\n}\n\nint ap_metric_config_virtualization_solution(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Virtualization Solution\", size);\n}\n\nint ap_metric_config_virtualization_solution_version(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Virtualization Solution Version\", size);\n}\n\nint ap_metric_cpu_current_hw_frequency(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Current Hw Frequency\", size);\n}\n\nint ap_metric_cpu_max_hw_frequency(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Max Hw Frequency\", size);\n}\n\nint ap_metric_cpu_current_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Current VM Processing Power\", size);\n}\n\nint ap_metric_cpu_guaranteed_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return 
get_metric(handler, pc, \"cpu\", \"Guaranteed VM Processing Power\", size);\n}\n\nint ap_metric_cpu_max_vm_processing_power(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Max. VM Processing Power\", size);\n}\n\nint ap_metric_cpu_number_of_cores_per_cpu(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Number of Cores per CPU\", size);\n}\n\nint ap_metric_cpu_number_of_threads_per_core(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Number of Threads per Core\", size);\n}\n\nint ap_metric_cpu_phys_processing_power_per_vcpu(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Phys. Processing Power per vCPU\", size);\n}\n\nint ap_metric_cpu_processor_type(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Processor Type\", size);\n}\n\nint ap_metric_cpu_reference_compute_unit(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"Reference Compute Unit\", size);\n}\n\nint ap_metric_cpu_vcpu_mapping(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"vCPU Mapping\", size);\n}\n\nint ap_metric_cpu_vm_processing_power_consumption(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"cpu\", \"VM Processing Power Consumption\", size);\n}\n\nint ap_metric_memory_current_memory_assigned(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"memory\", \"Current Memory assigned\", size);\n}\n\nint ap_metric_memory_guaranteed_memory_assigned(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"memory\", \"Guaranteed Memory assigned\", size);\n}\n\nint ap_metric_memory_max_memory_assigned(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"memory\", \"Max Memory assigned\", size);\n}\n\nint ap_metric_memory_vm_memory_consumption(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"memory\", \"VM Memory Consumption\", size);\n}\n\nint ap_metric_network_adapter_id(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Adapter Id\", size);\n}\n\nint ap_metric_network_mapping(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Mapping\", size);\n}\n\nint ap_metric_network_min_network_bandwidth(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    
}\n    return get_metric(handler, pc, \"network\", \"Minimum Network Bandwidth\", size);\n}\n\nint ap_metric_network_max_network_bandwidth(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Maximum Network Bandwidth\", size);\n}\n\nint ap_metric_network_network_read_bytes(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Network Read Bytes\", size);\n}\n\nint ap_metric_network_network_write_bytes(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Network Write Bytes\", size);\n}\n\nint ap_metric_network_packets_retransmitted(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"network\", \"Packets Retransmitted\", size);\n}\n\nint ap_metric_config_last_hardware_change(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"config\", \"Last Hardware Change\", size);\n}\n\nint ap_metric_storage_phys_disc_to_storage_mapping(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Phys. Disc to Storage Mapping\", size);\n}\n\nint ap_metric_storage_storage_id(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage ID\", size);\n}\n\nint ap_metric_storage_read_bytes(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Read Bytes\", size);\n}\n\nint ap_metric_storage_read_ops(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Read Ops\", size);\n}\n\nint ap_metric_storage_read_op_latency_e2e(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Read Op Latency E2E msec\", size);\n}\n\nint ap_metric_storage_read_op_latency_server(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Read Op Latency Server msec\", size);\n}\n\nint ap_metric_storage_read_throughput_e2e(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Read Throughput E2E MB/sec\", size);\n}\n\nint ap_metric_storage_write_bytes(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Write Bytes\", size);\n}\n\nint ap_metric_storage_write_ops(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Write Ops\", size);\n}\n\nint ap_metric_storage_write_op_latency_e2e(ap_handler *handler, perf_counter *pc, size_t size)\n{\n 
   if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Write Op Latency E2E msec\", size);\n}\n\nint ap_metric_storage_write_op_latency_server(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Write Op Latency Server msec\", size);\n}\n\nint ap_metric_storage_write_throughput_e2e(ap_handler *handler, perf_counter *pc, size_t size)\n{\n    if(handler->err)\n    {\n        return 0;\n    }\n    return get_metric(handler, pc, \"storage\", \"Storage Write Throughput E2E MB/sec\", size);\n}\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/src/azureperf.c",
    "content": "//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n#include <stdio.h> \n#include <stdlib.h> \n#include <string.h> \n#include <errno.h>\n#include <azureperf.h> \n\n#define INTMIN(X, Y) (((X) < (Y)) ? (X) : (Y))\n#define INTMAX(X, Y) (((X) > (Y)) ? (X) : (Y))\n\n#define MATCH_SUCCESS       (1)\n#define MATCH_FAILED        (0)\n#define MATCH_EOF           (-1)\n#define STRICT_MATCH        (1)\n#define NON_STRICT_MATCH    (0)\n\nstatic char FIELD_SEPRATOR = ';';\nstatic char DEFAULT_AP_FILE[] = \"/var/lib/AzureEnhancedMonitor/PerfCounters\";\n\nap_handler* ap_open()\n{\n    ap_handler *handler = malloc(sizeof(ap_handler));\n    handler->ap_file = DEFAULT_AP_FILE;\n    memset(handler, 0, sizeof(ap_handler));\n    return handler;\n}\n\nvoid ap_close(ap_handler *handler)\n{\n    free(handler);\n}\n\nint read_sperator(FILE *fp, int strict)\n{\n    int c;\n    c = fgetc(fp);\n    //In non-strict mode, Read and discard chars until EOF or FIELD_SEPRATOR\n    while(strict == NON_STRICT_MATCH && c != EOF && c != FIELD_SEPRATOR)\n    {\n        c = fgetc(fp);\n    }\n    if(c == EOF)\n    {\n        return MATCH_EOF;\n    }\n    if(c != FIELD_SEPRATOR)\n    {\n        return MATCH_FAILED;\n    }\n    else\n    {\n        return MATCH_SUCCESS;\n    }\n}\n\nint read_int(FILE *fp, int *val)\n{\n    int ret = EOF;\n    ret = fscanf(fp, \"%d\", val);\n    if(ret == EOF)\n    {\n        return MATCH_EOF;\n    }\n    if(ret != 1)\n    {\n        return MATCH_FAILED;\n    }\n    else\n    {\n        return read_sperator(fp, STRICT_MATCH);\n    }\n}\n\nint read_int64(FILE *fp, long long *val)\n{\n    int ret = EOF;\n    ret = fscanf(fp, \"%Ld\", val);\n    if(ret == EOF)\n    {\n        return MATCH_EOF;\n    }\n    if(ret != 1)\n    {\n        return MATCH_FAILED;\n    }\n    else\n    {\n        return read_sperator(fp, STRICT_MATCH);\n    } \n}\n\nint read_double(FILE *fp, double *val)\n{\n    int ret = EOF;\n    ret = fscanf(fp, \"%lf\", val);\n    if(ret == EOF)\n    {\n        return MATCH_EOF;\n    }\n    if(ret != 1)\n    {\n        return MATCH_FAILED;\n    }\n    else\n    {\n        return read_sperator(fp, STRICT_MATCH);\n    } \n}\n\nint read_str(FILE *fp, char* str, int max_size)\n{\n    char buf[STR_BUF_MAX];\n    int c = EOF;\n    int i = 0;\n\n    if(max_size > STR_BUF_MAX)\n    {\n        return MATCH_FAILED;\n    }\n\n    memset(buf, 0, STR_BUF_MAX);\n    for(; i < max_size - 1; i++)\n    {\n        c = fgetc(fp);\n        if(c == EOF)\n        {\n            return MATCH_EOF;\n        }\n        if(c == FIELD_SEPRATOR)\n        {\n            break;\n        }\n        buf[i] = c;\n    }\n    strncpy(str, buf, i);\n    if(c == FIELD_SEPRATOR)\n    {\n        return MATCH_SUCCESS;\n    }\n    else//Reaches buf max, discard the rest part of string\n    {\n        return read_sperator(fp, NON_STRICT_MATCH); \n    }\n}\n\nvoid set_handler_err(ap_handler *handler, int err)\n{\n    handler->err = err;\n}\n\nint 
read_pc_from_file(ap_handler* handler, FILE *fp)\n{\n    int ret = MATCH_FAILED;\n    perf_counter *pc;\n\n    if(handler->len == PERF_COUNT_MAX)\n    {\n        handler->err = AP_ERR_PC_BUF_OVERFLOW; \n        goto EXIT; \n    }\n    pc = &handler->buf[handler->len];\n\n    ret = read_int(fp, &pc->counter_typer);\n    if(ret == MATCH_EOF)\n    {\n        goto EXIT; \n    }\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_COUNTER_TYPE);\n        goto EXIT;\n    }\n\n    ret = read_str(fp, pc->type_name, TYPE_NAME_MAX);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_TYPE_NAME);\n        goto EXIT;\n    }\n\n    ret = read_str(fp, pc->property_name, PROPERTY_NAME_MAX);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_PROPERTY_NAME);\n        goto EXIT;\n    }\n\n    ret = read_str(fp, pc->instance_name, INSTANCE_NAME_MAX);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_INSTANCE_NAME);\n        goto EXIT;\n    }\n    \n    ret = read_int(fp, &pc->is_empty);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_IS_EMPTY_FLAG);\n        goto EXIT;\n    }\n\n    if(!pc->is_empty)\n    {\n        switch(pc->counter_typer)\n        {\n            case PERF_COUNTER_TYPE_INT:\n               ret = read_int(fp, &pc->val_int);\n               break;\n            case PERF_COUNTER_TYPE_LARGE:\n               ret = read_int64(fp, &pc->val_large);\n               break;\n            case PERF_COUNTER_TYPE_DOUBLE:\n               ret = read_double(fp, &pc->val_double);\n               break;\n            case PERF_COUNTER_TYPE_STRING:\n               ret = read_str(fp, pc->val_str, STRING_VALUE_MAX);\n               break;\n        }\n        if(ret != MATCH_SUCCESS)\n        {\n            set_handler_err(handler, AP_ERR_INVALID_VALUE);\n            goto EXIT;\n        }\n    }\n    else\n    {\n        ret = read_sperator(fp, NON_STRICT_MATCH);\n        if(ret != MATCH_SUCCESS)\n        {\n            set_handler_err(handler, AP_ERR_INVALID_VALUE);\n            goto EXIT;\n        }\n    }\n    \n    ret = read_str(fp, pc->unit_name, UNIT_NAME_MAX);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_UNIT_NAME);\n        goto EXIT;\n    }\n\n    ret = read_int(fp, &pc->refresh_interval);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_REFRESH_INTERVAL);\n        goto EXIT;\n    }\n\n    ret = read_int64(fp, &pc->timestamp);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_TIMESTAMP);\n        goto EXIT;\n    }\n\n    ret = read_str(fp, pc->machine_name, MACHINE_NAME_MAX);\n    if(ret != MATCH_SUCCESS)\n    {\n        set_handler_err(handler, AP_ERR_INVALID_MACHINE_NAME);\n        goto EXIT;\n    }\n\n    handler->len++;\n\n    //Discard line end if exits.\n    fscanf(fp, \"\\n\");\n    \nEXIT:\n    return ret;\n}\n\nvoid ap_refresh(ap_handler *handler)\n{\n    FILE *fp = 0;\n    perf_counter *next = 0;\n   \n    //Reset handler \n    memset(handler->buf, 0, sizeof(perf_counter) * PERF_COUNT_MAX);\n    handler->len = 0;\n   \n    errno = 0;\n    fp = fopen(handler->ap_file, \"r\");\n    if(errno || 0 == fp){\n        handler->err = errno;\n        goto EXIT;  \n    }\n    \n    while(read_pc_from_file(handler, fp) != EOF)\n    {\n        if(handler->err != 0)\n        {\n            goto EXIT;\n        }\n    
}\n\nEXIT:\n    if(fp)\n    {\n        fclose(fp);\n    }\n}\n\nint ap_metric_all(ap_handler *handler, perf_counter *all, size_t size)\n{\n    int size_to_cp = 0;\n    if(handler->err)\n    {\n        return;\n    }\n    size_to_cp = INTMIN(handler->len, size);\n    if(size_to_cp > 0)\n    {\n        memcpy(all, handler->buf, sizeof(perf_counter) * size_to_cp);\n    }\n    return size_to_cp;\n}\n\nint get_metric(ap_handler *handler, perf_counter *pc, \n        const char *type_name, const char* property_name, size_t size)\n{\n    int i = 0; \n    int found = 0;\n    for(;i < handler->len && found < size; i++)\n    {\n        if(0 == strcmp(handler->buf[i].type_name, type_name) && \n                0 == strcmp(handler->buf[i].property_name, property_name))\n        {\n            memcpy(pc + found, &handler->buf[i], sizeof(perf_counter));\n            found++;\n        }\n    }\n    if(!found)\n    {\n        handler->err = AP_ERR_PC_NOT_FOUND;\n    }\n    return found;\n}\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/test/cases/positive_case",
    "content": "2;cpu;Current Hw Frequency;;0;2194.507;MHz;60;1423450780;aem-suse11sp3;\n2;cpu;Max Hw Frequency;;0;2194.507;MHz;0;1423450780;aem-suse11sp3;\n1;cpu;Current VM Processing Power;;0;1;compute unit;0;1423450780;aem-suse11sp3;\n1;cpu;Guaranteed VM Processing Power;;0;1;compute unit;0;1423450780;aem-suse11sp3;\n1;cpu;Max. VM Processing Power;;0;1;compute unit;0;1423450780;aem-suse11sp3;\n1;cpu;Number of Cores per CPU;;0;1;none;0;1423450780;aem-suse11sp3;\n1;cpu;Number of Threads per Core;;0;1;none;0;1423450780;aem-suse11sp3;\n2;cpu;Phys. Processing Power per vCPU;;0;1.0;none;0;1423450780;aem-suse11sp3;\n4;cpu;Processor Type;;0;Intel(R) Xeon(R) CPU E5-2660 0 @ 2.20GHz, GenuineIntel;none;0;1423450780;aem-suse11sp3;\n4;cpu;Reference Compute Unit;;0;Intel(R) Xeon(R) CPU E5-2660 0 @ 2.20GHz, GenuineIntel;none;0;1423450780;aem-suse11sp3;\n4;cpu;vCPU Mapping;;0;core;none;0;1423450780;aem-suse11sp3;\n2;cpu;VM Processing Power Consumption;;0;1.0;%;60;1423450480;aem-suse11sp3;\n1;memory;Current Memory assigned;;0;1681;MB;0;1423450780;aem-suse11sp3;\n1;memory;Guaranteed Memory assigned;;0;1681;MB;0;1423450780;aem-suse11sp3;\n1;memory;Max Memory assigned;;0;1681;MB;0;1423450780;aem-suse11sp3;\n2;memory;VM Memory Consumption;;0;10.0;%;60;1423450480;aem-suse11sp3;\n4;network;Adapter Id;eth0;0;eth0;none;0;1423450780;aem-suse11sp3;\n4;network;Mapping;eth0;0;00-0d-3a-20-7c-81;none;0;1423450780;aem-suse11sp3;\n1;network;Minimum Network Bandwidth;eth0;0;1000;Mbit/s;0;1423450780;aem-suse11sp3;\n1;network;Maximum Network Bandwidth;eth0;0;1000;Mbit/s;0;1423450780;aem-suse11sp3;\n3;network;Network Read Bytes;;0;60676750;byte/s;0;1423450780;aem-suse11sp3;\n3;network;Network Write Bytes;;0;11596695;byte/s;0;1423450780;aem-suse11sp3;\n1;network;Packets Retransmitted;;0;279;packets/min;0;1423450780;aem-suse11sp3;\n3;config;Last Hardware Change;;0;1423449729;posixtime;0;1423450780;aem-suse11sp3;\n4;storage;Phys. Disc to Storage Mapping;/dev/sdb;0;not mapped to vhd;none;0;1423450780;aem-suse11sp3;\n4;storage;Phys. 
Disc to Storage Mapping;/dev/sda;0;portalvhdsz0msmsvh2cnqj aem-suse11sp3-aem-suse11sp3-0-201502071338440211;none;0;1423450780;aem-suse11sp3;\n4;storage;Storage ID;portalvhdsz0msmsvh2cnqj;0;portalvhdsz0msmsvh2cnqj;none;0;1423450781;aem-suse11sp3;\n3;storage;Storage Read Bytes;portalvhdsz0msmsvh2cnqj;0;424198985;byte;60;1423450781;aem-suse11sp3;\n1;storage;Storage Read Ops;portalvhdsz0msmsvh2cnqj;0;2183;none;60;1423450781;aem-suse11sp3;\n2;storage;Storage Read Op Latency E2E msec;portalvhdsz0msmsvh2cnqj;0;64.7292721223;ms;60;1423450781;aem-suse11sp3;\n2;storage;Storage Read Op Latency Server msec;portalvhdsz0msmsvh2cnqj;0;20.0522214489;ms;60;1423450781;aem-suse11sp3;\n2;storage;Storage Read Throughput E2E MB/sec;portalvhdsz0msmsvh2cnqj;0;6.742461284;MB/s;60;1423450781;aem-suse11sp3;\n3;storage;Storage Write Bytes;portalvhdsz0msmsvh2cnqj;0;208673771;byte;60;1423450781;aem-suse11sp3;\n1;storage;Storage Write Ops;portalvhdsz0msmsvh2cnqj;0;3860;none;60;1423450781;aem-suse11sp3;\n2;storage;Storage Write Op Latency E2E msec;portalvhdsz0msmsvh2cnqj;0;14.3150263047;ms;60;1423450781;aem-suse11sp3;\n2;storage;Storage Write Op Latency Server msec;portalvhdsz0msmsvh2cnqj;0;14.0740937373;ms;60;1423450781;aem-suse11sp3;\n2;storage;Storage Write Throughput E2E MB/sec;portalvhdsz0msmsvh2cnqj;0;3.31678026517;MB/s;60;1423450781;aem-suse11sp3;\n4;config;Cloud Provider;;0;Microsoft Azure;none;0;1423450781;aem-suse11sp3;\n4;config;CPU Over-Provisioning;;0;no;none;0;1423450781;aem-suse11sp3;\n4;config;Memory Over-Provisioning;;0;no;none;0;1423450781;aem-suse11sp3;\n4;config;Data Provider Version;;0;1.0.0;none;0;1423450781;aem-suse11sp3;\n4;config;Data Sources;;0;lad;none;0;1423450781;aem-suse11sp3;\n4;config;Instance Type;;0;Small;none;0;1423450781;aem-suse11sp3;\n4;config;Virtualization Solution;;0;Microsoft Hv;none;0;1423450781;aem-suse11sp3;\n4;config;Virtualization Solution Version;;0;6.3;none;0;1423450781;aem-suse11sp3;\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/test/codegen.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport os\n\ncode_start=\"\"\"\\\n//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n//\n// This file is auto-generated, don't modify it directly.\n//\n\n#include <stdlib.h> \n#include <azureperf.h> \n\n\"\"\"\n\ncode_tmpl=\"\"\"\\\nint ap_metric_{0}_{1}(ap_handler *handler, perf_counter *pc, size_t size)\n{{\n    if(handler->err)\n    {{\n        return 0;\n    }}\n    return get_metric(handler, pc, \"{2}\", \"{3}\", size);\n}}\n\n\"\"\"\n\nhead_tmpl=\"\"\"\\\n//{0}\\{1}\nextern int ap_metric_{2}_{3}(ap_handler *handler, perf_counter *pc, size_t size);\n\n\"\"\"\n\ntest_root = os.path.dirname(os.path.abspath(__file__))\n\nif __name__ == \"__main__\":\n\n    with open(os.path.join(test_root, \"counter_names\"), \"r\") as file_in, \\\n         open(os.path.join(test_root, \"../src/apmetric.c\"), \"w\") as file_out, \\\n         open(os.path.join(test_root, \"../build/metric_def\"), \"w\") as head_out:\n\n        lines = file_in.read().split(\"\\n\")\n        \n        file_out.write(code_start)\n        for line in lines:\n            match = re.match(\"([^;]*);([^;]*);([^;]*)\", line)\n            if match is not None:\n                type_name = match.group(1)\n                prop_name = match.group(2)\n                short_name = match.group(3)\n                short_name = short_name.lower()\n                short_name = short_name.replace(\" \", \"_\")\n                short_name = short_name.replace(\"-\", \"_\")\n                code_snippet = code_tmpl.format(type_name.lower(),\n                                                short_name,\n                                                type_name,\n                                                prop_name)\n                file_out.write(code_snippet)\n                head_snippet = head_tmpl.format(type_name,\n                                                prop_name,\n                                                type_name.lower(),\n                                                short_name)\n                head_out.write(head_snippet)\n                print(\"printf(\\\">>>>ap_metric_{0}_{1}\\\\n\\\");\".format(type_name, short_name))\n                print(\"ap_metric_{0}_{1}(handler, &pc, 1);\".format(type_name, short_name))\n                print(\"print_counter(&pc);\")\n\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/test/counter_names",
    "content": "config;Cloud Provider;Cloud Provider\nconfig;CPU Over-Provisioning;CPU Over-Provisioning\nconfig;Memory Over-Provisioning;Memory Over-Provisioning\nconfig;Data Provider Version;Data Provider Version\nconfig;Data Sources;Data Sources\nconfig;Instance Type;Instance Type\nconfig;Virtualization Solution;Virtualization Solution\nconfig;Virtualization Solution Version;Virtualization Solution Version\ncpu;Current Hw Frequency;Current Hw Frequency\ncpu;Max Hw Frequency;Max Hw Frequency\ncpu;Current VM Processing Power;Current VM Processing Power\ncpu;Guaranteed VM Processing Power;Guaranteed VM Processing Power\ncpu;Max. VM Processing Power;Max VM Processing Power\ncpu;Number of Cores per CPU;Number of Cores per CPU\ncpu;Number of Threads per Core;Number of Threads per Core\ncpu;Phys. Processing Power per vCPU;Phys Processing Power per vCPU\ncpu;Processor Type;Processor Type\ncpu;Reference Compute Unit;Reference Compute Unit\ncpu;vCPU Mapping;vCPU Mapping\ncpu;VM Processing Power Consumption;VM Processing Power Consumption\nmemory;Current Memory assigned;Current Memory assigned\nmemory;Guaranteed Memory assigned;Guaranteed Memory assigned\nmemory;Max Memory assigned;Max Memory assigned\nmemory;VM Memory Consumption;VM Memory Consumption\nnetwork;Adapter Id;Adapter Id\nnetwork;Mapping;Mapping\nnetwork;Minimum Network Bandwidth;Min Network Bandwidth\nnetwork;Maximum Network Bandwidth;Max Network Bandwidth\nnetwork;Network Read Bytes;Network Read Bytes\nnetwork;Network Write Bytes;Network Write Bytes\nnetwork;Packets Retransmitted;Packets Retransmitted\nconfig;Last Hardware Change;Last Hardware Change\nstorage;Phys. Disc to Storage Mapping;Phys Disc to Storage Mapping\nstorage;Storage ID;Storage ID\nstorage;Storage Read Bytes;Read Bytes\nstorage;Storage Read Ops;Read Ops\nstorage;Storage Read Op Latency E2E msec;Read Op Latency E2E\nstorage;Storage Read Op Latency Server msec;Read Op Latency Server\nstorage;Storage Read Throughput E2E MB/sec;Read Throughput E2E\nstorage;Storage Write Bytes;Write Bytes\nstorage;Storage Write Ops;Write Ops\nstorage;Storage Write Op Latency E2E msec;Write Op Latency E2E\nstorage;Storage Write Op Latency Server msec;Write Op Latency Server\nstorage;Storage Write Throughput E2E MB/sec;Write Throughput E2E\n"
  },
  {
    "path": "AzureEnhancedMonitor/clib/test/runtest.c",
    "content": "//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n#include <stdio.h>\n#include <string.h>\n#include <azureperf.h> \n\nstatic const char default_input[] = \"./test/cases/positive_case\";\n\nint main(int argc, char ** argv)\n{\n    char* ap_file = (char*) default_input;\n    if(argc == 2)\n    {\n        ap_file = argv[1];\n    }\n    printf(\"Parsing perf counters from: %s\\n\", ap_file);\n    run_test(ap_file);\n}\n\nvoid print_counter(perf_counter *pc)\n{\n    printf(\"%-7s | %-24.24s | %-15.15s | \", pc->type_name, pc->property_name, \n            pc->instance_name);\n    switch(pc->counter_typer)\n    {\n        case PERF_COUNTER_TYPE_INT:\n            printf(\"%-30d\", pc->val_int);\n            break;\n        case PERF_COUNTER_TYPE_LARGE:\n            printf(\"%-30Ld\", pc->val_large);\n            break;\n        case PERF_COUNTER_TYPE_DOUBLE:\n            printf(\"%-30lf\", pc->val_double);\n            break;\n        case PERF_COUNTER_TYPE_STRING:\n        default:\n            printf(\"%-30.30s\", pc->val_str);\n            break;\n    }\n    printf(\" |\\n\");\n}\n\nint run_test(char* ap_file)\n{\n    int ret = 0;\n    ap_handler *handler = 0;\n    int i = 0;\n    perf_counter pc;\n\n    handler = ap_open();\n    handler->ap_file = ap_file;\n    ap_refresh(handler);\n    if(handler->err)\n    {\n        ret = handler->err;\n        printf(\"Error code:%d\\n\", handler->err);\n        goto EXIT;\n    }\n    printf(\"Found counters:%d\\n\", handler->len);\n    for(; i < handler->len; i++)\n    {\n        pc = handler->buf[i];\n        print_counter(&pc);\n        memset(&pc, 0 , sizeof(perf_counter));\n    }\n\n    printf(\">>>>ap_metric_config_cloud_provider\\n\");\n    ap_metric_config_cloud_provider(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_cpu_over_provisioning\\n\");\n    ap_metric_config_cpu_over_provisioning(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_memory_over_provisioning\\n\");\n    ap_metric_config_memory_over_provisioning(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_data_provider_version\\n\");\n    ap_metric_config_data_provider_version(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_data_sources\\n\");\n    ap_metric_config_data_sources(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_instance_type\\n\");\n    ap_metric_config_instance_type(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_virtualization_solution\\n\");\n    ap_metric_config_virtualization_solution(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_virtualization_solution_version\\n\");\n    ap_metric_config_virtualization_solution_version(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_current_hw_frequency\\n\");\n    ap_metric_cpu_current_hw_frequency(handler, &pc, 1);\n    
print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_max_hw_frequency\\n\");\n    ap_metric_cpu_max_hw_frequency(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_current_vm_processing_power\\n\");\n    ap_metric_cpu_current_vm_processing_power(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_guaranteed_vm_processing_power\\n\");\n    ap_metric_cpu_guaranteed_vm_processing_power(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_max_vm_processing_power\\n\");\n    ap_metric_cpu_max_vm_processing_power(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_number_of_cores_per_cpu\\n\");\n    ap_metric_cpu_number_of_cores_per_cpu(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_number_of_threads_per_core\\n\");\n    ap_metric_cpu_number_of_threads_per_core(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_phys_processing_power_per_vcpu\\n\");\n    ap_metric_cpu_phys_processing_power_per_vcpu(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_processor_type\\n\");\n    ap_metric_cpu_processor_type(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_reference_compute_unit\\n\");\n    ap_metric_cpu_reference_compute_unit(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_vcpu_mapping\\n\");\n    ap_metric_cpu_vcpu_mapping(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_cpu_vm_processing_power_consumption\\n\");\n    ap_metric_cpu_vm_processing_power_consumption(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_memory_current_memory_assigned\\n\");\n    ap_metric_memory_current_memory_assigned(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_memory_guaranteed_memory_assigned\\n\");\n    ap_metric_memory_guaranteed_memory_assigned(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_memory_max_memory_assigned\\n\");\n    ap_metric_memory_max_memory_assigned(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_memory_vm_memory_consumption\\n\");\n    ap_metric_memory_vm_memory_consumption(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_adapter_id\\n\");\n    ap_metric_network_adapter_id(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_mapping\\n\");\n    ap_metric_network_mapping(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_min_network_bandwidth\\n\");\n    ap_metric_network_min_network_bandwidth(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_max_network_bandwidth\\n\");\n    ap_metric_network_max_network_bandwidth(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_network_read_bytes\\n\");\n    ap_metric_network_network_read_bytes(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_network_write_bytes\\n\");\n    ap_metric_network_network_write_bytes(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_network_packets_retransmitted\\n\");\n    ap_metric_network_packets_retransmitted(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_config_last_hardware_change\\n\");\n    ap_metric_config_last_hardware_change(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_phys_disc_to_storage_mapping\\n\");\n    
ap_metric_storage_phys_disc_to_storage_mapping(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_storage_id\\n\");\n    ap_metric_storage_storage_id(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_read_bytes\\n\");\n    ap_metric_storage_read_bytes(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_read_ops\\n\");\n    ap_metric_storage_read_ops(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_read_op_latency_e2e\\n\");\n    ap_metric_storage_read_op_latency_e2e(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_read_op_latency_server\\n\");\n    ap_metric_storage_read_op_latency_server(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_read_throughput_e2e\\n\");\n    ap_metric_storage_read_throughput_e2e(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_write_bytes\\n\");\n    ap_metric_storage_write_bytes(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_write_ops\\n\");\n    ap_metric_storage_write_ops(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_write_op_latency_e2e\\n\");\n    ap_metric_storage_write_op_latency_e2e(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_write_op_latency_server\\n\");\n    ap_metric_storage_write_op_latency_server(handler, &pc, 1);\n    print_counter(&pc);\n    printf(\">>>>ap_metric_storage_write_throughput_e2e\\n\");\n    ap_metric_storage_write_throughput_e2e(handler, &pc, 1);\n    print_counter(&pc);\n    \nEXIT:\n    ap_close(handler);\n    return ret;\n}\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/.gitignore",
    "content": "bin/*\n.ropeproject/\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/HandlerManifest.json",
    "content": "[{\n  \"name\": \"AzureEnhancedMonitor\",\n  \"version\": 1.0,\n  \"handlerManifest\": {\n    \"installCommand\": \"installer.py\",\n    \"uninstallCommand\": \"handler.py uninstall\",\n    \"updateCommand\": \"handler.py update\",\n    \"enableCommand\": \"handler.py enable\",\n    \"disableCommand\": \"handler.py disable\",\n    \"rebootAfterInstall\": false,\n    \"reportHeartbeat\": false\n  }\n}]\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/aem.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport socket\nimport traceback\nimport time\nimport datetime\nimport psutil\nimport urlparse\nimport xml.dom.minidom as minidom\nfrom azure.storage import TableService, Entity\nfrom Utils.WAAgentUtil import waagent, AddExtensionEvent\n\n\nFAILED_TO_RETRIEVE_MDS_DATA=\"(03100)Failed to retrieve mds data\"\nFAILED_TO_RETRIEVE_LOCAL_DATA=\"(03101)Failed to retrieve local data\"\nFAILED_TO_RETRIEVE_STORAGE_DATA=\"(03102)Failed to retrieve storage data\"\nFAILED_TO_SERIALIZE_PERF_COUNTERS=\"(03103)Failed to serialize perf counters\"\n\ndef timedelta_total_seconds(delta):\n\n    if not hasattr(datetime.timedelta, 'total_seconds'):\n        return delta.days * 86400 + delta.seconds\n    else:\n        return delta.total_seconds()\n\ndef get_host_base_from_uri(blob_uri):\n    uri = urlparse.urlparse(blob_uri)\n    netloc = uri.netloc\n    if netloc is None:\n        return None\n    return netloc[netloc.find('.'):]\n\nMonitoringIntervalInMinute = 1 #One minute\nMonitoringInterval = 60 * MonitoringIntervalInMinute\n\n#It takes sometime before the performance date reaches azure table.\nAzureTableDelayInMinute = 5 #Five minute\nAzureTableDelay = 60 * AzureTableDelayInMinute\n\nAzureEnhancedMonitorVersion = \"2.0.0\"\nLibDir = \"/var/lib/AzureEnhancedMonitor\"\n\nLatestErrorRecord = \"LatestErrorRecord\"\n\ndef clearLastErrorRecord():\n    errFile = os.path.join(LibDir, LatestErrorRecord)\n    if os.path.exists(errFile) and os.path.isfile(errFile):\n        os.remove(errFile)\n\ndef getLatestErrorRecord():\n    errFile=os.path.join(LibDir, LatestErrorRecord)\n    if os.path.exists(errFile) and os.path.isfile(errFile):\n        with open(errFile, 'r') as f:\n            return f.read()\n\n    return \"0\"\n\ndef updateLatestErrorRecord(s):\n    errFile = os.path.join(LibDir, LatestErrorRecord)\n    maxRetry = 3\n    for i in range(0, maxRetry):\n        try:\n            with open(errFile, \"w+\") as F:\n                F.write(s.encode(\"utf8\"))\n                return\n        except IOError:\n            time.sleep(1)\n\n    waagent.Error((\"Failed to serialize latest error record to file:\"\n                    \"{0}\").format(errFile))\n    AddExtensionEvent(message=\"failed to write latest error record\")\n    raise\n\ndef easyHash(s):\n    \"\"\"\n    MDSD used the following hash algorithm to cal a first part of partition key\n    \"\"\"\n    strHash = 0\n    multiplier = 37\n    for c in s:\n        strHash = strHash * multiplier + ord(c)\n        #Only keep the last 64bit, since the mod base is 100\n        strHash = strHash % (1<<64) \n    return strHash % 100 #Assume eventVolume is Large\n\nEpoch = datetime.datetime(1, 1, 1)\ntickInOneSecond = 1000 * 10000 # 1s = 1000 * 10000 ticks\n\ndef getMDSTimestamp(unixTimestamp):\n    unixTime = datetime.datetime.utcfromtimestamp(unixTimestamp)\n    startTimestamp = int(timedelta_total_seconds(unixTime - Epoch))\n    
return startTimestamp * tickInOneSecond\n\ndef getIdentity():\n    identity = socket.gethostname()\n    return identity\n\ndef getMDSPartitionKey(identity, timestamp):\n    hashVal = easyHash(identity)\n    return \"{0:0>19d}___{1:0>19d}\".format(hashVal, timestamp)\n\ndef getAzureDiagnosticKeyRange():\n    #Round down by MonitoringInterval\n    endTime = (int(time.time()) / MonitoringInterval) * MonitoringInterval\n    endTime = endTime - AzureTableDelay\n    startTime = endTime - MonitoringInterval\n\n    identity = getIdentity()\n    startKey = getMDSPartitionKey(identity, getMDSTimestamp(startTime))\n    endKey = getMDSPartitionKey(identity, getMDSTimestamp(endTime))\n    return startKey, endKey\n\ndef getAzureDiagnosticCPUData(accountName, accountKey, hostBase,\n                              startKey, endKey, deploymentId):\n    try:\n        waagent.Log(\"Retrieve diagnostic data(CPU).\")\n        table = \"LinuxCpuVer2v0\"\n        tableService = TableService(account_name = accountName, \n                                    account_key = accountKey,\n                                    host_base = hostBase)\n        ofilter = (\"PartitionKey ge '{0}' and PartitionKey lt '{1}' \"\n                   \"and DeploymentId eq '{2}'\").format(startKey, endKey, deploymentId)\n        oselect = (\"PercentProcessorTime,DeploymentId\")\n        data = tableService.query_entities(table, ofilter, oselect, 1)\n        if data is None or len(data) == 0:\n            return None\n        cpuPercent = float(data[0].PercentProcessorTime)\n        return cpuPercent\n    except Exception as e:\n        waagent.Error((u\"Failed to retrieve diagnostic data(CPU): {0} {1}\"\n                       \"\").format(e, traceback.format_exc()))\n        updateLatestErrorRecord(FAILED_TO_RETRIEVE_MDS_DATA)\n        AddExtensionEvent(message=FAILED_TO_RETRIEVE_MDS_DATA)\n        return None\n    \n\ndef getAzureDiagnosticMemoryData(accountName, accountKey, hostBase,\n                                 startKey, endKey, deploymentId):\n    try:\n        waagent.Log(\"Retrieve diagnostic data: Memory\")\n        table = \"LinuxMemoryVer2v0\"\n        tableService = TableService(account_name = accountName, \n                                    account_key = accountKey,\n                                    host_base = hostBase)\n        ofilter = (\"PartitionKey ge '{0}' and PartitionKey lt '{1}' \"\n                   \"and DeploymentId eq '{2}'\").format(startKey, endKey, deploymentId)\n        oselect = (\"PercentAvailableMemory,DeploymentId\")\n        data = tableService.query_entities(table, ofilter, oselect, 1)\n        if data is None or len(data) == 0:\n            return None\n        memoryPercent = 100 - float(data[0].PercentAvailableMemory)\n        return memoryPercent\n    except Exception as e:\n        waagent.Error((u\"Failed to retrieve diagnostic data(Memory): {0} {1}\"\n                       \"\").format(e, traceback.format_exc()))\n        updateLatestErrorRecord(FAILED_TO_RETRIEVE_MDS_DATA)\n        AddExtensionEvent(message=FAILED_TO_RETRIEVE_MDS_DATA)\n        return None\n\nclass AzureDiagnosticData(object):\n    def __init__(self, config):\n        self.config = config\n        accountName = config.getLADName()\n        accountKey = config.getLADKey()\n        hostBase = config.getLADHostBase()\n        hostname = socket.gethostname()\n        deploymentId = config.getVmDeploymentId()\n        startKey, endKey = getAzureDiagnosticKeyRange()\n        self.cpuPercent = 
getAzureDiagnosticCPUData(accountName, \n                                                    accountKey,\n                                                    hostBase,\n                                                    startKey,\n                                                    endKey,\n                                                    deploymentId)\n        self.memoryPercent = getAzureDiagnosticMemoryData(accountName, \n                                                          accountKey,\n                                                          hostBase,\n                                                          startKey,\n                                                          endKey,\n                                                          deploymentId)\n\n    def getCPUPercent(self):\n        return self.cpuPercent\n\n    def getMemoryPercent(self):\n        return self.memoryPercent\n\nclass AzureDiagnosticMetric(object):\n    def __init__(self, config):\n        self.config = config\n        self.linux = LinuxMetric(self.config)\n        self.azure = AzureDiagnosticData(self.config)\n        self.timestamp = int(time.time()) - AzureTableDelay\n\n    def getTimestamp(self):\n        return self.timestamp\n\n    def getCurrHwFrequency(self):\n        return self.linux.getCurrHwFrequency()\n\n    def getMaxHwFrequency(self):\n        return self.linux.getMaxHwFrequency()\n\n    def getCurrVMProcessingPower(self):\n        return self.linux.getCurrVMProcessingPower()\n\n    def getGuaranteedVMProcessingPower(self):\n        return self.linux.getGuaranteedVMProcessingPower()\n\n    def getMaxVMProcessingPower(self):\n        return self.linux.getMaxVMProcessingPower()\n\n    def getNumOfCoresPerCPU(self):\n        return self.linux.getNumOfCoresPerCPU()\n\n    def getNumOfThreadsPerCore(self):\n        return self.linux.getNumOfThreadsPerCore()\n\n    def getPhysProcessingPowerPerVCPU(self):\n        return self.linux.getPhysProcessingPowerPerVCPU()\n\n    def getProcessorType(self):\n        return self.linux.getProcessorType()\n\n    def getReferenceComputeUnit(self):\n        return self.linux.getReferenceComputeUnit()\n\n    def getVCPUMapping(self):\n        return self.linux.getVCPUMapping()\n    \n    def getVMProcessingPowerConsumption(self):\n        return self.azure.getCPUPercent()\n    \n    def getCurrMemAssigned(self):\n        return self.linux.getCurrMemAssigned()\n        \n    def getGuaranteedMemAssigned(self):\n        return self.linux.getGuaranteedMemAssigned()\n\n    def getMaxMemAssigned(self):\n        return self.linux.getMaxMemAssigned()\n\n    def getVMMemConsumption(self):\n        return self.azure.getMemoryPercent()\n\n    def getNetworkAdapterIds(self):\n        return self.linux.getNetworkAdapterIds()\n\n    def getNetworkAdapterMapping(self, adapterId):\n        return self.linux.getNetworkAdapterMapping(adapterId)\n\n    def getMaxNetworkBandwidth(self, adapterId):\n        return self.linux.getMaxNetworkBandwidth(adapterId)\n\n    def getMinNetworkBandwidth(self, adapterId):\n        return self.linux.getMinNetworkBandwidth(adapterId)\n\n    def getNetworkReadBytes(self, adapterId):\n        return self.linux.getNetworkReadBytes(adapterId)\n\n    def getNetworkWriteBytes(self, adapterId):\n        return self.linux.getNetworkWriteBytes(adapterId)\n\n    def getNetworkPacketRetransmitted(self):\n        return self.linux.getNetworkPacketRetransmitted()\n  \n    def getLastHardwareChange(self):\n        return 
self.linux.getLastHardwareChange()\n\nclass CPUInfo(object):\n\n    @staticmethod\n    def getCPUInfo():\n        cpuinfo = waagent.GetFileContents(\"/proc/cpuinfo\")\n        ret, lscpu = waagent.RunGetOutput(\"lscpu\")\n        return CPUInfo(cpuinfo, lscpu)\n\n    def __init__(self, cpuinfo, lscpu):\n        self.cpuinfo = cpuinfo\n        self.lscpu = lscpu\n        self.cores = 1\n        self.coresPerCpu = 1\n        self.threadsPerCore = 1\n\n        #lscpu prints literal parentheses, e.g. \"CPU(s):    4\",\n        #so they must be escaped in the patterns below.\n        coresMatch = re.search(\"CPU\\\\(s\\\\):\\\\s+(\\\\d+)\", self.lscpu)\n        if coresMatch:\n            self.cores = int(coresMatch.group(1))\n\n        coresPerCpuMatch = re.search(\"Core\\\\(s\\\\) per socket:\\\\s+(\\\\d+)\", self.lscpu)\n        if coresPerCpuMatch:\n            self.coresPerCpu = int(coresPerCpuMatch.group(1))\n\n        threadsPerCoreMatch = re.search(\"Thread\\\\(s\\\\) per core:\\\\s+(\\\\d+)\", self.lscpu)\n        if threadsPerCoreMatch:\n            self.threadsPerCore = int(threadsPerCoreMatch.group(1))\n\n        model = re.search(\"model name\\\\s+:\\\\s+(.*)\\\\s\", self.cpuinfo)\n        vendorId = re.search(\"vendor_id\\\\s+:\\\\s+(.*)\\\\s\", self.cpuinfo)\n        if model and vendorId:\n            self.processorType = \"{0}, {1}\".format(model.group(1), \n                                                   vendorId.group(1))\n        else:\n            self.processorType = None\n\n        freqMatch = re.search(\"CPU MHz:\\\\s+(.*)\\\\s\", self.lscpu)\n        if freqMatch:\n            self.frequency = float(freqMatch.group(1))\n        else:\n            self.frequency = None\n\n        #The flags line is not the first line of /proc/cpuinfo,\n        #so search instead of match.\n        ht = re.search(\"flags\\\\s.*\\\\sht\\\\s\", self.cpuinfo)\n        self.isHTon = ht is not None\n\n    def getNumOfCoresPerCPU(self):\n        return self.coresPerCpu\n\n    def getNumOfCores(self):\n        return self.cores\n\n    def getNumOfThreadsPerCore(self):\n        return self.threadsPerCore\n\n    def getProcessorType(self):\n        return self.processorType\n\n    def getFrequency(self):\n        return self.frequency\n\n    def isHyperThreadingOn(self):\n        return self.isHTon\n\n    def getCPUPercent(self):\n        return psutil.cpu_percent()\n\nclass MemoryInfo(object):\n    def __init__(self):\n        self.memInfo = psutil.virtual_memory()\n\n    def getMemSize(self):\n        return self.memInfo[0] / 1024 / 1024 #MB\n\n    def getMemPercent(self):\n        return self.memInfo[2] #%\n\ndef getMacAddress(adapterId):\n    nicAddrPath = os.path.join(\"/sys/class/net\", adapterId, \"address\")\n    mac = waagent.GetFileContents(nicAddrPath)\n    mac = mac.strip()\n    mac = mac.replace(\":\", \"-\")\n    return mac\n\ndef sameList(l1, l2):\n    if l1 is None or l2 is None:\n        return l1 == l2\n    if len(l1) != len(l2):\n        return False\n    for i in range(0, len(l1)):\n        if l1[i] != l2[i]:\n            return False\n    return True\n\nclass NetworkInfo(object):\n    def __init__(self):\n        self.nics = psutil.net_io_counters(pernic=True)\n        self.nicNames = []\n        for nicName, stat in self.nics.iteritems():\n            if nicName != 'lo':\n                self.nicNames.append(nicName)\n\n    def getAdapterIds(self):\n        return self.nicNames\n\n    def getNetworkReadBytes(self, adapterId):\n        #Sample the counters twice and return a byte/s rate\n        net = psutil.net_io_counters(pernic=True)\n        if adapterId in net:\n            bytes_recv1 = net[adapterId][1]\n            time1 = time.time()\n            \n            time.sleep(0.2)\n            \n            net = 
psutil.net_io_counters(pernic=True)\n            bytes_recv2 = net[adapterId][1]\n            time2 = time.time()\n            \n            interval = (time2 - time1)\n            \n            return (bytes_recv2 - bytes_recv1) / interval\n        else:\n            return 0\n\n    def getNetworkWriteBytes(self, adapterId):\n        #Sample the counters twice and return a byte/s rate\n        net = psutil.net_io_counters(pernic=True)\n        if adapterId in net:\n            bytes_sent1 = net[adapterId][0]\n            time1 = time.time()\n            \n            time.sleep(0.2)\n            \n            net = psutil.net_io_counters(pernic=True)\n            bytes_sent2 = net[adapterId][0]\n            time2 = time.time()\n            \n            interval = (time2 - time1)\n            \n            return (bytes_sent2 - bytes_sent1) / interval\n        else:\n            return 0\n\n    def getNetstat(self):\n        retCode, output = waagent.RunGetOutput(\"netstat -s\", chk_err=False)\n        return output\n\n    def getNetworkPacketRetransmitted(self):\n        netstat = self.getNetstat()\n        #net-tools spells \"retransmited\" with a single 't' in its output\n        match = re.search(\"(\\\\d+)\\\\s*segments retransmited\", netstat)\n        if match is not None:\n            return int(match.group(1))\n        else:\n            waagent.Error(\"Failed to parse netstat output: {0}\".format(netstat))\n            updateLatestErrorRecord(FAILED_TO_RETRIEVE_LOCAL_DATA)\n            AddExtensionEvent(message=FAILED_TO_RETRIEVE_LOCAL_DATA)\n            return None\n\n\nHwInfoFile = os.path.join(LibDir, \"HwInfo\")\nclass HardwareChangeInfo(object):\n    def __init__(self, networkInfo):\n        self.networkInfo = networkInfo\n\n    def getHwInfo(self):\n        if not os.path.isfile(HwInfoFile):\n            return None, None\n        hwInfo = waagent.GetFileContents(HwInfoFile).split(\"\\\\n\")\n        return int(hwInfo[0]), hwInfo[1:]\n\n    def setHwInfo(self, timestamp, hwInfo):\n        content = str(timestamp)\n        content = content + \"\\\\n\" + \"\\\\n\".join(hwInfo)\n        waagent.SetFileContents(HwInfoFile, content)\n\n    def getLastHardwareChange(self):\n        oldTime, oldMacs = self.getHwInfo()\n        newMacs = map(lambda x : getMacAddress(x), \n                      self.networkInfo.getAdapterIds())\n        newTime = int(time.time())\n        newMacs.sort()\n        if oldMacs is None or not sameList(newMacs, oldMacs):\n            #Hardware changed\n            if oldTime is not None and newTime < oldTime:\n                waagent.Warn((\"Hardware change detected. 
But the old timestamp \"\n                               \"is greater than now, {0}>{1}.\").format(oldTime, \n                                                                       newTime))\n            self.setHwInfo(newTime, newMacs)\n            return newTime\n        else:\n            return oldTime\n\nclass LinuxMetric(object):\n    def __init__(self, config):\n        self.config = config\n        #CPU\n        self.cpuInfo = CPUInfo.getCPUInfo()\n        #Memory\n        self.memInfo = MemoryInfo()\n        #Network\n        self.networkInfo = NetworkInfo()\n        #Detect hardware change\n        self.hwChangeInfo = HardwareChangeInfo(self.networkInfo)\n        self.timestamp = int(time.time())\n\n    def getTimestamp(self):\n        return self.timestamp\n\n    def getCurrHwFrequency(self):\n        return self.cpuInfo.getFrequency()\n\n    def getMaxHwFrequency(self):\n        return self.getCurrHwFrequency()\n\n    def getCurrVMProcessingPower(self):\n        if self.config.isCpuOverCommitted():\n            return None\n        else:\n            return self.cpuInfo.getNumOfCores()\n\n    def getGuaranteedVMProcessingPower(self):\n        return self.getCurrVMProcessingPower()\n\n    def getMaxVMProcessingPower(self):\n        return self.getCurrVMProcessingPower()\n\n    def getNumOfCoresPerCPU(self):\n        return self.cpuInfo.getNumOfCoresPerCPU()\n\n    def getNumOfThreadsPerCore(self):\n        return self.cpuInfo.getNumOfThreadsPerCore()\n\n    def getPhysProcessingPowerPerVCPU(self):\n        return 1 / float(self.getNumOfThreadsPerCore())\n\n    def getProcessorType(self):\n        return self.cpuInfo.getProcessorType()\n\n    def getReferenceComputeUnit(self):\n        return self.getProcessorType()\n\n    def getVCPUMapping(self):\n        return \"thread\" if self.cpuInfo.isHyperThreadingOn() else \"core\"\n    \n    def getVMProcessingPowerConsumption(self):\n        return self.memInfo.getMemPercent()\n    \n    def getCurrMemAssigned(self):\n        if self.config.isMemoryOverCommitted():\n            return None\n        else:\n            return self.memInfo.getMemSize()\n        \n    def getGuaranteedMemAssigned(self):\n        return self.getCurrMemAssigned()\n\n    def getMaxMemAssigned(self):\n        return self.getCurrMemAssigned()\n\n    def getVMMemConsumption(self):\n        return self.memInfo.getMemPercent()\n\n    def getNetworkAdapterIds(self):\n        return self.networkInfo.getAdapterIds()\n\n    def getNetworkAdapterMapping(self, adapterId):\n        return getMacAddress(adapterId)\n\n    def getMaxNetworkBandwidth(self, adapterId):\n        return 1000 #Mbit/s \n\n    def getMinNetworkBandwidth(self, adapterId):\n        return 1000 #Mbit/s \n\n    def getNetworkReadBytes(self, adapterId):\n        return self.networkInfo.getNetworkReadBytes(adapterId)\n\n    def getNetworkWriteBytes(self, adapterId):\n        return self.networkInfo.getNetworkWriteBytes(adapterId)\n\n    def getNetworkPacketRetransmitted(self):\n        return self.networkInfo.getNetworkPacketRetransmitted()\n  \n    def getLastHardwareChange(self):\n        return self.hwChangeInfo.getLastHardwareChange()\n\nclass VMDataSource(object):\n    def __init__(self, config):\n        self.config = config\n\n    def collect(self):\n        counters = []\n        if self.config.isLADEnabled():\n            metrics = AzureDiagnosticMetric(self.config)\n        else:\n            metrics = LinuxMetric(self.config)\n\n        #CPU\n        
counters.append(self.createCounterCurrHwFrequency(metrics))\n        counters.append(self.createCounterMaxHwFrequency(metrics))\n        counters.append(self.createCounterCurrVMProcessingPower(metrics))\n        counters.append(self.createCounterGuaranteedVMProcessingPower(metrics))\n        counters.append(self.createCounterMaxVMProcessingPower(metrics))\n        counters.append(self.createCounterNumOfCoresPerCPU(metrics))\n        counters.append(self.createCounterNumOfThreadsPerCore(metrics))\n        counters.append(self.createCounterPhysProcessingPowerPerVCPU(metrics))\n        counters.append(self.createCounterProcessorType(metrics))\n        counters.append(self.createCounterReferenceComputeUnit(metrics))\n        counters.append(self.createCounterVCPUMapping(metrics))\n        counters.append(self.createCounterVMProcessingPowerConsumption(metrics))\n\n        #Memory\n        counters.append(self.createCounterCurrMemAssigned(metrics))\n        counters.append(self.createCounterGuaranteedMemAssigned(metrics))\n        counters.append(self.createCounterMaxMemAssigned(metrics))\n        counters.append(self.createCounterVMMemConsumption(metrics))\n\n        #Network\n        adapterIds = metrics.getNetworkAdapterIds()\n        for adapterId in adapterIds:\n            if adapterId.startswith('eth'):\n                counters.append(self.createCounterAdapterId(adapterId))\n                counters.append(self.createCounterNetworkMapping(metrics, adapterId))\n                counters.append(self.createCounterMinNetworkBandwidth(metrics, adapterId))\n                counters.append(self.createCounterMaxNetworkBandwidth(metrics, adapterId))\n                counters.append(self.createCounterNetworkReadBytes(metrics, adapterId))\n                counters.append(self.createCounterNetworkWriteBytes(metrics, adapterId))\n        counters.append(self.createCounterNetworkPacketRetransmitted(metrics))\n        \n        #Hardware change\n        counters.append(self.createCounterLastHardwareChange(metrics))\n\n        #Error\n        counters.append(self.createCounterError())\n\n        return counters\n    \n    def createCounterLastHardwareChange(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"config\",\n                           name = \"Last Hardware Change\",\n                           value = metrics.getLastHardwareChange(),\n                           unit=\"posixtime\")\n\n    def createCounterError(self):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"config\",\n                           name = \"Error\",\n                           value = getLatestErrorRecord())\n\n    def createCounterCurrHwFrequency(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"cpu\",\n                           name = \"Current Hw Frequency\",\n                           value = metrics.getCurrHwFrequency(),\n                           unit = \"MHz\",\n                           refreshInterval = 60)\n\n    def createCounterMaxHwFrequency(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"cpu\",\n                           name = \"Max Hw Frequency\",\n                           value = metrics.getMaxHwFrequency(),\n                           unit = \"MHz\")\n\n    def 
createCounterCurrVMProcessingPower(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"cpu\",\n                           name = \"Current VM Processing Power\",\n                           value = metrics.getCurrVMProcessingPower(),\n                           unit = \"compute unit\")\n\n    def createCounterMaxVMProcessingPower(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"cpu\",\n                           name = \"Max. VM Processing Power\",\n                           value = metrics.getMaxVMProcessingPower(),\n                           unit = \"compute unit\")\n\n    def createCounterGuaranteedVMProcessingPower(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"cpu\",\n                           name = \"Guaranteed VM Processing Power\",\n                           value = metrics.getGuaranteedVMProcessingPower(),\n                           unit = \"compute unit\")\n\n    def createCounterNumOfCoresPerCPU(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"cpu\",\n                           name = \"Number of Cores per CPU\",\n                           value = metrics.getNumOfCoresPerCPU())\n\n    def createCounterNumOfThreadsPerCore(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"cpu\",\n                           name = \"Number of Threads per Core\",\n                           value = metrics.getNumOfThreadsPerCore())\n\n    def createCounterPhysProcessingPowerPerVCPU(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"cpu\",\n                           name = \"Phys. 
Processing Power per vCPU\",\n                           value = metrics.getPhysProcessingPowerPerVCPU())\n\n    def createCounterProcessorType(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"cpu\",\n                           name = \"Processor Type\",\n                           value = metrics.getProcessorType())\n\n    def createCounterReferenceComputeUnit(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"cpu\",\n                           name = \"Reference Compute Unit\",\n                           value = metrics.getReferenceComputeUnit())\n\n    def createCounterVCPUMapping(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"cpu\",\n                           name = \"vCPU Mapping\",\n                           value = metrics.getVCPUMapping())\n\n    def createCounterVMProcessingPowerConsumption(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"cpu\",\n                           name = \"VM Processing Power Consumption\",\n                           value = metrics.getVMProcessingPowerConsumption(),\n                           unit = \"%\",\n                           timestamp = metrics.getTimestamp(),\n                           refreshInterval = 60)\n\n    def createCounterCurrMemAssigned(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"memory\",\n                           name = \"Current Memory assigned\",\n                           value = metrics.getCurrMemAssigned(),\n                           unit = \"MB\")\n\n    def createCounterMaxMemAssigned(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"memory\",\n                           name = \"Max Memory assigned\",\n                           value = metrics.getMaxMemAssigned(),\n                           unit = \"MB\")\n\n    def createCounterGuaranteedMemAssigned(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"memory\",\n                           name = \"Guaranteed Memory assigned\",\n                           value = metrics.getGuaranteedMemAssigned(),\n                           unit = \"MB\")\n\n    def createCounterVMMemConsumption(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"memory\",\n                           name = \"VM Memory Consumption\",\n                           value = metrics.getVMMemConsumption(),\n                           unit = \"%\",\n                           timestamp = metrics.getTimestamp(),\n                           refreshInterval = 60)\n\n    def createCounterAdapterId(self, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"network\",\n                           name = \"Adapter Id\",\n                           instance = adapterId,\n                           value = adapterId)\n\n    def createCounterNetworkMapping(self, metrics, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n    
                       category = \"network\",\n                           name = \"Mapping\",\n                           instance = adapterId,\n                           value = metrics.getNetworkAdapterMapping(adapterId))\n\n    def createCounterMaxNetworkBandwidth(self, metrics, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"network\",\n                           name = \"VM Maximum Network Bandwidth\",\n                           instance = adapterId,\n                           value = metrics.getMaxNetworkBandwidth(adapterId),\n                           unit = \"Mbit/s\")\n\n    def createCounterMinNetworkBandwidth(self, metrics, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"network\",\n                           name = \"VM Minimum Network Bandwidth\",\n                           instance = adapterId,\n                           value = metrics.getMinNetworkBandwidth(adapterId),\n                           unit = \"Mbit/s\")\n\n    def createCounterNetworkReadBytes(self, metrics, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"network\",\n                           name = \"Network Read Bytes\",\n                           instance = adapterId,\n                           value = metrics.getNetworkReadBytes(adapterId),\n                           unit = \"byte/s\")\n\n    def createCounterNetworkWriteBytes(self, metrics, adapterId):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"network\",\n                           name = \"Network Write Bytes\",\n                           instance = adapterId,\n                           value = metrics.getNetworkWriteBytes(adapterId),\n                           unit = \"byte/s\")\n\n    def createCounterNetworkPacketRetransmitted(self, metrics):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"network\",\n                           name = \"Packets Retransmitted\",\n                           value = metrics.getNetworkPacketRetransmitted(),\n                           unit = \"packets/min\")\n\ndef getStorageTimestamp(unixTimestamp):\n    tformat = \"{0:0>4d}{1:0>2d}{2:0>2d}T{3:0>2d}{4:0>2d}\"\n    ts = time.gmtime(unixTimestamp)\n    return tformat.format(ts.tm_year,\n                          ts.tm_mon,\n                          ts.tm_mday,\n                          ts.tm_hour,\n                          ts.tm_min)\n    \n\ndef getStorageTableKeyRange():\n    #Round down by MonitoringInterval\n    endTime = int(time.time()) / MonitoringInterval * MonitoringInterval \n    endTime = endTime - AzureTableDelay\n    startTime = endTime - MonitoringInterval\n    return getStorageTimestamp(startTime), getStorageTimestamp(endTime)\n\ndef getStorageMetrics(account, key, hostBase, table, startKey, endKey):\n    try:\n        waagent.Log(\"Retrieve storage metrics data.\")\n        tableService = TableService(account_name = account, \n                                    account_key = key,\n                                    host_base = hostBase)\n        ofilter = (\"PartitionKey ge '{0}' and PartitionKey lt '{1}'\"\n                   \"\").format(startKey, endKey)\n        oselect = (\"TotalRequests,TotalIngress,TotalEgress,AverageE2ELatency,\"\n                   
\"AverageServerLatency,RowKey\")\n        metrics = tableService.query_entities(table, ofilter, oselect)\n        waagent.Log(\"{0} records returned.\".format(len(metrics)))\n        return metrics\n    except Exception as e:\n        waagent.Error((u\"Failed to retrieve storage metrics data: {0} {1}\"\n                       \"\").format(e, traceback.format_exc()))\n        updateLatestErrorRecord(FAILED_TO_RETRIEVE_STORAGE_DATA)\n        AddExtensionEvent(message=FAILED_TO_RETRIEVE_STORAGE_DATA)\n        return None\n\ndef getDataDisks():\n    blockDevs = os.listdir('/sys/block')\n    dataDisks = filter(lambda d : re.match(\"sd[c-z]\", d), blockDevs)\n    return dataDisks\n\ndef getFirstLun(dev):\n    path = os.path.join(\"/sys/block\", dev, \"device/scsi_disk\")\n    for lun in os.listdir(path):\n        return int(lun[-1])\n\nclass DiskInfo(object):\n    def __init__(self, config):\n        self.config = config\n\n    def getDiskMapping(self):\n        osdiskVhd = \"{0} {1}\".format(self.config.getOSDiskAccount(),\n                                     self.config.getOSDiskName())\n        osdisk = {\n                \"vhd\":osdiskVhd, \n                \"type\": self.config.getOSDiskType(),\n                \"caching\": self.config.getOSDiskCaching(),\n                \"iops\": self.config.getOSDiskSLAIOPS(),\n                \"throughput\": self.config.getOSDiskSLAThroughput(),\n        }\n\n        diskMapping = {\n                \"/dev/sda\": osdisk,\n        }\n\n        dataDisks = getDataDisks()\n        if dataDisks is None or len(dataDisks) == 0:\n            return diskMapping\n        \n        lunToDevMap = {}\n        for dev in dataDisks:\n            lun = getFirstLun(dev)\n            lunToDevMap[lun] = dev\n\n        diskCount = self.config.getDataDiskCount()\n        for i in range(0, diskCount):\n            lun = self.config.getDataDiskLun(i)\n            datadiskVhd = \"{0} {1}\".format(self.config.getDataDiskAccount(i),\n                                           self.config.getDataDiskName(i))\n            datadisk = {\n                    \"vhd\": datadiskVhd,\n                    \"type\": self.config.getDataDiskType(i),\n                    \"caching\": self.config.getDataDiskCaching(i),\n                    \"iops\": self.config.getDataDiskSLAIOPS(i),\n                    \"throughput\": self.config.getDataDiskSLAThroughput(i),\n            }\n            if lun in lunToDevMap:\n                dev = lunToDevMap[lun]\n                diskMapping[dev] = datadisk\n            else:\n                waagent.Warn(\"Couldn't find disk with lun: {0}\".format(lun))\n\n        return diskMapping \n\ndef isUserRead(op):\n    if not op.startswith(\"user;\"):\n        return False\n    op = op[5:]\n    for prefix in [\"Get\", \"List\", \"Preflight\"]:\n        if op.startswith(prefix):\n            return True\n    return False\n\ndef isUserWrite(op):\n    if not op.startswith(\"user;\"):\n        return False\n    op = op[5:]\n    for prefix in [\"Put\" ,\"Set\" ,\"Clear\" ,\"Delete\" ,\"Create\" ,\"Snapshot\"]:    \n        if op.startswith(prefix):\n            return True\n    return False\n\ndef storageStat(metrics, opFilter):\n    stat = {}\n    stat['bytes'] = None\n    stat['ops'] = None\n    stat['e2eLatency'] = None\n    stat['serverLatency'] = None\n    stat['throughput'] = None\n    if metrics is None:\n        return stat\n\n    metrics = filter(lambda x : opFilter(x.RowKey), metrics)\n    stat['bytes'] = sum(map(lambda x : x.TotalIngress + x.TotalEgress, 
\n                            metrics))\n    stat['ops'] = sum(map(lambda x : x.TotalRequests, metrics))\n    if stat['ops'] != 0:\n        stat['e2eLatency'] = sum(map(lambda x : x.TotalRequests * \\\n                                                x.AverageE2ELatency, \n                                     metrics)) / stat['ops']\n        stat['serverLatency'] = sum(map(lambda x : x.TotalRequests * \\\n                                                   x.AverageServerLatency, \n                                        metrics)) / stat['ops']\n    #Convert to MB/s\n    stat['throughput'] = float(stat['bytes']) / (1024 * 1024) / 60\n    return stat\n\nclass AzureStorageStat(object):\n\n    def __init__(self, metrics):\n        self.metrics = metrics\n        self.rStat = storageStat(metrics, isUserRead)\n        self.wStat = storageStat(metrics, isUserWrite)\n\n    def getReadBytes(self):\n        return self.rStat['bytes']\n\n    def getReadOps(self):\n        return self.rStat['ops']\n\n    def getReadOpE2ELatency(self):\n        return self.rStat['e2eLatency']\n\n    def getReadOpServerLatency(self):\n        return self.rStat['serverLatency']\n\n    def getReadOpThroughput(self):\n        return self.rStat['throughput']\n\n    def getWriteBytes(self):\n        return self.wStat['bytes']\n\n    def getWriteOps(self):\n        return self.wStat['ops']\n\n    def getWriteOpE2ELatency(self):\n        return self.wStat['e2eLatency']\n\n    def getWriteOpServerLatency(self):\n        return self.wStat['serverLatency']\n\n    def getWriteOpThroughput(self):\n        return self.wStat['throughput']\n\n\nclass StorageDataSource(object):\n    def __init__(self, config):\n        self.config = config\n\n    def collect(self):\n        counters = []\n\n        #Add disk mapping for resource disk\n        counters.append(self.createCounterDiskMapping(\"/dev/sdb\", \n                                                      \"not mapped to vhd\"))\n        #Add disk mapping for osdisk and data disk\n        diskMapping = DiskInfo(self.config).getDiskMapping()\n        for dev, disk in diskMapping.iteritems():\n            counters.append(self.createCounterDiskMapping(dev, disk.get(\"vhd\")))\n            counters.append(self.createCounterDiskType(dev, disk.get(\"type\")))\n            counters.append(self.createCounterDiskCaching(dev, disk.get(\"caching\")))\n            if disk.get(\"type\") == \"Premium\":\n                counters.append(self.createCounterDiskIOPS(dev, disk.get(\"iops\")))\n                counters.append(self.createCounterDiskThroughput(dev, disk.get(\"throughput\")))\n\n        accounts = self.config.getStorageAccountNames()\n        for account in accounts:\n            if self.config.getStorageAccountType(account) == \"Standard\":\n                counters.extend(self.collectMetricsForStandardStorage(account))\n        return counters\n\n    def collectMetricsForStandardStorage(self, account):\n        counters = []\n        startKey, endKey = getStorageTableKeyRange()\n        tableName = self.config.getStorageAccountMinuteTable(account)\n        accountKey = self.config.getStorageAccountKey(account)\n        hostBase = self.config.getStorageHostBase(account)\n        metrics = getStorageMetrics(account, \n                                    accountKey,\n                                    hostBase,\n                                    tableName,\n                                    startKey,\n                                    endKey)\n        stat = AzureStorageStat(metrics)\n  
      counters.append(self.createCounterStorageId(account))\n        counters.append(self.createCounterReadBytes(account, stat))\n        counters.append(self.createCounterReadOps(account, stat))\n        counters.append(self.createCounterReadOpE2ELatency(account, stat))\n        counters.append(self.createCounterReadOpServerLatency(account, stat))\n        counters.append(self.createCounterReadOpThroughput(account, stat))\n        counters.append(self.createCounterWriteBytes(account, stat))\n        counters.append(self.createCounterWriteOps(account, stat))\n        counters.append(self.createCounterWriteOpE2ELatency(account, stat))\n        counters.append(self.createCounterWriteOpServerLatency(account, stat))\n        counters.append(self.createCounterWriteOpThroughput(account, stat))\n        return counters\n\n    def createCounterDiskType(self, dev, diskType):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"disk\",\n                           name = \"Storage Type\",\n                           instance = dev,\n                           value = diskType)\n\n    def createCounterDiskCaching(self, dev, caching):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"disk\",\n                           name = \"Caching\",\n                           instance = dev,\n                           value = caching)\n\n    def createCounterDiskThroughput(self, dev, throughput):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"disk\",\n                           name = \"SLA Throughput\",\n                           instance = dev,\n                           unit = \"MB/sec\",\n                           value = throughput)\n\n    def createCounterDiskIOPS(self, dev, iops):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"disk\",\n                           name = \"SLA IOPS\",\n                           instance = dev,\n                           unit = \"Ops/sec\",\n                           value = iops)\n\n    def createCounterReadBytes(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"storage\",\n                           name = \"Storage Read Bytes\",\n                           instance = account,\n                           value = stat.getReadBytes(),\n                           unit = 'byte',\n                           refreshInterval = 60)\n\n    def createCounterReadOps(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"storage\",\n                           name = \"Storage Read Ops\",\n                           instance = account,\n                           value = stat.getReadOps(),\n                           refreshInterval = 60)\n\n    def createCounterReadOpE2ELatency(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Read Op Latency E2E msec\",\n                           instance = account,\n                           value = stat.getReadOpE2ELatency(),\n                           unit = 'ms',\n                           refreshInterval = 60)\n\n    def 
createCounterReadOpServerLatency(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Read Op Latency Server msec\",\n                           instance = account,\n                           value = stat.getReadOpServerLatency(),\n                           unit = 'ms',\n                           refreshInterval = 60)\n\n    def createCounterReadOpThroughput(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Read Throughput E2E MB/sec\",\n                           instance = account,\n                           value = stat.getReadOpThroughput(),\n                           unit = 'MB/s',\n                           refreshInterval = 60)\n\n    def createCounterWriteBytes(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,\n                           category = \"storage\",\n                           name = \"Storage Write Bytes\",\n                           instance = account,\n                           value = stat.getWriteBytes(),\n                           unit = 'byte',\n                           refreshInterval = 60)\n\n    def createCounterWriteOps(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"storage\",\n                           name = \"Storage Write Ops\",\n                           instance = account,\n                           value = stat.getWriteOps(),\n                           refreshInterval = 60)\n\n    def createCounterWriteOpE2ELatency(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Write Op Latency E2E msec\",\n                           instance = account,\n                           value = stat.getWriteOpE2ELatency(),\n                           unit = 'ms',\n                           refreshInterval = 60)\n\n    def createCounterWriteOpServerLatency(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Write Op Latency Server msec\",\n                           instance = account,\n                           value = stat.getWriteOpServerLatency(),\n                           unit = 'ms',\n                           refreshInterval = 60)\n\n    def createCounterWriteOpThroughput(self, account, stat):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,\n                           category = \"storage\",\n                           name = \"Storage Write Throughput E2E MB/sec\",\n                           instance = account,\n                           value = stat.getWriteOpThroughput(),\n                           unit = 'MB/s',\n                           refreshInterval = 60)\n\n\n    def createCounterStorageId(self, account):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"storage\",\n                           name = \"Storage ID\",\n                           instance = account,\n                           value = account)\n\n    def 
createCounterDiskMapping(self, dev, vhd):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"storage\",\n                           name = \"Phys. Disc to Storage Mapping\",\n                           instance = dev,\n                           value = vhd)\n\nclass HvInfo(object):\n    def __init__(self):\n        self.hvName = None\n        self.hvVersion = None\n        root_dir = os.path.dirname(__file__)\n        cmd = os.path.join(root_dir, \"bin/hvinfo\")\n        ret, output = waagent.RunGetOutput(cmd, chk_err=False)\n        if ret == 0 and output is not None:\n            lines = output.split(\"\\\\n\")\n            if len(lines) >= 2:\n                self.hvName = lines[0]\n                self.hvVersion = lines[1]\n\n    def getHvName(self):\n        return self.hvName\n\n    def getHvVersion(self):\n        return self.hvVersion\n\nclass StaticDataSource(object):\n    def __init__(self, config):\n        self.config = config\n\n    def collect(self):\n        counters = []\n        hvInfo = HvInfo()\n        counters.append(self.createCounterCloudProvider())\n        counters.append(self.createCounterCpuOverCommitted())\n        counters.append(self.createCounterMemoryOverCommitted())\n        counters.append(self.createCounterDataProviderVersion())\n        counters.append(self.createCounterDataSources())\n        counters.append(self.createCounterInstanceType())\n        counters.append(self.createCounterVirtSln(hvInfo.getHvName()))\n        counters.append(self.createCounterVirtSlnVersion(hvInfo.getHvVersion()))\n        vmSLAThroughput = self.config.getVMSLAThroughput()\n        if vmSLAThroughput is not None:\n            counters.append(self.createCounterVMSLAThroughput(vmSLAThroughput))\n        vmSLAIOPS = self.config.getVMSLAIOPS()\n        if vmSLAIOPS is not None:\n            counters.append(self.createCounterVMSLAIOPS(vmSLAIOPS))\n\n        return counters\n    \n    def createCounterVMSLAThroughput(self, throughput):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"config\",\n                           name = \"SLA Max Disk Bandwidth per VM\",\n                           unit = \"MB/sec\",\n                           value = throughput)\n\n    def createCounterVMSLAIOPS(self, iops):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,\n                           category = \"config\",\n                           name = \"SLA Max Disk IOPS per VM\",\n                           unit = \"Ops/sec\",\n                           value = iops)\n\n    def createCounterCloudProvider(self):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Cloud Provider\",\n                           value = \"Microsoft Azure\")\n\n    def createCounterVirtSlnVersion(self, hvVersion):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Virtualization Solution Version\",\n                           value = hvVersion)\n\n    def createCounterVirtSln(self, hvName):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Virtualization Solution\",\n      
                     value = hvName)\n\n    def createCounterInstanceType(self):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Instance Type\",\n                           value = self.config.getVmSize())\n\n    def createCounterDataSources(self):\n        dataSource = \"wad\" if self.config.isLADEnabled() else \"local\"\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Data Sources\",\n                           value = dataSource)\n\n    def createCounterDataProviderVersion(self):\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Data Provider Version\",\n                           value = AzureEnhancedMonitorVersion)\n\n    def createCounterMemoryOverCommitted(self):\n        value = \"yes\" if self.config.isMemoryOverCommitted() else \"no\"\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"Memory Over-Provisioning\",\n                           value = value)\n\n    def createCounterCpuOverCommitted(self):\n        value = \"yes\" if self.config.isCpuOverCommitted() else \"no\"\n        return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,\n                           category = \"config\",\n                           name = \"CPU Over-Provisioning\",\n                           value = value)\n\nclass PerfCounterType(object):\n    COUNTER_TYPE_INVALID = 0\n    COUNTER_TYPE_INT = 1\n    COUNTER_TYPE_DOUBLE = 2\n    COUNTER_TYPE_LARGE = 3\n    COUNTER_TYPE_STRING = 4\n\nclass PerfCounter(object):\n    def __init__(self, \n                 counterType, \n                 category, \n                 name, \n                 value, \n                 instance=\"\",\n                 unit=\"none\",\n                 timestamp = None,\n                 refreshInterval=0):\n        self.counterType = counterType\n        self.category = category\n        self.name = name\n        self.instance = instance\n        self.value = value\n        self.unit = unit\n        self.refreshInterval = refreshInterval\n        if timestamp:\n            self.timestamp = timestamp\n        else:\n            self.timestamp = int(time.time())\n        self.machine = socket.gethostname()\n\n    def __str__(self):\n        #One counter per line:\n        #type;category;name;instance;is-empty;value;unit;refresh;timestamp;machine;\n        return (u\"{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};\\\\n\"\n                 \"\").format(self.counterType,\n                            self.category,\n                            self.name,\n                            self.instance,\n                            0 if self.value is not None else 1,\n                            self.value if self.value is not None else \"\",\n                            self.unit,\n                            self.refreshInterval,\n                            self.timestamp,\n                            self.machine)\n\n    __repr__ = __str__\n\nclass EnhancedMonitor(object):\n    def __init__(self, config):\n        self.dataSources = []\n        self.dataSources.append(VMDataSource(config))\n        self.dataSources.append(StorageDataSource(config))\n        self.dataSources.append(StaticDataSource(config))\n        self.writer = PerfCounterWriter()\n\n    def run(self):\n     
   counters = []\n        for dataSource in self.dataSources:\n            counters.extend(dataSource.collect())\n        clearLastErrorRecord()\n        self.writer.write(counters)\n\nEventFile = os.path.join(LibDir, \"PerfCounters\")\nclass PerfCounterWriter(object):\n    def write(self, counters, maxRetry = 3, eventFile=EventFile):\n        for i in range(0, maxRetry):\n            try:\n                self._write(counters, eventFile)\n                waagent.Log((\"Wrote {0} counters to the event file.\"\n                             \"\").format(len(counters)))\n                return\n            except IOError as e:\n                waagent.Warn((u\"Write to perf counters file failed: {0}\"\n                              \"\").format(e))\n                waagent.Log(\"Retry: {0}\".format(i))\n                time.sleep(1)\n\n        waagent.Error((\"Failed to serialize perf counter to file:\"\n                       \"{0}\").format(eventFile))\n        updateLatestErrorRecord(FAILED_TO_SERIALIZE_PERF_COUNTERS)\n        AddExtensionEvent(message=FAILED_TO_SERIALIZE_PERF_COUNTERS)\n        raise\n\n    def _write(self, counters, eventFile):\n        with open(eventFile, \"w+\") as F:\n            F.write(\"\".join(map(lambda c : str(c), counters)).encode(\"utf8\"))\n\nclass EnhancedMonitorConfig(object):\n    def __init__(self, publicConfig, privateConfig):\n        xmldoc = minidom.parse('/var/lib/waagent/SharedConfig.xml')\n        self.deployment = xmldoc.getElementsByTagName('Deployment')\n        self.role = xmldoc.getElementsByTagName('Role')\n        self.configData = {}\n        diskCount = 0\n        accountNames = []\n        for item in publicConfig[\"cfg\"]:\n            self.configData[item[\"key\"]] = item[\"value\"]\n            if item[\"key\"].startswith(\"disk.lun\"):\n                diskCount = diskCount + 1\n            if item[\"key\"].endswith(\"minute.name\"):\n                accountNames.append(item[\"value\"])\n\n        for item in privateConfig[\"cfg\"]:\n            self.configData[item[\"key\"]] = item[\"value\"]\n\n        self.configData[\"disk.count\"] = diskCount\n        self.configData[\"account.names\"] = accountNames\n\n    def getVmSize(self):\n        return self.configData.get(\"vmsize\")\n\n    def getVmRoleInstance(self):\n        return self.role[0].attributes['name'].value\n\n    def getVmDeploymentId(self):\n        return self.deployment[0].attributes['name'].value\n\n    def isMemoryOverCommitted(self):\n        return self.configData.get(\"vm.memory.isovercommitted\")\n\n    def isCpuOverCommitted(self):\n        return self.configData.get(\"vm.cpu.isovercommitted\")\n\n    def getScriptVersion(self):\n        return self.configData.get(\"script.version\")\n\n    def isVerbose(self):\n        flag = self.configData.get(\"verbose\")\n        return flag == \"1\" or flag == 1\n\n    def getVMSLAIOPS(self):\n        return self.configData.get(\"vm.sla.iops\")\n\n    def getVMSLAThroughput(self):\n        return self.configData.get(\"vm.sla.throughput\")\n\n    def getOSDiskName(self):\n        return self.configData.get(\"osdisk.name\")\n\n    def getOSDiskAccount(self):\n        osdiskConnMinute = self.getOSDiskConnMinute()\n        return self.configData.get(\"{0}.name\".format(osdiskConnMinute))\n\n    def getOSDiskConnMinute(self):\n        return self.configData.get(\"osdisk.connminute\")\n\n    def getOSDiskConnHour(self):\n        return self.configData.get(\"osdisk.connhour\")\n\n    def getOSDiskType(self):\n        return 
self.configData.get(\"osdisk.type\")\n\n    def getOSDiskCaching(self):\n        return self.configData.get(\"osdisk.caching\")\n\n    def getOSDiskSLAIOPS(self):\n        return self.configData.get(\"osdisk.sla.iops\")\n    \n    def getOSDiskSLAThroughput(self):\n        return self.configData.get(\"osdisk.sla.throughput\")\n    \n    def getDataDiskCount(self):\n        return self.configData.get(\"disk.count\")\n\n    def getDataDiskLun(self, index):\n        return self.configData.get(\"disk.lun.{0}\".format(index))\n\n    def getDataDiskName(self, index):\n        return self.configData.get(\"disk.name.{0}\".format(index))\n\n    def getDataDiskAccount(self, index):\n        return self.configData.get(\"disk.account.{0}\".format(index))\n\n    def getDataDiskConnMinute(self, index):\n        return self.configData.get(\"disk.connminute.{0}\".format(index))\n\n    def getDataDiskConnHour(self, index):\n        return self.configData.get(\"disk.connhour.{0}\".format(index))\n    \n    def getDataDiskType(self, index):\n        return self.configData.get(\"disk.type.{0}\".format(index))\n\n    def getDataDiskCaching(self, index):\n        return self.configData.get(\"disk.caching.{0}\".format(index))\n\n    def getDataDiskSLAIOPS(self, index):\n        return self.configData.get(\"disk.sla.iops.{0}\".format(index))\n    \n    def getDataDiskSLAThroughput(self, index):\n        return self.configData.get(\"disk.sla.throughput.{0}\".format(index))\n    \n    def getStorageAccountNames(self):\n        return self.configData.get(\"account.names\")\n\n    def getStorageAccountKey(self, name):\n        return self.configData.get(\"{0}.minute.key\".format(name))\n        \n    def getStorageAccountType(self, name):\n        key = \"{0}.minute.ispremium\".format(name) \n        return \"Premium\" if self.configData.get(key) == 1 else \"Standard\"\n    \n    def getStorageHostBase(self, name):\n        return get_host_base_from_uri(self.getStorageAccountMinuteUri(name)) \n\n    def getStorageAccountMinuteUri(self, name):\n        return self.configData.get(\"{0}.minute.uri\".format(name))\n\n    def getStorageAccountMinuteTable(self, name):\n        uri = self.getStorageAccountMinuteUri(name)\n        pos = uri.rfind('/')\n        tableName = uri[pos+1:]\n        return tableName\n\n    def getStorageAccountHourUri(self, name):\n        return self.configData.get(\"{0}.hour.uri\".format(name))\n\n    def isLADEnabled(self):\n        flag = self.configData.get(\"wad.isenabled\")\n        return flag == \"1\" or flag == 1\n\n    def getLADKey(self):\n        return self.configData.get(\"wad.key\")\n\n    def getLADName(self):\n        return self.configData.get(\"wad.name\")\n    \n    def getLADHostBase(self):\n        return get_host_base_from_uri(self.getLADUri())\n\n    def getLADUri(self):\n        return self.configData.get(\"wad.uri\")\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/handler.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport re\nimport os\nimport subprocess\nimport traceback\nimport time\nimport aem\nimport string\nfrom Utils.WAAgentUtil import waagent, InitExtensionEventLog\nimport Utils.HandlerUtil as util\n\nExtensionShortName = 'AzureEnhancedMonitor'\nExtensionFullName  = 'Microsoft.OSTCExtensions.AzureEnhancedMonitor'\nExtensionVersion   = 'AzureEnhancedMonitor'\n\ndef printable(s):\n    return filter(lambda c : c in string.printable, str(s))\n\ndef enable(hutil):\n    pidFile = os.path.join(aem.LibDir, \"pid\");\n   \n    #Check whether monitor process is running.\n    #If it does, return. Otherwise clear pid file\n    if os.path.isfile(pidFile):\n        pid = waagent.GetFileContents(pidFile)\n        if os.path.isdir(os.path.join(\"/proc\", pid)):\n            if hutil.is_seq_smaller():\n                hutil.do_exit(0, 'Enable', 'success', '0', \n                              'Azure Enhanced Monitor is already running')\n            else:\n                waagent.Log(\"Stop old daemon: {0}\".format(pid))\n                os.kill(int(pid), 9)\n        os.remove(pidFile)\n\n    args = [os.path.join(os.getcwd(), __file__), \"daemon\"]\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n    if child.pid == None or child.pid < 1:\n        hutil.do_exit(1, 'Enable', 'error', '1', \n                      'Failed to launch Azure Enhanced Monitor')\n    else:\n        hutil.save_seq()\n        waagent.SetFileContents(pidFile, str(child.pid))\n        waagent.Log((\"Daemon pid: {0}\").format(child.pid))\n        hutil.do_exit(0, 'Enable', 'success', '0', \n                      'Azure Enhanced Monitor is enabled')\n\ndef disable(hutil):\n    pidFile = os.path.join(aem.LibDir, \"pid\");\n   \n    #Check whether monitor process is running.\n    #If it does, kill it. 
Otherwise clear pid file\n    if os.path.isfile(pidFile):\n        pid = waagent.GetFileContents(pidFile)\n        if os.path.isdir(os.path.join(\"/proc\", pid)):\n            waagent.Log((\"Stop daemon: {0}\").format(pid))\n            os.kill(int(pid), 9)\n            os.remove(pidFile)\n            hutil.do_exit(0, 'Disable', 'success', '0', \n                          'Azure Enhanced Monitor is disabled')\n        os.remove(pidFile)\n    \n    hutil.do_exit(0, 'Disable', 'success', '0', \n                  'Azure Enhanced Monitor is not running')\n\ndef daemon(hutil):\n    publicConfig = hutil.get_public_settings()\n    privateConfig = hutil.get_protected_settings()\n    config = aem.EnhancedMonitorConfig(publicConfig, privateConfig)\n    monitor = aem.EnhancedMonitor(config)\n    hutil.set_verbose_log(config.isVerbose())\n    InitExtensionEventLog(hutil.get_name())\n    while True:\n        waagent.Log(\"Collecting performance counter.\")\n        startTime = time.time()\n        try:\n            monitor.run()\n            message = (\"deploymentId={0} roleInstance={1} OK\"\n                       \"\").format(config.getVmDeploymentId(), \n                                  config.getVmRoleInstance())\n            hutil.do_status_report(\"Enable\", \"success\", 0, message)\n\n        except Exception as e:\n            waagent.Error(\"{0} {1}\".format(printable(e), \n                                           traceback.format_exc()))\n            hutil.do_status_report(\"Enable\", \"error\", 0, \"{0}\".format(e))\n        waagent.Log(\"Finished collection.\")\n        timeElapsed = time.time() - startTime\n        timeToWait = (aem.MonitoringInterval - timeElapsed)\n        #Make sure timeToWait is in the range [0, aem.MonitoringInterval)\n        timeToWait = timeToWait % aem.MonitoringInterval\n        time.sleep(timeToWait)\n\ndef grace_exit(operation, status, msg):\n    hutil = parse_context(operation)\n    hutil.do_exit(0, operation, status, '0', msg)\n\ndef parse_context(operation):\n    hutil = util.HandlerUtility(waagent.Log, waagent.Error, ExtensionShortName, ExtensionFullName, ExtensionVersion)\n    hutil.do_parse_context(operation)\n    return hutil\n\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"{0} started to handle.\".format(ExtensionShortName))\n    \n    if not os.path.isdir(aem.LibDir):\n        os.makedirs(aem.LibDir)\n    \n    for command in sys.argv[1:]:\n        if re.match(\"^([-/]*)(install)\", command):\n            grace_exit(\"install\", \"success\", \"Install succeeded\")\n        if re.match(\"^([-/]*)(uninstall)\", command):\n            grace_exit(\"uninstall\", \"success\", \"Uninstall succeeded\")\n        if re.match(\"^([-/]*)(update)\", command):\n            grace_exit(\"update\", \"success\", \"Update succeeded\")\n\n        try:\n            if re.match(\"^([-/]*)(enable)\", command):\n                hutil = parse_context(\"enable\")\n                enable(hutil)\n            elif re.match(\"^([-/]*)(disable)\", command):\n                hutil = parse_context(\"disable\")\n                disable(hutil)\n            elif re.match(\"^([-/]*)(daemon)\", command):\n                hutil = parse_context(\"enable\")\n                daemon(hutil)\n        except Exception as e:\n            hutil.error(\"{0}, {1}\".format(e, traceback.format_exc()))\n            hutil.do_exit(1, command, 'failed','0', \n                          '{0} failed:{1}'.format(command, e))\n\nif __name__ == '__main__':\n    
main()\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/installer.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport imp\nimport os\nimport shutil\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as util\n\nExtensionShortName = 'AzureEnhancedMonitor'\n\ndef parse_context(operation):\n    hutil = util.HandlerUtility(waagent.Log, waagent.Error, ExtensionShortName)\n    hutil.do_parse_context(operation)\n    return hutil\n\ndef find_psutil_build(buildDir):\n    for item in os.listdir(buildDir):\n        try:\n            build = os.path.join(buildDir, item)\n            binary = os.path.join(build, '_psutil_linux.so')\n            imp.load_dynamic('_psutil_linux', binary)\n            return build\n        except Exception:\n            pass\n    raise Exception(\"Available build of psutil not found.\")\n\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"{0} started to handle.\".format(ExtensionShortName))\n    \n    hutil = parse_context(\"Install\")\n    try:\n        root = os.path.dirname(os.path.abspath(__file__))\n        buildDir = os.path.join(root, \"libpsutil\")\n        build = find_psutil_build(buildDir) \n        for item in os.listdir(build):\n            src = os.path.join(build, item)\n            dest = os.path.join(root, item)\n            if os.path.isfile(src):\n                if os.path.isfile(dest):\n                    os.remove(dest)\n                shutil.copyfile(src, dest)\n            else:\n                if os.path.isdir(dest):\n                    shutil.rmtree(dest)\n                shutil.copytree(src, dest)\n    except Exception as e:\n        hutil.error(\"{0}, {1}\").format(e, traceback.format_exc())\n        hutil.do_exit(1, \"Install\", 'failed','0', \n                      'Install failed: {0}'.format(e))\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/references",
    "content": "Common/azure-sdk-for-python/azure/\nCommon/psutil/LICENSE\nCommon/libpsutil\nUtils/\nLICENSE-2_0.txt\nAzureEnhancedMonitor/hvinfo/bin\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/test/env.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\ntest_dir = os.path.dirname(os.path.abspath(__file__))\nroot = os.path.dirname(test_dir)\nsys.path.append(root)\n\nazure_sdk = os.path.join(root, \"Common/azure-sdk-for-python\") \nsys.path.append(azure_sdk)\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/test/storage_metrics",
    "content": "[{\"TotalRequests\": 1, \"RowKey\": \"system;All\", \"AverageE2ELatency\": 52.0, \"AverageServerLatency\": 48.0, \"TotalIngress\": 247088, \"TotalEgress\": 160}, {\"TotalRequests\": 154, \"RowKey\": \"user;All\", \"AverageE2ELatency\": 6.285714, \"AverageServerLatency\": 5.551948, \"TotalIngress\": 1015225, \"TotalEgress\": 562321}, {\"TotalRequests\": 6, \"RowKey\": \"user;ClearPage\", \"AverageE2ELatency\": 5.0, \"AverageServerLatency\": 5.0, \"TotalIngress\": 3166, \"TotalEgress\": 1284}, {\"TotalRequests\": 1, \"RowKey\": \"user;GetBlob\", \"AverageE2ELatency\": 139.0, \"AverageServerLatency\": 31.0, \"TotalIngress\": 500, \"TotalEgress\": 524684}, {\"TotalRequests\": 11, \"RowKey\": \"user;PutBlob\", \"AverageE2ELatency\": 8.727273, \"AverageServerLatency\": 8.727273, \"TotalIngress\": 19026, \"TotalEgress\": 2475}, {\"TotalRequests\": 136, \"RowKey\": \"user;PutPage\", \"AverageE2ELatency\": 5.169118, \"AverageServerLatency\": 5.132353, \"TotalIngress\": 992533, \"TotalEgress\": 33878}]"
  },
  {
    "path": "AzureEnhancedMonitor/ext/test/test_aem.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os\nimport json\nimport unittest\n\nimport env\nimport aem\nfrom Utils.WAAgentUtil import waagent\n\nTestPublicConfig = \"\"\"\\\n{\n    \"cfg\": [{\n        \"key\":  \"vmsize\",\n        \"value\":  \"Small (A1)\"\n    },{\n        \"key\":  \"vm.roleinstance\",\n        \"value\":  \"osupdate\"\n    },{\n        \"key\":  \"vm.role\",\n        \"value\":  \"IaaS\"\n    },{\n        \"key\":  \"vm.deploymentid\",\n        \"value\":  \"cd98461b43364478a908d03d0c3135a7\"\n    },{\n        \"key\":  \"vm.memory.isovercommitted\",\n        \"value\":  0\n    },{\n        \"key\":  \"vm.cpu.isovercommitted\",\n        \"value\":  0\n    },{\n        \"key\":  \"script.version\",\n        \"value\":  \"1.2.0.0\"\n    },{\n        \"key\":  \"verbose\",\n        \"value\":  \"0\"\n    },{\n        \"key\":  \"osdisk.connminute\",\n        \"value\":  \"asdf.minute\"\n    },{\n        \"key\":  \"osdisk.connhour\",\n        \"value\":  \"asdf.hour\"\n    },{\n        \"key\":  \"osdisk.name\",\n        \"value\":  \"osupdate-osupdate-2015-02-12.vhd\"\n    },{\n        \"key\":  \"asdf.hour.uri\",\n        \"value\":  \"https://asdf.table.core.windows.net/$metricshourprimarytransactionsblob\"\n    },{\n        \"key\":  \"asdf.minute.uri\",\n        \"value\":  \"https://asdf.table.core.windows.net/$metricsminuteprimarytransactionsblob\"\n    },{\n        \"key\":  \"asdf.hour.name\",\n        \"value\":  \"asdf\"\n    },{\n        \"key\":  \"asdf.minute.name\",\n        \"value\":  \"asdf\"\n    },{\n        \"key\":  \"wad.name\",\n        \"value\":  \"asdf\"\n    },{\n        \"key\":  \"wad.isenabled\",\n        \"value\":  \"1\"\n    },{\n        \"key\":  \"wad.uri\",\n        \"value\":  \"https://asdf.table.core.windows.net/wadperformancecounterstable\"\n    }]\n}\n\"\"\"\nTestPrivateConfig = \"\"\"\\\n{\n    \"cfg\" : [{\n        \"key\" : \"asdf.minute.key\",\n        \"value\" : \"qwer\"\n    },{\n        \"key\" : \"wad.key\",\n        \"value\" : \"qwer\"\n    }]\n}\n\"\"\"\nclass TestAEM(unittest.TestCase):\n    def setUp(self):\n        waagent.LoggerInit(\"/dev/null\", \"/dev/stdout\")\n\n    def test_config(self):\n        publicConfig = json.loads(TestPublicConfig)\n        privateConfig = json.loads(TestPrivateConfig)\n        config = aem.EnhancedMonitorConfig(publicConfig, privateConfig)\n        self.assertNotEquals(None, config)\n        self.assertEquals(\".table.core.windows.net\", \n                          config.getStorageHostBase('asdf'))\n        self.assertEquals(\".table.core.windows.net\", \n                          config.getLADHostBase())\n        return config\n\n    def test_static_datasource(self):\n        config = self.test_config()\n        dataSource = aem.StaticDataSource(config)\n        counters = dataSource.collect()\n        self.assertNotEquals(None, 
counters)\n        self.assertNotEquals(0, len(counters))\n\n        name = \"Cloud Provider\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"Microsoft Azure\", counter.value)\n        \n        name = \"Virtualization Solution Version\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertNotEquals(None, counter.value)\n\n        name = \"Virtualization Solution\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertNotEquals(None, counter.value)\n\n        name = \"Instance Type\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"Small (A1)\", counter.value)\n\n        name = \"Data Sources\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"wad\", counter.value)\n\n        name = \"Data Provider Version\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"2.0.0\", counter.value)\n\n        name = \"Memory Over-Provisioning\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"no\", counter.value)\n\n        name = \"CPU Over-Provisioning\"\n        counter = next((c for c in counters if c.name == name))\n        self.assertNotEquals(None, counter)\n        self.assertEquals(\"no\", counter.value)\n\n    def test_cpuinfo(self):\n        cpuinfo = aem.CPUInfo.getCPUInfo()\n        self.assertNotEquals(None, cpuinfo)\n        self.assertNotEquals(0, cpuinfo.getNumOfCoresPerCPU())\n        self.assertNotEquals(0, cpuinfo.getNumOfCores())\n        self.assertNotEquals(None, cpuinfo.getProcessorType())\n        self.assertEquals(float, type(cpuinfo.getFrequency()))\n        self.assertEquals(bool, type(cpuinfo.isHyperThreadingOn()))\n        percent = cpuinfo.getCPUPercent()\n        self.assertEquals(float, type(percent))\n        self.assertTrue(percent >= 0 and percent <= 100)\n\n    def test_meminfo(self):\n        meminfo = aem.MemoryInfo()\n        self.assertNotEquals(None, meminfo.getMemSize())\n        self.assertEquals(long, type(meminfo.getMemSize()))\n        percent = meminfo.getMemPercent()\n        self.assertEquals(float, type(percent))\n        self.assertTrue(percent >= 0 and percent <= 100)\n\n    def test_networkinfo(self):\n        netinfo = aem.NetworkInfo()\n        adapterIds = netinfo.getAdapterIds()\n        self.assertNotEquals(None, adapterIds)\n        self.assertNotEquals(0, len(adapterIds))\n        adapterId = adapterIds[0]\n        self.assertNotEquals(None, aem.getMacAddress(adapterId))\n        self.assertNotEquals(None, netinfo.getNetworkReadBytes())\n        self.assertNotEquals(None, netinfo.getNetworkWriteBytes())\n        self.assertNotEquals(None, netinfo.getNetworkPacketRetransmitted())\n\n    def test_hwchangeinfo(self):\n        netinfo = aem.NetworkInfo()\n        testHwInfoFile = \"/tmp/HwInfo\"\n        aem.HwInfoFile = testHwInfoFile\n        if os.path.isfile(testHwInfoFile):\n            os.remove(testHwInfoFile)\n        hwChangeInfo = aem.HardwareChangeInfo(netinfo)\n        self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())\n        
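#The first collection should create the HwInfo state file on disk.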
\n        self.assertTrue(os.path.isfile(aem.HwInfoFile))\n\n        #No hardware change\n        lastChange = hwChangeInfo.getLastHardwareChange()\n        hwChangeInfo = aem.HardwareChangeInfo(netinfo)\n        self.assertEquals(lastChange, hwChangeInfo.getLastHardwareChange())\n\n        #Create mock hardware\n        waagent.SetFileContents(testHwInfoFile, (\"0\\nma-ca-sa-ds-02\"))\n        hwChangeInfo = aem.HardwareChangeInfo(netinfo)\n        self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())\n\n    def test_linux_metric(self):\n        config = self.test_config()\n        metric = aem.LinuxMetric(config)\n        self.validate_cnm_metric(metric)\n\n    #Metric for CPU, network and memory\n    def validate_cnm_metric(self, metric):\n        self.assertNotEquals(None, metric.getCurrHwFrequency())\n        self.assertNotEquals(None, metric.getMaxHwFrequency())\n        self.assertNotEquals(None, metric.getCurrVMProcessingPower())\n        self.assertNotEquals(None, metric.getGuaranteedMemAssigned())\n        self.assertNotEquals(None, metric.getMaxVMProcessingPower())\n        self.assertNotEquals(None, metric.getNumOfCoresPerCPU())\n        self.assertNotEquals(None, metric.getNumOfThreadsPerCore())\n        self.assertNotEquals(None, metric.getPhysProcessingPowerPerVCPU())\n        self.assertNotEquals(None, metric.getProcessorType())\n        self.assertNotEquals(None, metric.getReferenceComputeUnit())\n        self.assertNotEquals(None, metric.getVCPUMapping())\n        self.assertNotEquals(None, metric.getVMProcessingPowerConsumption())\n        self.assertNotEquals(None, metric.getCurrMemAssigned())\n        self.assertNotEquals(None, metric.getGuaranteedMemAssigned())\n        self.assertNotEquals(None, metric.getMaxMemAssigned())\n        self.assertNotEquals(None, metric.getVMMemConsumption())\n        adapterIds = metric.getNetworkAdapterIds()\n        self.assertNotEquals(None, adapterIds)\n        self.assertNotEquals(0, len(adapterIds))\n        adapterId = adapterIds[0]\n        self.assertNotEquals(None, metric.getNetworkAdapterMapping(adapterId))\n        self.assertNotEquals(None, metric.getMaxNetworkBandwidth(adapterId))\n        self.assertNotEquals(None, metric.getMinNetworkBandwidth(adapterId))\n        self.assertNotEquals(None, metric.getNetworkReadBytes())\n        self.assertNotEquals(None, metric.getNetworkWriteBytes())\n        self.assertNotEquals(None, metric.getNetworkPacketRetransmitted())\n        self.assertNotEquals(None, metric.getLastHardwareChange())\n\n    def test_vm_datasource(self):\n        config = self.test_config()\n        config.configData[\"wad.isenabled\"] = \"0\"\n        dataSource = aem.VMDataSource(config)\n        counters = dataSource.collect()\n        self.assertNotEquals(None, counters)\n        self.assertNotEquals(0, len(counters))\n\n        counterNames = [\n            \"Current Hw Frequency\",\n            \"Current VM Processing Power\",\n            \"Guaranteed VM Processing Power\",\n            \"Max Hw Frequency\",\n            \"Max. VM Processing Power\",\n            \"Number of Cores per CPU\",\n            \"Number of Threads per Core\",\n            \"Phys. 
Processing Power per vCPU\",\n            \"Processor Type\",\n            \"Reference Compute Unit\",\n            \"vCPU Mapping\",\n            \"VM Processing Power Consumption\",\n            \"Current Memory assigned\",\n            \"Guaranteed Memory assigned\",\n            \"Max Memory assigned\",\n            \"VM Memory Consumption\",\n            \"Adapter Id\",\n            \"Mapping\",\n            \"Maximum Network Bandwidth\",\n            \"Minimum Network Bandwidth\",\n            \"Network Read Bytes\",\n            \"Network Write Bytes\",\n            \"Packets Retransmitted\"\n        ]\n        #print \"\\n\".join(map(lambda c: str(c), counters))\n        for name in counterNames:\n            #print name\n            counter = next((c for c in counters if c.name == name))\n            self.assertNotEquals(None, counter)\n            self.assertNotEquals(None, counter.value)\n\n    def test_storagemetric(self):\n        metrics = mock_getStorageMetrics()\n        self.assertNotEquals(None, metrics)\n        stat = aem.AzureStorageStat(metrics)\n        self.assertNotEquals(None, stat.getReadBytes())\n        self.assertNotEquals(None, stat.getReadOps())\n        self.assertNotEquals(None, stat.getReadOpE2ELatency())\n        self.assertNotEquals(None, stat.getReadOpServerLatency())\n        self.assertNotEquals(None, stat.getReadOpThroughput())\n        self.assertNotEquals(None, stat.getWriteBytes())\n        self.assertNotEquals(None, stat.getWriteOps())\n        self.assertNotEquals(None, stat.getWriteOpE2ELatency())\n        self.assertNotEquals(None, stat.getWriteOpServerLatency())\n        self.assertNotEquals(None, stat.getWriteOpThroughput())\n\n    def test_disk_info(self):\n        config = self.test_config()\n        mapping = aem.DiskInfo(config).getDiskMapping()\n        self.assertNotEquals(None, mapping)\n\n    def test_get_storage_key_range(self):\n        startKey, endKey = aem.getStorageTableKeyRange()\n        self.assertNotEquals(None, startKey)\n        self.assertEquals(13, len(startKey))\n        self.assertNotEquals(None, endKey)\n        self.assertEquals(13, len(endKey))\n\n    def test_storage_datasource(self):\n        aem.getStorageMetrics = mock_getStorageMetrics\n        config = self.test_config()\n        dataSource = aem.StorageDataSource(config)\n        counters = dataSource.collect()\n\n        self.assertNotEquals(None, counters)\n        self.assertNotEquals(0, len(counters))\n\n        counterNames = [\n            \"Phys. 
Disc to Storage Mapping\",\n            \"Storage ID\",\n            \"Storage Read Bytes\",\n            \"Storage Read Op Latency E2E msec\",\n            \"Storage Read Op Latency Server msec\",\n            \"Storage Read Ops\",\n            \"Storage Read Throughput E2E MB/sec\",\n            \"Storage Write Bytes\",\n            \"Storage Write Op Latency E2E msec\",\n            \"Storage Write Op Latency Server msec\",\n            \"Storage Write Ops\",\n            \"Storage Write Throughput E2E MB/sec\"\n        ]\n\n        #print \"\\n\".join(map(lambda c: str(c), counters))\n        for name in counterNames:\n            #print name\n            counter = next((c for c in counters if c.name == name))\n            self.assertNotEquals(None, counter)\n            self.assertNotEquals(None, counter.value)\n\n    def test_writer(self):\n        testEventFile = \"/tmp/Event\"\n        if os.path.isfile(testEventFile):\n            os.remove(testEventFile)\n        writer = aem.PerfCounterWriter()\n        counters = [aem.PerfCounter(counterType = 0,\n                                    category = \"test\",\n                                    name = \"test\",\n                                    value = \"test\",\n                                    unit = \"test\")]\n\n        writer.write(counters, eventFile = testEventFile)\n        with open(testEventFile) as F:\n            content = F.read()\n            self.assertEquals(str(counters[0]), content)\n\n        testEventFile = \"/dev/console\"\n        print(\"==============================\")\n        print(\"The warning below is expected.\")\n        self.assertRaises(IOError, writer.write, counters, 2, testEventFile)\n        print(\"==============================\")\n\n    def test_easyHash(self):\n        hashVal = aem.easyHash('a')\n        self.assertEquals(97, hashVal)\n        hashVal = aem.easyHash('ab')\n        self.assertEquals(87, hashVal)\n        hashVal = aem.easyHash((\"ciextension-SUSELinuxEnterpriseServer11SP3\"\n                                \"___role1___\"\n                                \"ciextension-SUSELinuxEnterpriseServer11SP3\"))\n        self.assertEquals(5, hashVal)\n    \n    def test_get_ad_key_range(self):\n        startKey, endKey = aem.getAzureDiagnosticKeyRange()\n        print(startKey)\n        print(endKey)\n\n    def test_get_mds_timestamp(self):\n        date = datetime.datetime(2015, 1, 26, 3, 54)\n        epoch = datetime.datetime.utcfromtimestamp(0)\n        unixTimestamp = (int((date - epoch).total_seconds()))\n        mdsTimestamp = aem.getMDSTimestamp(unixTimestamp)\n        self.assertEquals(635578412400000000, mdsTimestamp)\n    \n    def test_get_storage_timestamp(self):\n        date = datetime.datetime(2015, 1, 26, 3, 54)\n        epoch = datetime.datetime.utcfromtimestamp(0)\n        unixTimestamp = (int((date - epoch).total_seconds()))\n        storageTimestamp = aem.getStorageTimestamp(unixTimestamp)\n        self.assertEquals(\"20150126T0354\", storageTimestamp)\n\ndef mock_getStorageMetrics(*args, **kwargs):\n        with open(os.path.join(env.test_dir, \"storage_metrics\")) as F:\n            test_data = F.read()\n        jsonObjs = json.loads(test_data)  \n        class ObjectView(object):\n            def __init__(self, data):\n                self.__dict__ = data\n        metrics = map(lambda x : ObjectView(x), jsonObjs)\n        return metrics\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "AzureEnhancedMonitor/ext/test/test_installer.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport unittest\nimport env\nimport os\nimport json\nimport datetime\nimport installer\n\nclass TestInstall(unittest.TestCase):\n    def test_install_psutil(self):\n        buildDir = os.path.join(env.root, \"../../Common/libpsutil\")\n        build = installer.find_psutil_build(buildDir)\n        self.assertNotEquals(None, build)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "AzureEnhancedMonitor/hvinfo/.gitignore",
    "content": "bin/*\n"
  },
  {
    "path": "AzureEnhancedMonitor/hvinfo/Makefile",
    "content": "CC := gcc\nSRCDIR := src\nLIBDIR := lib\nINCDIR := include\nBUILDDIR := build\nTARGET := bin/hvinfo\n\nSRCEXT := c\nSOURCES := $(shell find $(SRCDIR) -type f -name *.$(SRCEXT))\nOBJECTS := $(patsubst $(SRCDIR)/%,$(BUILDDIR)/%,$(SOURCES:.$(SRCEXT)=.o))\nCFLAGS := -g\nLDFLAGS := \nINC := -I $(INCDIR)\nLIB := -L $(LIBDIR)\n\nall : $(TARGET)\n\n$(TARGET): $(OBJECTS)\n\t@echo \"Linking...\"\n\t$(CC) $^ $(LDFLAGS) -o $(TARGET) $(LIB)\n\n$(BUILDDIR)/%.o: $(SRCDIR)/%.$(SRCEXT)\n\t@mkdir -p $(BUILDDIR)\n\t@echo \"Compiling...\"\n\t$(CC) $(CFLAGS) $(INC) -c -o $@ $<\n\nclean:\n\t@echo \"Cleaning...\"\n\t$(RM) -r $(BUILDDIR) $(TARGET)\n\n.PHONY: clean test\n"
  },
  {
    "path": "AzureEnhancedMonitor/hvinfo/src/hvinfo.c",
    "content": "//\n// Copyright 2014 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n#include <stdio.h> \n#include <stdlib.h> \n#include <string.h> \n\nvoid get_cpuid(unsigned int leaf, unsigned int *cpuid)\n{\n    asm volatile (\n        \"cpuid\" \n        : \"=a\" (cpuid[0]), \"=b\" (cpuid[1]), \"=c\" (cpuid[2]), \"=d\" (cpuid[3]) \n        : \"a\" (leaf));\n}\n\nvoid u32_to_char_arr(char* dest, unsigned int i)\n{\n    dest[0] = (char)(i       & 0xFF);\n    dest[1] = (char)(i >> 8  & 0xFF);\n    dest[2] = (char)(i >> 16 & 0xFF);\n    dest[3] = (char)(i >> 24 & 0xFF);\n}\n\nint main()\n{\n    unsigned int cpuid[4];\n    char vendor_id[13];\n\n    /* Read hypervisor name*/\n    memset(cpuid, 0, sizeof(unsigned int) * 4);\n    memset(vendor_id, 0, sizeof(char) * 13);\n    get_cpuid(0x40000000, cpuid);\n\n    //cpuid[1~3] is hypervisor vendor id signature.\n    //In hyper-v, it is:\n    //\n    //    0x7263694D—“Micr”\n    //    0x666F736F—“osof”\n    //    0x76482074—“t Hv”\n    //\n    u32_to_char_arr(vendor_id,     cpuid[1]);\n    u32_to_char_arr(vendor_id + 4, cpuid[2]);\n    u32_to_char_arr(vendor_id + 8, cpuid[3]);\n\n    printf(\"%s\\n\", vendor_id);\n\n    /* Read hypervisor version*/\n    memset(cpuid, 0, sizeof(unsigned int) * 4);\n    get_cpuid(0x40000001, cpuid);\n\n    // cpuid[0] is hypervisor vendor-neutral interface identification.\n    // 0x31237648—“Hv#1. It means the next leaf contains version info.\n    if(0x31237648 != cpuid[0])\n    {\n        return 1;\n    }\n    memset(cpuid, 0, sizeof(unsigned int) * 4);\n    get_cpuid(0x40000002, cpuid);\n\n    //cpuid[1] is host version. \n    //The high-end 16 bit is major version, while the low-end is minor.\n    printf(\"%d.%d\\n\", (cpuid[1] >> 16) & 0xFF, (cpuid[1]) & 0xFF);\n    return 0;\n}\n\n"
  },
  {
    "path": "AzureEnhancedMonitor/nodejs/package.json",
    "content": "{\n  \"name\": \"azure-linux-tools\",\n  \"author\": \"Microsoft Corporation\",\n  \"contributors\": [\n    \"Yue, Zhang <yuezha@microsoft.com>\"\n  ],\n  \"version\": \"1.0.0\",\n  \"description\": \"Azure Linux VM configuration tools\",\n  \"tags\": [\n    \"azure\",\n    \"vm\",\n    \"linux\",\n    \"tools\"\n  ],\n  \"keywords\": [\n    \"node\",\n    \"azure\",\n    \"vm\",\n    \"linux\",\n    \"tools\"\n  ],\n  \"main\": \"setaem.js\",\n  \"preferGlobal\": \"true\",\n  \"engines\": {\n    \"node\": \">= 0.8.26\"\n  },\n  \"licenses\": [\n    {\n      \"type\": \"Apache\",\n      \"url\": \"http://www.apache.org/licenses/LICENSE-2.0\"\n    }\n  ],\n  \"dependencies\": {\n    \"promise\" : \"6.1.0\",\n    \"azure-common\" : \"0.9.13\",\n    \"azure-storage\" : \"0.4.2\",\n    \"azure-arm-storage\" : \"0.11.0\",\n    \"azure-arm-compute\" : \"0.13.0\"\n  },\n  \"devDependencies\": {\n  },\n  \"homepage\": \"https://github.com/Azure/azure-linux-extensions\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"git@github.com:Azure/azure-linux-extensions.git\"\n  },\n  \"bin\": {\n    \"setaem\": \"setaem.js\"\n  },\n  \"scripts\":{\n  }\n}\n"
  },
  {
    "path": "AzureEnhancedMonitor/nodejs/setaem.js",
    "content": "#!/usr/bin/env node\n\n//\n// Copyright (c) Microsoft and contributors.  All rights reserved.\n// \n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//   http://www.apache.org/licenses/LICENSE-2.0\n// \n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// \n// See the License for the specific language governing permissions and\n// limitations under the License.\n// \n'use strict';\n\nvar fs = require('fs');\nvar path = require('path');\nvar Promise = require('promise');\nvar common = require('azure-common');\nvar storage = require('azure-storage');\nvar storageMgmt = require('azure-arm-storage');\nvar computeMgmt = require('azure-arm-compute');\nvar readFile = Promise.denodeify(fs.readFile); \n\nvar debug = 0;\n\n/*Const*/\nvar CurrentScriptVersion = \"1.0.0.0\";\n\nvar aemExtPublisher = \"Microsoft.OSTCExtensions\";\nvar aemExtName = \"AzureEnhancedMonitorForLinux\";\nvar aemExtVersion = \"2.0\";\n\nvar ladExtName = \"LinuxDiagnostic\";\nvar ladExtPublisher = \"Microsoft.OSTCExtensions\";\nvar ladExtVersion = \"2.0\";\n\nvar ROLECONTENT = \"IaaS\";\nvar AzureEndpoint = \"windows.net\";\nvar BlobMetricsMinuteTable= \"$MetricsMinutePrimaryTransactionsBlob\";\nvar BlobMetricsHourTable= \"$MetricsMinutePrimaryTransactionsBlob\";\nvar ladMetricesTable= \"\";\n/*End of Const*/\n\nvar AemConfig = function(){\n    this.prv = [];\n    this.pub = [];\n};\n\nAemConfig.prototype.setPublic = function(key, value){\n    this.pub.push({\n        'key' : key,\n        'value' : value\n    });\n};\n\n\nAemConfig.prototype.setPrivate = function(key, value){\n    this.prv.push({\n        'key' : key,\n        'value' : value\n    });\n};\n\nAemConfig.prototype.getPublic = function(){\n    return {\n        'key' : aemExtName + \"PublicConfigParameter\",\n        'value' : JSON.stringify({'cfg' : this.pub}),\n        'type':'Public'\n    }\n};\n\nAemConfig.prototype.getPrivate = function(){\n    return {\n        'key' : aemExtName + \"PrivateConfigParameter\",\n        'value' : JSON.stringify({'cfg' : this.prv}),\n        'type':'Private'\n    }\n};\n\nvar setAzureVMEnhancedMonitorForLinux = function(rgpName, vmName){\n    var azureProfile;\n    var currSubscription;\n    var computeClient;\n    var storageClient;\n    var selectedVM;\n    var osdiskAccount;\n    var accounts = [];\n    var aemConfig = new AemConfig();\n\n    return getAzureProfile().then(function(profile){\n        azureProfile = profile;\n        return getDefaultSubscription(profile);\n    }).then(function(subscription){\n        console.log(\"[INFO]Using subscription: \" + subscription.name);\n        debug && console.log(JSON.stringify(subscription, null, 4));\n        currSubscription = subscription;\n        var cred = getCloudCredential(subscription);\n        var baseUri = subscription.managementEndpointUrl;\n        computeClient = computeMgmt.createComputeManagementClient(cred, baseUri);\n        storageClient = storageMgmt.createStorageManagementClient(cred, baseUri);\n    }).then(function(){\n        return getVirtualMachine(computeClient, rgpName, vmName);\n    }).then(function(vm){\n        //Set vm role basic config\n        console.log(\"[INFO]Found VM: \" + vm.oSProfile.computerName);\n        debug && 
console.log(JSON.stringify(vm, null, 4));\n        /*\n        vm:\n        { extensions: [ [Object] ],\n          tags: {},\n          hardwareProfile: { virtualMachineSize: 'Standard_A1' },\n          storageProfile: { dataDisks: [], imageReference: [Object], oSDisk: [Object] },\n          oSProfile:\n          { secrets: [],\n            computerName: 'zhongyiubuntu4',\n            adminUsername: 'zhongyi',\n            linuxConfiguration: [Object] },\n          networkProfile: { networkInterfaces: [Object] },\n          diagnosticsProfile: { bootDiagnostics: [Object] },\n          provisioningState: 'Succeeded',\n          id: '/subscriptions/4be8920b-2978-43d7-ab14-04d8549c1d05/resourceGroups/zhongyiubuntu4/providers/Microsoft.Compute/virtualMachines/zhongyiubuntu4',\n          name: 'zhongyiubuntu4',\n          type: 'Microsoft.Compute/virtualMachines',\n          location: 'eastasia' }}\n        */\n        selectedVM = vm;\n        var cpuOverCommitted = 0;\n        if(selectedVM.hardwareProfile.virtualMachineSize === 'ExtraSmall'){\n            cpuOverCommitted = 1;\n        }\n        aemConfig.setPublic('vmsize', selectedVM.hardwareProfile.virtualMachineSize);\n        aemConfig.setPublic('vm.role', 'IaaS');\n        aemConfig.setPublic('vm.memory.isovercommitted', 0);\n        aemConfig.setPublic('vm.cpu.isovercommitted', cpuOverCommitted);\n        aemConfig.setPublic('script.version', CurrentScriptVersion);\n        aemConfig.setPublic('verbose', '0');\n        aemConfig.setPublic('href', 'http://aka.ms/sapaem');\n    }).then(function(){\n        //Set vm disk config\n        /*\n        osDisk:\n        { operatingSystemType: 'Linux',\n          name: 'zhongyiubuntu4',\n          virtualHardDisk: { uri: 'https://zhongyiubuntu44575.blob.core.windows.net/vhds/zhongyiubuntu4.vhd' },\n          caching: 'ReadWrite',\n          createOption: 'FromImage' }\n        */\n        var osdisk = selectedVM.storageProfile.oSDisk;\n        osdiskAccount = getStorageAccountFromUri(osdisk.virtualHardDisk.uri);\n        console.log(\"[INFO]Adding configuration for OS disk.\");\n        aemConfig.setPublic('osdisk.account', osdiskAccount);\n        aemConfig.setPublic('osdisk.name', osdisk.name);\n        //aemConfig.setPublic('osdisk.caching', osdisk.caching);\n        aemConfig.setPublic('osdisk.connminute', osdiskAccount + \".minute\");\n        aemConfig.setPublic('osdisk.connhour', osdiskAccount + \".hour\");\n        accounts.push({\n            name: osdiskAccount\n        });\n        /*\n        dataDisk:\n        { lun: 0,\n          name: 'zhongyiubuntu4-20151112-140433',\n          virtualHardDisk: { uri: 'https://zhongyiubuntu44575.blob.core.windows.net/vhds/zhongyiubuntu4-20151112-140433.vhd' },\n          caching: 'None',\n          createOption: 'Empty',\n          diskSizeGB: 1023 }\n        */\n        for(var i = 0; i < selectedVM.storageProfile.dataDisks.length; i++){\n            var dataDisk = selectedVM.storageProfile.dataDisks[i];\n            console.log(\"[INFO]Adding configuration for data disk: \" + \n                        dataDisk.name);\n            var datadiskAccount = getStorageAccountFromUri(dataDisk.virtualHardDisk.uri);\n            accounts.push({\n                name: datadiskAccount\n            });\n            //The default lun value is 0\n            var lun = dataDisk.lun;\n            aemConfig.setPublic('disk.lun.' + i, lun);\n            aemConfig.setPublic('disk.name.' + i, dataDisk.name);\n            aemConfig.setPublic('disk.caching.' + i, dataDisk.caching);\n            aemConfig.setPublic('disk.account.' + i, datadiskAccount);\n            aemConfig.setPublic('disk.connminute.' + i, \n                                datadiskAccount + \".minute\");\n            aemConfig.setPublic('disk.connhour.' + i, datadiskAccount + \".hour\");\n        }\n    }).then(function(){\n        //Set storage account config\n        var promises = [];
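\n        //accounts[0] is the OS disk's storage account and fills the osdisk.* keys;\n        //account k+1 carries the config for data disk k (disk.*.k).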
\n        Object(accounts).forEach(function(account, accountIndex){\n            var promise = getResourceGroupName(storageClient, account.name)\n              .then(function(rgpName){\n                account.rgp = rgpName;\n                debug && console.log(\"rgp: \" + rgpName);\n                return getStorageAccountKey(storageClient, rgpName, account.name);\n            }).then(function(accountKey){\n                account.key = accountKey;\n                aemConfig.setPrivate(account.name + \".minute.key\", accountKey);\n                aemConfig.setPrivate(account.name + \".hour.key\", accountKey);\n                return getStorageAccountProperties(storageClient, account.rgp, account.name);\n            }).then(function(properties){\n                //ispremium\n                //Use the account's position, not a shared counter: these\n                //callbacks can resolve in any order.\n                var i = accountIndex - 1;\n                if (properties.accountType.startsWith(\"Standard\")) {\n                    if (i >= 0)\n                        aemConfig.setPublic('disk.type.' + i, \"Standard\");\n                    else\n                        aemConfig.setPublic('osdisk.type', \"Standard\");\n                } else {\n                    if (i >= 0)\n                        aemConfig.setPublic('disk.type.' + i, \"Premium\");\n                    else\n                        aemConfig.setPublic('osdisk.type', \"Premium\");\n                    aemConfig.setPublic(account.name + \".hour.ispremium\", 1);\n                    aemConfig.setPublic(account.name + \".minute.ispremium\", 1);\n                }\n\n                //endpoints\n                var endpoints = properties.primaryEndpoints;\n\n                var tableEndpoint;\n                var blobEndpoint;\n                endpoints.forEach(function(endpoint){\n                    if(endpoint.match(/.*table.*/)){\n                        tableEndpoint = endpoint;\n                    }else if(endpoint.match(/.*blob.*/)){\n                        blobEndpoint = endpoint;\n                    }\n                });\n                account.tableEndpoint = tableEndpoint;\n                account.blobEndpoint = blobEndpoint;\n                var minuteUri = tableEndpoint + BlobMetricsMinuteTable;\n                var hourUri = tableEndpoint + BlobMetricsHourTable;\n                account.minuteUri = minuteUri;\n                aemConfig.setPublic(account.name + \".hour.uri\", hourUri);\n                aemConfig.setPublic(account.name + \".minute.uri\", minuteUri);\n                aemConfig.setPublic(account.name + \".hour.name\", account.name);\n                aemConfig.setPublic(account.name + \".minute.name\", account.name);\n            }).then(function(){\n                return checkStorageAccountAnalytics(account.name, \n                                                    account.key,\n                                                    account.blobEndpoint);\n            });
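\n            //Queue this account's work; Promise.all below waits for every account.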
\n            promises.push(promise);\n        });\n        return Promise.all(promises);\n    }).then(function(res){\n        //Set Linux diagnostic config\n        aemConfig.setPublic(\"wad.name\", accounts[0].name);\n        aemConfig.setPublic(\"wad.isenabled\", 1);\n        var ladUri = accounts[0].tableEndpoint + ladMetricsTable;\n        console.log(\"[INFO]Your endpoint is: \" + accounts[0].tableEndpoint);\n        aemConfig.setPublic(\"wad.uri\", ladUri);\n        aemConfig.setPrivate(\"wad.key\", accounts[0].key);\n    }).then(function(){\n        //Update vm\n        var extensions = [];\n        var ladExtConfig = {\n            'name' : ladExtName,\n            'referenceName' : ladExtName,\n            'publisher' : ladExtPublisher,\n            'version' : ladExtVersion,\n            'state': 'Enable',\n            'resourceExtensionParameterValues' : [{\n                'key' : ladExtName + \"PrivateConfigParameter\",\n                'value' : JSON.stringify({\n                    'storageAccountName' : accounts[0].name,\n                    'storageAccountKey' : accounts[0].key,\n                    'endpoint' : accounts[0].tableEndpoint.substring((accounts[0].tableEndpoint.search(/\\./)) + 1, accounts[0].tableEndpoint.length)\n                }),\n                'type':'Private'\n            }]\n        };\n        var aemExtConfig = {\n            'name' : aemExtName,\n            'referenceName' : aemExtName,\n            'publisher' : aemExtPublisher,\n            'version' : aemExtVersion,\n            'state': 'Enable',\n            'resourceExtensionParameterValues' : [\n                aemConfig.getPublic(), \n                aemConfig.getPrivate()\n            ]\n        };\n        extensions.push(ladExtConfig);\n        extensions.push(aemExtConfig);\n        selectedVM.provisionGuestAgent = true;\n        selectedVM.resourceExtensionReferences = extensions;\n        console.log(\"[INFO]Updating configuration for VM: \" + selectedVM.name);\n        console.log(\"[INFO]This could take a few minutes.
Please wait.\")\n        debug && console.log(JSON.stringify(selectedVM, null, 4)) \n        return updateVirtualMachine(computeClient, svcName, vmName, selectedVM);\n    });\n}\n\nvar updateVirtualMachine = function (client, svcName, vmName, parameters){\n    return new Promise(function(fullfill, reject){\n        client.virtualMachines.update(svcName, vmName, vmName, parameters, \n                                      function(err, ret){\n            if(err){\n                reject(err)\n            } else {\n                fullfill(ret);\n            } \n        });\n    });\n}\n\nvar getStorageAccountProperties = function(storageClient, rgpName, accountName){\n    return new Promise(function(fullfill, reject){\n        storageClient.storageAccounts.getProperties(rgpName, accountName, function(err, res){\n            if(err){\n                reject(err);\n            } else {\n                fullfill(res.storageAccounts.properties);\n            }\n        });\n    });\n};\n\nvar getResourceGroupName = function(storageClient, accountName) {\n    return new Promise(function(fullfill, reject){\n        storageClient.storageAccounts.list(function(err, res){\n            if(err){\n                reject(err);\n            } else {\n                res.storageAccounts.forEach(function (storage) {\n                    var matchRgp = /resourceGroups\\/(.+?)\\/.*/.exec(storage.id);\n                    var matchAct = /storageAccounts\\/(.+?)$/.exec(storage.id);\n                    if (matchAct[1] == accountName) {\n                        fullfill(matchRgp[1]);\n                    }\n                });\n            }\n        });\n    });\n};\n\nvar getStorageAccountKey = function(storageClient, rgpName, accountName){\n    console.log(\"123\");\n    return new Promise(function(fullfill, reject){\n        storageClient.storageAccounts.listKeys(rgpName, accountName, function(err, res){\n            console.log(\"??\");\n            if (err) {\n                reject(err);\n            } else {\n                fullfill(res);\n            }\n        });\n    });\n};\n\nvar getStorageAccountAnalytics = function(accountName, accountKey, host){\n    return new Promise(function(fullfill, reject){\n        var blobService = storage.createBlobService(accountName, accountKey, \n                                                    host); \n        blobService.getServiceProperties(null, function(err, properties, resp){\n            if(err){\n                reject(err)\n            } else {\n                fullfill(properties);\n            }\n        });\n    });\n};\n\nvar analyticsSettings = {\n    Logging:{ \n        Version: '1.0',\n        Delete: true,\n        Read: true,\n        Write: true,\n        RetentionPolicy: { Enabled: true, Days: 13 } },\n    HourMetrics:{ \n        Version: '1.0',\n        Enabled: true,\n        IncludeAPIs: true,\n        RetentionPolicy: { Enabled: true, Days: 13 } },\n    MinuteMetrics:{ \n        Version: '1.0',\n        Enabled: true,\n        IncludeAPIs: true,\n        RetentionPolicy: { Enabled: true, Days: 13 } \n    } \n};\n\nvar checkStorageAccountAnalytics = function(accountName, accountKey, host){\n   return getStorageAccountAnalytics(accountName, accountKey, host)\n     .then(function(properties){\n        if(!properties \n                || !properties.Logging\n                || !properties.Logging.Read \n                || !properties.Logging.Write\n                || !properties.Logging.Delete\n                || !properties.MinuteMetrics\n    
            || !properties.MinuteMetrics.Enabled\n                || !properties.MinuteMetrics.RetentionPolicy\n                || !properties.MinuteMetrics.RetentionPolicy.Enabled\n                || !properties.MinuteMetrics.RetentionPolicy.Days\n                || properties.MinuteMetrics.RetentionPolicy.Days == 0\n                ){\n            console.log(\"[INFO] Turn on storage analytics for: \" + accountName)\n            return setStorageAccountAnalytics(accountName, accountKey, host,\n                                              analyticsSettings);\n        }\n   });\n}\n\nvar setStorageAccountAnalytics = function(accountName, accountKey, \n                                          host, properties){\n    return new Promise(function(fullfill, reject){\n        var blobService = storage.createBlobService(accountName, accountKey,\n                                                    host); \n        blobService.setServiceProperties(properties, null, \n                                         function(err, properties, resp){\n            if(err){\n                reject(err)\n            } else {\n                fullfill(properties);\n            }\n        });\n    });\n};\n\nvar getStorageAccountFromUri = function(uri){\n    var match = /https:\\/\\/(.+?)\\..*/.exec(uri);\n    if(match){\n        return match[1];\n    }\n}\n\nvar getVirtualMachine = function(computeClient, rgpName, vmName){\n    return new Promise(function(fullfill, reject){\n        computeClient.virtualMachines.get(rgpName, vmName, \n                                            function(err, res){\n            if(err){\n                reject(err);\n            } else {\n                fullfill(res.virtualMachine);\n            }\n        });\n    });\n}\n\nvar getCloudCredential = function(subscription){\n    var cred;\n    if(subscription.credential.type === 'cert'){\n        cred = computeMgmt.createCertificateCloudCredentials({\n            subscriptionId:subscription.id ,\n            cert:subscription.managementCertificate.cert,\n            key:subscription.managementCertificate.key,\n        });\n    }else{//if(subscription.credential.type === 'token'){\n       cred = new common.TokenCloudCredentials({\n            subscriptionId : subscription.id,\n            token : subscription.credential.token  \n       });\n    } \n    return cred;\n}\n\nvar getAzureProfile = function(){\n    var profileJSON = path.join(getUserHome(), \".azure/azureProfile.json\");\n    return readFile(profileJSON).then(function(result){\n        var profile = JSON.parse(result);\n        return profile;\n    });\n}\n\nvar getDefaultSubscription = function(profile){\n    debug && console.log(JSON.stringify(profile, null, 4))\n    if(profile == null || profile.subscriptions == null \n            || profile.subscriptions.length == 0){\n        throw \"No subscription found.\"\n    }\n    console.log(\"[INFO]Found available subscriptions:\");\n    console.log(\"\");\n    console.log(\"    Id\\t\\t\\t\\t\\t\\tName\");\n    console.log(\"    --------------------------------------------------------\");\n    profile.subscriptions.forEach(function(subscription){\n        console.log(\"    \" + subscription.id + \"\\t\" + subscription.name);\n    });\n    console.log(\"\");\n    var defaultSubscription;\n    profile.subscriptions.every(function(subscription, index, arr){\n        if(subscription.isDefault){\n            defaultSubscription = subscription;\n            return false;\n        } else {\n            return true;\n        }\n  
  });\n\n    if(defaultSubscription == null){\n        console.log(\"[WARN]No subscription is selected.\");\n        defaultSubscription = profile.subscriptions[0];\n        console.log(\"[INFO]The first subscription will be used.\");\n        console.log(\"[INFO]You could use the following command to select \" + \n                    \"another subscription.\");\n        console.log(\"\");\n        console.log(\"    azure account set [<subscript_id>|<subscript_name>]\");\n        console.log(\"\");\n    }\n    if(defaultSubscription.user){\n        return getTokenCredential(defaultSubscription);\n    } else if(defaultSubscription.managementCertificate){\n        return getCertCredential(defaultSubscription);\n    } else {\n        throw \"Unknown subscription type.\";\n    }\n}\n\nvar getTokenCredential = function(subscription){\n    var tokensJSON = path.join(getUserHome(), \".azure/accessTokens.json\");\n    return readFile(tokensJSON).then(function(result){\n        var tokens = JSON.parse(result);\n        tokens.every(function(token, index, arr){\n            if(token.userId === subscription.user.name){\n                subscription.credential = {\n                    type : 'token',\n                    token : token.accessToken\n                };\n                return false;\n            }\n            //Keep scanning until the matching user is found.\n            return true;\n        });\n        return subscription;\n    });\n}\n\nvar getCertCredential = function(subscription){\n    subscription.credential = {\n        type : 'cert',\n        cert : subscription.managementCertificate\n    };\n    return subscription;\n}\n\nfunction getUserHome() {\n  return process.env[(process.platform == 'win32') ? 'USERPROFILE' : 'HOME'];\n}\n\nvar main = function(){\n    var rgpName = null;\n    var vmName = null;\n    if(process.argv.length === 4){\n        vmName = process.argv[3];\n        rgpName = process.argv[2];\n    } else if(process.argv.length === 3){\n        if(process.argv[2] === \"--help\" || process.argv[2] === \"-h\"){\n            usage();\n            process.exit(0);\n        } else if(process.argv[2] === \"--version\" || process.argv[2] === \"-v\"){\n            console.log(CurrentScriptVersion);\n            process.exit(0);\n        }\n        vmName = process.argv[2];\n        rgpName = vmName;\n    } else{\n        usage();\n        process.exit(1);\n    }\n\n    setAzureVMEnhancedMonitorForLinux(rgpName, vmName).done(function(){\n        console.log(\"[INFO]Azure Enhanced Monitoring Extension \" + \n                    \"configuration updated.\");\n        console.log(\"[INFO]It can take up to 15 minutes for the \" + \n                    \"monitoring data to appear in the system.\");\n        process.exit(0);\n    }, function(err){\n        if(err && err.statusCode == 401){\n            console.error(\"[ERROR]Token expired. 
\" + \n                          \"Please run the following command to login.\");\n            console.log(\"    \");\n            console.log(\"    azure login\");\n            console.log(\"or\");\n            console.log(\"    azure account import <pem_file>\");\n            process.exit(-1);\n        }else{\n            console.log(err);\n            console.log(err.stack);\n            process.exit(-1);\n        }\n    });\n}\n\nvar usage = function(){\n    console.log(\"\");\n    console.log(\"Usage:\");\n    console.log(\"    setaem <service_name> <vm_name>\");\n    console.log(\"or\");\n    console.log(\"    setaem <vm_name>\");\n    console.log(\"\");\n    console.log(\"  *if service_name and vm_name are the same, \" + \n                \"service_name could be omitted.\");\n    console.log(\"\");\n    console.log(\"    \");\n    console.log(\"    -h, --help \");\n    console.log(\"        Print help.\");\n    console.log(\"    \");\n    console.log(\"    -v, --version\");\n    console.log(\"        Print version.\");\n    console.log(\"    \");\n}\n\nmain();\n"
  },
  {
    "path": "AzureMonitorAgent/.gitignore",
    "content": "MetricsExtensionBin/\nmetrics_ext_utils/\npackages/\ntelegraf_utils/\nUtils/\nwaagent\n"
  },
  {
    "path": "AzureMonitorAgent/HandlerManifest.json",
    "content": "[\n  {\n    \"name\":  \"AzureMonitorLinuxAgent\",\n    \"version\": \"1.5.124\",\n    \"handlerManifest\": {\n      \"installCommand\": \"./shim.sh -install\",\n      \"uninstallCommand\": \"./shim.sh -uninstall\",\n      \"updateCommand\": \"./shim.sh -update\",\n      \"enableCommand\": \"./shim.sh -enable\",\n      \"disableCommand\": \"./shim.sh -disable\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false,\n      \"updateMode\": \"UpdateWithInstall\",\n      \"continueOnUpdateFailure\": true\n    },\n    \"resourceLimits\": {\n      \"services\": [\n        {\n          \"name\": \"azuremonitoragent\",\n          \"cpuQuotaPercentage\": 250\n        },\n        {\n          \"name\": \"azuremonitoragentmgr\"\n        },\n        {\n          \"name\": \"azuremonitor-agentlauncher\",\n          \"cpuQuotaPercentage\": 4\n        },\n        {\n          \"name\": \"azuremonitor-coreagent\",\n          \"cpuQuotaPercentage\": 200\n        },\n        {\n          \"name\": \"metrics-extension\",\n          \"cpuQuotaPercentage\": 5\n        },\n        {\n          \"name\": \"metrics-sourcer\",\n          \"cpuQuotaPercentage\": 10\n        }\n      ]\n    }\n  }\n]\n"
  },
  {
    "path": "AzureMonitorAgent/README.md",
    "content": "# AzureMonitorLinuxAgent Extension\nAllow the owner of the Azure Virtual Machines to install the Azure Monitor Linux Agent\n\n# The Latest Version is 1.6.2\nThe extension is currently in Public Preview and is accessible to all public cloud regions in Azure. \n\nYou can read the User Guide below.\n* [Learn more: Azure Virtual Machine Extensions](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-extensions-features/)\n\nAzure Monitor Linux Agent Extension can:\n* Install the agent and pull configs from MCS\n\n# User Guide\n\n## 1. Deploying the Extension to a VM\n\nYou can deploy it using Azure CLI\n\n\n \n### 1.1. Using Azure CLI Resource Manager\n\nYou can view the availability of the Azure Monitor Linux Agent extension versions in each region by running:\n\n```\naz vm extension image list-versions -l <region> --name AzureMonitorLinuxAgent -p Microsoft.Azure.Monitor\n```\n\nYou can deploy the Azure Monitor Linux Agent Extension by running:\n```\naz vm extension set --name AzureMonitorLinuxAgent --publisher Microsoft.Azure.Monitor --version <version> --resource-group <My Resource Group> --vm-name <My VM Name>\n```\n\nTo update the version of the esisting installation of Azure Monitor Linux Agent extension on a VM, please add \"--force-update\" flag to the above command. (Currenty Waagent only supports this way of upgrading. Will update once we have more info from them.)\n\n\n## Supported Linux Distributions \n Currently Manually tested only on -\n* CentOS Linux 6, and 7 (x64)\n* Red Hat Enterprise Linux Server 6 and 7 (x64)\n* Ubuntu 16.04 LTS, 18.04 LTS(x64)\n\nWill Add more distros once they are tested\n\n## Troubleshooting\n\n* The status of the extension is reported back to Azure so that user can\nsee the status on Azure Portal\n* All the extension installation and config files are unzipped into - \n`/var/lib/waagent/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-<version>/packages/`\nand the tail of the output is logged into the log directory specified\nin HandlerEnvironment.json and reported back to Azure\n* The operation log of the extension is `/var/log/azure/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-<version>/extension.log` file.\n"
  },
  {
    "path": "AzureMonitorAgent/agent.py",
    "content": "#!/usr/bin/env python\n#\n# AzureMonitoringLinuxAgent Extension\n#\n# Copyright 2021 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport os.path\nimport datetime\nimport signal\nimport pwd\nimport glob\nimport grp\nimport re\nimport filecmp\nimport stat\nimport traceback\nimport time\nimport platform\nimport subprocess\nimport json\nimport base64\nimport inspect\nimport shutil\nimport hashlib\nimport fileinput\nimport contextlib\nimport ama_tst.modules.install.supported_distros as supported_distros\nfrom collections import OrderedDict\nfrom hashlib import sha256\nfrom shutil import copyfile, rmtree, copytree, copy2\n\nfrom threading import Thread\nimport telegraf_utils.telegraf_config_handler as telhandler\nimport metrics_ext_utils.metrics_constants as metrics_constants\nimport metrics_ext_utils.metrics_ext_handler as me_handler\nimport metrics_ext_utils.metrics_common_utils as metrics_utils\n\ntry:\n    import urllib.request as urllib # Python 3+\nexcept ImportError:\n    import urllib2 as urllib # Python 2\n\ntry:\n    from urllib.parse import urlparse  # Python 3+\nexcept ImportError:\n    from urlparse import urlparse  # Python 2\n\ntry:\n    import urllib.error as urlerror # Python 3+\nexcept ImportError:\n    import urllib2 as urlerror # Python 2\n\n\n# python shim can only make IMDS calls which shouldn't go through proxy\ntry:\n    urllib.getproxies = lambda x = None: {}\nexcept Exception as e:\n    print('Resetting proxies failed with error: {0}'.format(e))    \n\ntry:\n    from Utils.WAAgentUtil import waagent\n    import Utils.HandlerUtil as HUtil\nexcept Exception as e:\n    # These utils have checks around the use of them; this is not an exit case\n    print('Importing utils failed with error: {0}'.format(e))\n\n# This code is taken from the omsagent's extension wrapper.\n# This same monkey patch fix is relevant for AMA extension as well.\n# This monkey patch duplicates the one made in the waagent import above.\n# It is necessary because on 2.6, the waagent monkey patch appears to be overridden\n# by the python-future subprocess.check_output backport.\nif sys.version_info < (2,7):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode 
= returncode\n            self.cmd = cmd\n            self.output = output\n\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n    subprocess.check_output = check_output\n    subprocess.CalledProcessError = CalledProcessError\n\n# Global Variables\nPackagesDirectory = 'packages'\n# The BundleFileName values will be replaced by actual values in the release pipeline. See apply_version.sh.\nBundleFileNameDeb = 'azuremonitoragent.deb'\nBundleFileNameRpm = 'azuremonitoragent.rpm'\nBundleFileName = ''\nTelegrafBinName = 'telegraf'\nInitialRetrySleepSeconds = 30\nPackageManager = ''\nPackageManagerOptions = ''\nMdsdCounterJsonPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/metricCounters.json'\nFluentCfgPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/fluentbit/td-agent.conf'\nAMASyslogConfigMarkerPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/syslog.marker'\nAMASyslogPortFilePath = '/etc/opt/microsoft/azuremonitoragent/config-cache/syslog.port'\nAMAFluentPortFilePath = '/etc/opt/microsoft/azuremonitoragent/config-cache/fluent.port'\nPreviewFeaturesDirectory = '/etc/opt/microsoft/azuremonitoragent/config-cache/previewFeatures/'\nArcSettingsFile = '/var/opt/azcmagent/localconfig.json'\nAMAAstTransformConfigMarkerPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/agenttransform.marker'\nAMAExtensionLogRotateFilePath = '/etc/logrotate.d/azuremonitoragentextension'\nWAGuestAgentLogRotateFilePath = '/etc/logrotate.d/waagent-extn.logrotate'\nAmaUninstallContextFile = '/var/opt/microsoft/uninstall-context'\nAmaDataPath = '/var/opt/microsoft/azuremonitoragent/'\nSupportedArch = set(['x86_64', 'aarch64'])\nMDSDFluentPort = 0\nMDSDSyslogPort = 0\n\n# Error codes\nGenericErrorCode = 1\nUnsupportedOperatingSystem = 51\nIndeterminateOperatingSystem = 51\nMissingorInvalidParameterErrorCode = 53\nDPKGOrRPMLockedErrorCode = 56\nMissingDependency = 52\n\n# Settings\nGenevaConfigKey = \"genevaConfiguration\"\nAzureMonitorConfigKey = \"azureMonitorConfiguration\"\n\n# Configuration\nHUtilObject = None\nSettingsSequenceNumber = None\nHandlerEnvironment = None\nSettingsDict = None\n\n\ndef main():\n    \"\"\"\n    Main method\n    Parse out operation from argument, invoke the operation, and finish.\n    \"\"\"\n    init_waagent_logger()\n    waagent_log_info('Azure Monitoring Agent for Linux started to handle.')\n\n    # Determine the operation being executed\n    operation = None\n    try:\n        option = sys.argv[1]\n        if re.match('^([-/]*)(disable)', option):\n            operation = 'Disable'\n        elif re.match('^([-/]*)(uninstall)', option):\n            operation = 'Uninstall'\n        elif re.match('^([-/]*)(install)', option):\n            operation = 'Install'\n        elif re.match('^([-/]*)(enable)', option):\n            operation = 'Enable'\n        elif re.match('^([-/]*)(update)', option):\n            operation = 'Update'\n        elif re.match('^([-/]*)(metrics)', option):\n            operation = 'Metrics'\n        elif re.match('^([-/]*)(syslogconfig)', option):\n            operation = 'Syslogconfig'\n        elif re.match('^([-/]*)(transformconfig)', option):\n            operation = 'Transformconfig'\n    except Exception as e:\n        waagent_log_error(str(e))\n\n    if operation is None:\n        log_and_exit('Unknown', GenericErrorCode, 'No valid operation provided')\n\n    # Set up for exit code and any error messages\n    exit_code = 0\n    message = '{0} 
succeeded'.format(operation)\n\n    # Avoid entering broken state where manual purge actions are necessary in low disk space scenario\n    destructive_operations = ['Disable', 'Uninstall']\n    if operation not in destructive_operations:\n        exit_code = check_disk_space_availability()\n        if exit_code != 0:\n            message = '{0} failed due to low disk space'.format(operation)\n            log_and_exit(operation, exit_code, message)\n\n    # Invoke operation\n    try:\n        global HUtilObject\n        HUtilObject = parse_context(operation)\n        exit_code, output = operations[operation]()\n\n        # Exit code 1 indicates a general problem that doesn't have a more\n        # specific error code; it often indicates a missing dependency\n        if exit_code == 1 and operation == 'Install':\n            message = 'Install failed with exit code 1. For error details, check logs ' \\\n                      'in /var/log/azure/Microsoft.Azure.Monitor' \\\n                      '.AzureMonitorLinuxAgent'\n        elif exit_code == DPKGOrRPMLockedErrorCode and operation == 'Install':\n            message = 'Install failed with exit code {0} because the ' \\\n                      'package manager on the VM is currently locked: ' \\\n                      'please wait and try again'.format(DPKGOrRPMLockedErrorCode)\n        elif exit_code != 0:\n            message = '{0} failed with exit code {1} {2}'.format(operation,\n                                                             exit_code, output)\n\n    except AzureMonitorAgentForLinuxException as e:\n        exit_code = e.error_code\n        message = e.get_error_message(operation)\n    except Exception as e:\n        exit_code = GenericErrorCode\n        message = '{0} failed with error: {1}\\n' \\\n                  'Stacktrace: {2}'.format(operation, e,\n                                           traceback.format_exc())\n\n    # Finish up and log messages\n    log_and_exit(operation, exit_code, message)\n\ndef check_disk_space_availability():\n    \"\"\"\n    Check if there is the required space on the machine.\n    \"\"\"\n    try:\n        if get_free_space_mb(\"/var\") < 700 or get_free_space_mb(\"/etc\") < 500 or get_free_space_mb(\"/opt\") < 500:\n            # 52 is the exit code for missing dependency i.e. disk space\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            return MissingDependency\n        else:\n            return 0\n    except Exception:\n        print('Failed to check disk usage.')\n        return 0\n\ndef get_free_space_mb(dirname):\n    \"\"\"\n    Get the free space in MB in the directory path.\n    \"\"\"\n    st = os.statvfs(dirname)\n    return (st.f_bavail * st.f_frsize) // (1024 * 1024)\n\ndef is_systemd():\n    \"\"\"\n    Check if the system is using systemd\n    \"\"\"\n    return os.path.isdir(\"/run/systemd/system\")\n\ndef get_service_command(service, *operations):\n    \"\"\"\n    Get the appropriate service command [sequence] for the provided service name and operation(s)\n    \"\"\"\n    if is_systemd():\n        return \" && \".join([\"systemctl {0} {1}\".format(operation, service) for operation in operations])\n    else:\n        hutil_log_info(\"The VM doesn't have systemctl. 
Using the init.d service to start {0}.\".format(service))\n        return '/etc/init.d/{0} {1}'.format(service, operations[0])\n\ndef check_kill_process(pstring):\n    for line in os.popen(\"ps ax | grep \" + pstring + \" | grep -v grep\"):\n        fields = line.split()\n        pid = fields[0]\n        os.kill(int(pid), signal.SIGKILL)\n\ndef compare_and_copy_bin(src, dest):\n    # Check if a previous file exists at the destination and compare the two binaries.\n    # If the files differ, remove the older file and copy the new one;\n    # if they are the same, skip the copy.\n    if os.path.isfile(src):\n        if os.path.isfile(dest):\n            if not filecmp.cmp(src, dest):\n                # Removing the file in case it is already being run in a process,\n                # in which case we can get an error \"text file busy\" while copying\n                os.remove(dest)\n                copyfile(src, dest)\n        else:\n            # No previous binary exists, simply copy it and make it executable\n            copyfile(src, dest)\n\n        os.chmod(dest, stat.S_IXGRP | stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXOTH | stat.S_IROTH)\n\ndef set_metrics_binaries():\n    current_arch = platform.machine()\n    # Rename the arch-appropriate metrics extension binary to MetricsExtension\n    MetricsExtensionDir = os.path.join(os.getcwd(), 'MetricsExtensionBin')\n    SupportedMEPath = os.path.join(MetricsExtensionDir, 'metricsextension_' + current_arch)\n\n    if os.path.exists(SupportedMEPath):\n        os.rename(SupportedMEPath, os.path.join(MetricsExtensionDir, 'MetricsExtension'))\n\n    # Cleanup unused ME binaries\n    for f in os.listdir(MetricsExtensionDir):\n        if f != 'MetricsExtension':\n            os.remove(os.path.join(MetricsExtensionDir, f))\n\ndef copy_amacoreagent_binaries():\n    current_arch = platform.machine()\n    amacoreagent_bin_local_path = os.getcwd() + \"/amaCoreAgentBin/amacoreagent_\" + current_arch\n    amacoreagent_bin = \"/opt/microsoft/azuremonitoragent/bin/amacoreagent\"\n    compare_and_copy_bin(amacoreagent_bin_local_path, amacoreagent_bin)\n\n    if current_arch == 'x86_64':\n        #libgrpc_bin_local_path = os.getcwd() + \"/amaCoreAgentBin/libgrpc_csharp_ext.x64.so\"\n        #libgrpc_bin = \"/opt/microsoft/azuremonitoragent/bin/libgrpc_csharp_ext.x64.so\"\n        #compare_and_copy_bin(libgrpc_bin_local_path, libgrpc_bin)\n\n        liblz4x64_bin_local_path = os.getcwd() + \"/amaCoreAgentBin/liblz4x64.so\"\n        liblz4x64_bin = \"/opt/microsoft/azuremonitoragent/bin/liblz4x64.so\"\n        compare_and_copy_bin(liblz4x64_bin_local_path, liblz4x64_bin)\n    #elif current_arch == 'aarch64':\n        #libgrpc_bin_local_path = os.getcwd() + \"/amaCoreAgentBin/libgrpc_csharp_ext.arm64.so\"\n        #libgrpc_bin = \"/opt/microsoft/azuremonitoragent/bin/libgrpc_csharp_ext.arm64.so\"\n        #compare_and_copy_bin(libgrpc_bin_local_path, libgrpc_bin)\n\n    agentlauncher_bin_local_path = os.getcwd() + \"/agentLauncherBin/agentlauncher_\" + current_arch\n    agentlauncher_bin = \"/opt/microsoft/azuremonitoragent/bin/agentlauncher\"\n    compare_and_copy_bin(agentlauncher_bin_local_path, agentlauncher_bin)\n\ndef copy_mdsd_fluentbit_binaries():\n    current_arch = platform.machine()\n    mdsd_bin_local_path = os.getcwd() + \"/mdsdBin/mdsd_\" + current_arch\n    mdsdmgr_bin_local_path = os.getcwd() + \"/mdsdBin/mdsdmgr_\" + current_arch\n    fluentbit_bin_local_path = os.getcwd() + 
\"/fluentBitBin/fluent-bit_\" + current_arch\n    mdsd_bin = \"/opt/microsoft/azuremonitoragent/bin/mdsd\"\n    mdsdmgr_bin = \"/opt/microsoft/azuremonitoragent/bin/mdsdmgr\"\n    fluentbit_bin = \"/opt/microsoft/azuremonitoragent/bin/fluent-bit\"\n\n    # copy the required libs to our test directory first\n    lib_dir = os.path.join(os.getcwd(), \"lib\")\n    if os.path.exists(lib_dir):\n        rmtree(lib_dir)\n\n    if sys.version_info >= (3, 8):\n        # dirs_exist_ok parameter was added in Python 3.8\n        copytree(\"/opt/microsoft/azuremonitoragent/lib\", lib_dir, dirs_exist_ok=True)\n    else:\n        copytree(\"/opt/microsoft/azuremonitoragent/lib\", lib_dir)\n    \n    canUseSharedmdsd, _ = run_command_and_log('ldd ' + mdsd_bin_local_path + ' | grep \"not found\"')\n    canUseSharedmdsdmgr, _ = run_command_and_log('ldd ' + mdsdmgr_bin_local_path + ' | grep \"not found\"')\n    if canUseSharedmdsd != 0 and canUseSharedmdsdmgr != 0:        \n        compare_and_copy_bin(mdsd_bin_local_path, mdsd_bin)\n        compare_and_copy_bin(mdsdmgr_bin_local_path, mdsdmgr_bin)\n\n    canUseSharedfluentbit, _ = run_command_and_log('ldd ' + fluentbit_bin_local_path + ' | grep \"not found\"')\n    if canUseSharedfluentbit != 0:\n        compare_and_copy_bin(fluentbit_bin_local_path, fluentbit_bin)\n\n    rmtree(os.getcwd() + \"/lib\")    \n\ndef get_installed_package_version():\n    \"\"\"\n    Returns if Azure Monitor Agent is installed and a list of installed version of the Azure Monitor Agent package.\n    Returns: (is_installed, version_list)\n    \"\"\"\n    if PackageManager == \"dpkg\":\n        # In the case of dpkg, we specify only Package and Version as architecture is written as amd64/arm64 instead of x86_64/aarch64.\n        cmd = \"dpkg-query -W -f='${Package}_${Version}\\n' 'azuremonitoragent*' 2>/dev/null\"\n    elif PackageManager == \"rpm\":\n        cmd = \"rpm -q azuremonitoragent\"\n    else:\n        hutil_log_error(\"Could not determine package manager.\")\n        return False, []\n\n    exit_code, output = run_command_and_log(cmd, check_error=False)\n\n    if exit_code != 0 or not output:\n        hutil_log_info(\"Azure Monitor Agent package not found after running {0}.\".format(cmd))\n        return False, []\n\n    version_list = output.strip().split('\\n')\n    return True, version_list\n\ndef get_current_bundle_file():\n    if PackageManager == 'dpkg':\n        return BundleFileNameDeb.rsplit('.deb', 1)[0]  # Remove .deb extension\n    elif PackageManager == 'rpm':\n        return BundleFileNameRpm.rsplit('.rpm', 1)[0]  # Remove .rpm extension\n    return \"\"\n\ndef install():\n    \"\"\"\n    Ensure that this VM distro and version are supported.\n    Install the Azure Monitor Linux Agent package, using retries.\n    Note: install operation times out from WAAgent at 15 minutes, so do not\n    wait longer.\n    \"\"\"\n\n    exit_if_vm_not_supported('Install')\n    find_package_manager(\"Install\")\n    set_os_arch('Install')\n    vm_dist, vm_ver = find_vm_distro('Install')\n\n    # Check if Debian 12 and 13 VMs have rsyslog package (required for AMA 1.31+)\n    if (vm_dist.startswith('debian')) and ((vm_ver.startswith('12') or vm_ver.startswith('13')) or int(vm_ver.split('.')[0]) >= 12):\n        check_rsyslog, _ = run_command_and_log(\"dpkg -s rsyslog\")\n        if check_rsyslog != 0:\n            hutil_log_info(\"'rsyslog' package missing from Debian {0} machine, installing to allow AMA to run.\".format(vm_ver))\n            rsyslog_exit_code, rsyslog_output 
= run_command_and_log(\"DEBIAN_FRONTEND=noninteractive apt-get update && \\\n                                                                    DEBIAN_FRONTEND=noninteractive apt-get install -y rsyslog\")\n            if rsyslog_exit_code != 0:\n                return rsyslog_exit_code, rsyslog_output\n    \n    # Check if Amazon 2023 VMs have rsyslog package (required for AMA 1.31+)\n    if (vm_dist.startswith('amzn')) and vm_ver.startswith('2023'):\n        check_rsyslog, _ = run_command_and_log(\"dnf list installed | grep rsyslog.x86_64\")\n        if check_rsyslog != 0:\n            hutil_log_info(\"'rsyslog' package missing from Amazon Linux 2023 machine, installing to allow AMA to run.\")\n            rsyslog_exit_code, rsyslog_output = run_command_and_log(\"dnf install -y rsyslog\")\n            if rsyslog_exit_code != 0:\n                return rsyslog_exit_code, rsyslog_output\n    \n    # Flag to handle the case where the same package is already installed\n    same_package_installed = False\n\n    # Check if the package is already installed with the correct version\n    is_installed, installed_versions = get_installed_package_version()\n\n    # Check if the package is already installed, if so determine if it is the same as the bundle or not\n    if is_installed:\n        hutil_log_info(\"Found installed azuremonitoragent version(s): {0}\".format(installed_versions))\n        # Check if already have this version of AMA installed, if so, no-op for install of AMA\n        if len(installed_versions) == 1:\n            current_bundle = get_current_bundle_file()\n            hutil_log_info(\"Current bundle file: {0}\".format(current_bundle))\n            package_name = installed_versions[0]\n\n            # This is to make sure dpkg's package name is in the same format as the BundleFileNameDeb\n            if PackageManager == 'dpkg':\n                architecture = ''\n                if platform.machine() == 'x86_64':\n                    architecture = '_x86_64'\n                elif platform.machine() == 'aarch64':\n                    architecture = '_aarch64'\n                # need to change the ending from amd64 to x86_64 and arm64 to aarch64\n                package_name = package_name + architecture\n            if current_bundle == package_name:\n                hutil_log_info(\"This version of azuremonitoragent package is already installed. Skipping package install.\")\n                same_package_installed = True\n        else:\n            hutil_log_error(\"Multiple versions of azuremonitoragent package found: {0}\\n This is undefined behavior, we recommend running the following:\".format(installed_versions))\n\n            if PackageManager == 'dpkg':\n                hutil_log_error(\"Run the following command first:\\n dpkg --purge azuremonitoragent. 
If this does not work, try the following with caution:\\n\"\n                \"'rm /var/lib/dpkg/info/azuremonitoragent.*' followed by 'dpkg --force-all -P azuremonitoragent'\")\n            elif PackageManager == 'rpm': # For reference, AzureLinux 3.0 also falls under this category\n                hutil_log_error(\"Run the following command first: \")\n                hutil_log_error(\"'rpm -q azuremonitoragent' and for each version run: rpm -e azuremonitoragent-(version)-(bundle_number).(architecture), or rpm -e --allmatches azuremonitoragent\\n An example of the command is as follows: rpm -e {0}\".format(installed_versions[0]))\n                hutil_log_error(\"If that does not work, please try: rpm -e --noscripts --nodeps azuremonitoragent-(version)-(bundle_number).(architecture). I.e. rpm -e --noscripts --nodeps {0}\".format(installed_versions[0]))\n\n    # If the same bundle of the Azure Monitor Agent package is not already installed, proceed with installation\n    if not same_package_installed:\n        hutil_log_info(\"Installing Azure Monitor Agent package.\")\n        package_directory = os.path.join(os.getcwd(), PackagesDirectory)\n        bundle_path = os.path.join(package_directory, BundleFileName)\n        os.chmod(bundle_path, 100)\n        print(PackageManager, \" and \", BundleFileName)\n        AMAInstallCommand = \"{0} {1} -i {2}\".format(PackageManager, PackageManagerOptions, bundle_path)\n        hutil_log_info('Running command \"{0}\"'.format(AMAInstallCommand))\n\n        # Try to install with retry, since install can fail due to concurrent package operations\n        exit_code, output = run_command_with_retries_output(AMAInstallCommand, retries = 15,\n                                            retry_check = retry_if_dpkg_or_rpm_locked,\n                                            final_check = final_check_if_dpkg_or_rpm_locked)\n\n        # Retry install for aarch64 rhel8 VMs, as the initial install fails to create the symlink to /etc/systemd/system/azuremonitoragent.service\n        # in /etc/systemd/system/multi-user.target.wants/azuremonitoragent.service\n        if vm_dist.replace(' ','').lower().startswith('redhat') and vm_ver == '8.6' and platform.machine() == 'aarch64':\n            exit_code, output = run_command_with_retries_output(AMAInstallCommand, retries = 15,\n                                            retry_check = retry_if_dpkg_or_rpm_locked,\n                                            final_check = final_check_if_dpkg_or_rpm_locked)\n\n        if exit_code != 0:\n            return exit_code, output\n\n        # System daemon reload is required for systemd to pick up the new service\n        exit_code, output = run_command_and_log(\"systemctl daemon-reload\")\n        if exit_code != 0:\n            return exit_code, output\n\n    # Copy the AMACoreAgent and agentlauncher binaries\n    copy_amacoreagent_binaries()\n\n    set_metrics_binaries()\n\n    # Copy AstExtension binaries\n    # Needs to be revisited for aarch64\n    copy_astextension_binaries()\n\n    # Copy mdsd and fluent-bit with OpenSSL dynamically linked\n    if is_feature_enabled('useDynamicSSL'):\n        # Check if they have libssl.so.1.1 since AMA is built against this version\n        libssl1_1, _ = run_command_and_log('ldconfig -p | grep libssl.so.1.1')\n        if libssl1_1 == 0:\n            copy_mdsd_fluentbit_binaries()\n\n    # Set task limits to a max of 65K in suse 12\n    # Based on Task 9764411: AMA broken after 1.7 in sles 12 - 
https://dev.azure.com/msazure/One/_workitems/edit/9764411\n    vm_dist, _ = find_vm_distro('Install')\n    if (vm_dist.startswith('suse') or vm_dist.startswith('sles')):\n        try:\n            suse_exit_code, suse_output = run_command_and_log(\"mkdir -p /etc/systemd/system/azuremonitoragent.service.d\")\n            if suse_exit_code != 0:\n                return suse_exit_code, suse_output\n\n            suse_exit_code, suse_output = run_command_and_log(\"echo '[Service]' > /etc/systemd/system/azuremonitoragent.service.d/override.conf\")\n            if suse_exit_code != 0:\n                return suse_exit_code, suse_output\n\n            suse_exit_code, suse_output = run_command_and_log(\"echo 'TasksMax=65535' >> /etc/systemd/system/azuremonitoragent.service.d/override.conf\")\n            if suse_exit_code != 0:\n                return suse_exit_code, suse_output\n\n            suse_exit_code, suse_output = run_command_and_log(\"systemctl daemon-reload\")\n            if suse_exit_code != 0:\n                return suse_exit_code, suse_output\n        except Exception:\n            log_and_exit(\"install\", MissingorInvalidParameterErrorCode, \"Failed to update /etc/systemd/system/azuremonitoragent.service.d for suse 12,15\")\n\n    return 0, \"Azure Monitor Agent package installed successfully\"\n\ndef uninstall():\n    \"\"\"\n    Uninstall the Azure Monitor Linux Agent.\n    Whether all files are purged or log files are preserved depends on the uninstall context file.\n    Note: uninstall operation times out from WAAgent at 5 minutes\n    \"\"\"\n\n    exit_if_vm_not_supported('Uninstall')\n    find_package_manager(\"Uninstall\")\n\n    # Before we uninstall, we need to ensure AMA is installed to begin with\n    is_installed, installed_versions = get_installed_package_version()\n    if not is_installed:\n        hutil_log_info(\"Azure Monitor Agent is not installed, nothing to uninstall.\")\n        return 0, \"Azure Monitor Agent is not installed, nothing to uninstall.\"\n\n    if PackageManager != \"dpkg\" and PackageManager != \"rpm\":\n        log_and_exit(\"Uninstall\", UnsupportedOperatingSystem, \"The OS has neither rpm nor dpkg.\")\n\n    # For a clean uninstall, gather the file list BEFORE running the uninstall command.\n    # This ensures we have the complete list even after the package manager removes its database.\n    hutil_log_info(\"Gathering package file list for clean uninstall before removing package\")\n    package_files_for_cleanup = _get_package_files_for_cleanup()\n\n    # Attempt to uninstall each specific installed package\n\n    # Try a specific package uninstall for rpm\n    if PackageManager == \"rpm\":\n        purge_cmd_template = \"rpm -e {0}\"\n        # Process each package\n        for package_name in installed_versions:\n            if not package_name.strip():\n                continue\n\n            package_name = package_name.strip()\n\n            # Clean the package name and create the uninstall command\n            uninstall_command = purge_cmd_template.format(package_name)\n\n            hutil_log_info(\"Removing package: {0} by running {1}\".format(package_name, uninstall_command))\n\n            # Execute the uninstall command with retries\n            exit_code, output = run_command_with_retries_output(\n                uninstall_command,\n                retries=4,\n                retry_check=retry_if_dpkg_or_rpm_locked,\n                final_check=final_check_if_dpkg_or_rpm_locked\n            )\n
    elif PackageManager == \"dpkg\":\n        AMAUninstallCommand = \"dpkg -P azuremonitoragent\"\n        hutil_log_info(\"Removing package: azuremonitoragent by running {0}\".format(AMAUninstallCommand))\n        exit_code, output = run_command_with_retries_output(\n            AMAUninstallCommand,\n            retries=4,\n            retry_check=retry_if_dpkg_or_rpm_locked,\n            final_check=final_check_if_dpkg_or_rpm_locked\n        )\n\n    remove_localsyslog_configs()\n\n    uninstall_azureotelcollector()\n\n    # Remove the logrotate config\n    if os.path.exists(AMAExtensionLogRotateFilePath):\n        try:\n            os.remove(AMAExtensionLogRotateFilePath)\n        except Exception as ex:\n            output = 'Logrotate removal failed with error: {0}\\n' \\\n                'Stacktrace: {1}'.format(ex, traceback.format_exc())\n            hutil_log_info(output)\n\n    # Retry, since uninstall can fail due to concurrent package operations\n    try:\n        exit_code, output = force_uninstall_azure_monitor_agent()\n\n        # Remove all files installed by the package that were listed\n        _remove_package_files_from_list(package_files_for_cleanup)\n\n        # Clean up the context marker (always do this)\n        _cleanup_uninstall_context()\n\n    except Exception as ex:\n        exit_code = GenericErrorCode\n        output = 'Uninstall failed with error: {0}\\n' \\\n                'Stacktrace: {1}'.format(ex, traceback.format_exc())\n    return exit_code, output\n\ndef force_uninstall_azure_monitor_agent():\n    \"\"\"\n    Force uninstall the Azure Monitor Linux Agent package, allowing for the possibility of multiple installed Azure Monitor Agent Linux packages.\n    For rpm only, this function will attempt to uninstall each package in the installed_versions list.\n    If the package still persists, a force uninstall is done.\n    Returns: (exit_code, output_message or installed_versions (list of remaining packages))\n    \"\"\"\n    # Check whether azuremonitoragent is still installed.\n    is_installed, remaining_packages = get_installed_package_version()\n\n    commands_used = []\n    if is_installed:\n        # Since the previous uninstall failed, we go down the route of uninstalling without deps and pre/post scripts\n        hutil_log_info(\"Initial uninstall command did not remove all packages. 
Remaining packages: {0}\".format(remaining_packages))\n        AMAUninstallCommandForce = \"\"\n        if PackageManager == \"dpkg\":\n            # we can remove the post and pre scripts first then purge\n            RemoveScriptsCommand = \"rm /var/lib/dpkg/info/azuremonitoragent.*\"\n            run_command_with_retries_output(RemoveScriptsCommand, retries = 4,\n                                            retry_check = retry_if_dpkg_or_rpm_locked,\n                                            final_check = final_check_if_dpkg_or_rpm_locked)\n            AMAUninstallCommandForce = \"dpkg --force-all -P azuremonitoragent\"\n            hutil_log_info('Running command \"{0}\"'.format(AMAUninstallCommandForce))\n            exit_code, output = run_command_with_retries_output(AMAUninstallCommandForce, retries = 4,\n                                            retry_check = retry_if_dpkg_or_rpm_locked,\n                                            final_check = final_check_if_dpkg_or_rpm_locked)\n            commands_used.extend([RemoveScriptsCommand, AMAUninstallCommandForce])\n        elif PackageManager == \"rpm\":\n            # First try to mass uninstall AMA by using the --allmatches flag for rpm\n            # This is a more robust version of uninstall() since it uses the --allmatches flag\n            AMAUninstallCommand = \"rpm -e --allmatches azuremonitoragent\"\n            hutil_log_info('Running command \"{0}\"'.format(AMAUninstallCommand))\n            exit_code, output = run_command_with_retries_output(AMAUninstallCommand, retries = 4,\n                                                retry_check = retry_if_dpkg_or_rpm_locked,\n                                                final_check = final_check_if_dpkg_or_rpm_locked)\n\n            hutil_log_info(\"Force uninstall command {0} returned exit code {1} and output: {2}\".format(AMAUninstallCommandForce, exit_code, output))\n            commands_used.append(AMAUninstallCommand)\n            # Query to see what is left after using the --allmatches uninstall\n            is_still_installed, remaining_packages = get_installed_package_version()\n\n            # If the above command fails, we will try to force uninstall each package by using the --noscripts and --nodeps flags\n            if is_still_installed:\n                hutil_log_info(\"Failed to uninstall azuremonitoragent with --allmatches, trying to force uninstall each package individually.\")\n                # --noscripts and --nodeps flags are used to avoid running any pre/post scripts and skip dependencies test\n                # https://jfearn.fedorapeople.org/en-US/RPM/4/html/RPM_Guide/ch03s03s03.html\n                for package in remaining_packages:\n                    # Clean the package name and create uninstall command\n                    package = package.strip()\n                    if not package:\n                        continue\n                    AMAUninstallCommandForce = \"rpm -e --noscripts --nodeps {0}\".format(package)\n                    commands_used.append(AMAUninstallCommandForce)\n                    hutil_log_info('Running command \"{0}\"'.format(AMAUninstallCommandForce))\n                \n                    exit_code, output = run_command_with_retries_output(AMAUninstallCommandForce, retries = 4,\n                                                        retry_check = retry_if_dpkg_or_rpm_locked,\n                                                        final_check = final_check_if_dpkg_or_rpm_locked)\n                    \n                
    hutil_log_info(\"Force uninstall command {0} returned exit code {1} and output: {2}\".format(AMAUninstallCommandForce, exit_code, output))\n        # Check if packages are still installed\n        is_still_installed, remaining_packages = get_installed_package_version()\n        if is_still_installed:\n            output = \"Force uninstall did not remove all packages, remaining packages: {0}\".format(remaining_packages)\n            hutil_log_info(\"Force uninstall did not remove all packages, remaining packages: {0}\".format(remaining_packages))\n            return 1, output\n        else:\n            hutil_log_info(\"Force uninstall removed all packages successfully after using: {0}\".format(\", \".join(commands_used)))\n            return 0, \"Azure Monitor Agent packages uninstalled successfully after using: {0}\".format(\", \".join(commands_used))\n    # Since there was no indication of AMA, we can assume it was uninstalled successfully\n    else:\n        hutil_log_info(\"Azure Monitor Agent has been uninstalled.\")\n        return 0, \"Azure Monitor Agent has been uninstalled.\"\n      \ndef _get_package_files_for_cleanup():\n    \"\"\"\n    Get the list of files and directories installed by the provided\n    azuremonitoragent spec that should be removed during uninstall.\n    This must be called BEFORE the package is uninstalled to ensure the package\n    manager still has the file list available.\n    \n    Returns:\n        tuple: (files_list, directories_to_add) where files_list contains package files\n               and directories_to_add contains directories that need explicit cleanup\n    \"\"\"\n    try:\n        # Get list of files installed by the package\n        if PackageManager == \"dpkg\":\n            # For Debian-based systems\n            cmd = \"dpkg -L azuremonitoragent\"\n        elif PackageManager == \"rpm\":\n            # For RPM-based systems\n            cmd = \"rpm -ql azuremonitoragent\"\n        else:\n            hutil_log_info(\"Unknown package manager, cannot list package files\")\n            return []\n\n        exit_code, output = run_command_and_log(cmd, check_error=False)\n        \n        if exit_code != 0 or not output:\n            hutil_log_info(\"Could not get package file list for cleanup\")\n            return []\n\n        # Parse the file list\n        files = [line.strip() for line in output.strip().split('\\n') if line.strip()]\n        \n        # Collect all azuremonitor-related paths\n        azuremonitoragent_files = []\n        \n        for file_path in files:\n            # Only include files/directories that have \"azuremonitor\" in their path\n            # This covers both \"azuremonitoragent\" and \"azuremonitor-*\" service files\n            if \"azuremonitor\" in file_path:\n                azuremonitoragent_files.append(file_path)\n            else:\n                hutil_log_info(\"Skipping non-azuremonitor path: {0}\".format(file_path))\n        \n        return azuremonitoragent_files\n        \n    except Exception as ex:\n        hutil_log_error(\"Error gathering package files for cleanup: {0}\\n Is Azure Monitor Agent Installed?\".format(ex))\n        return []\n\ndef _remove_package_files_from_list(package_files):\n    \"\"\"\n    Remove all files and directories from the provided list that were installed \n    by the provided azuremonitoragent spec. 
This function works with a pre-gathered \n    list of files from _get_package_files_for_cleanup(), allowing it to work even \n    after the package has been uninstalled.\n    \n    Args:\n        package_files (list): List of file/directory paths to remove\n    \"\"\"\n    try:\n        if not package_files:\n            hutil_log_info(\"No package files provided for removal\")\n            return\n            \n        # Build consolidated list of paths to clean up\n        cleanup_paths = set(package_files) if package_files else set()\n        \n        # Add directories that need explicit cleanup since on rpm systems \n        # the initial list for this path does not remove the directories and files\n        cleanup_paths.add(\"/opt/microsoft/azuremonitoragent/\")\n\n        # Determine uninstall context based on if the context file exists\n        uninstall_context = _get_uninstall_context()\n        hutil_log_info(\"Uninstall context: {0}\".format(uninstall_context))\n        \n        if uninstall_context == 'complete':\n            hutil_log_info(\"Complete uninstall context - removing everything\")\n            cleanup_paths.add(AmaDataPath)\n\n        # Sort paths by depth (deepest first) to avoid removing parent before children\n        sorted_paths = sorted(cleanup_paths, key=lambda x: x.count('/'), reverse=True)\n        \n        hutil_log_info(\"Removing {0} azuremonitor paths\".format(len(sorted_paths)))\n        \n        items_removed = 0\n        for item_path in sorted_paths:\n            try:\n                if os.path.exists(item_path):\n                    if os.path.isdir(item_path):\n                        rmtree(item_path)\n                        hutil_log_info(\"Removed directory: {0}\".format(item_path))\n                    else:\n                        os.remove(item_path)\n                        hutil_log_info(\"Removed file: {0}\".format(item_path))\n                    items_removed += 1\n            except Exception as ex:\n                hutil_log_info(\"Failed to remove {0}: {1}\".format(item_path, ex))\n        \n        hutil_log_info(\"Removed {0} items total\".format(items_removed))\n        \n    except Exception as ex:\n        hutil_log_error(\"Error during file removal from list: {0}\\n Were these files removed already?\".format(ex))\n\ndef enable():\n    \"\"\"\n    Start the Azure Monitor Linux Agent Service\n    This call will return non-zero or throw an exception if\n    the settings provided are incomplete or incorrect.\n    Note: enable operation times out from WAAgent at 5 minutes\n    \"\"\"\n\n    public_settings, protected_settings = get_settings()\n\n    exit_if_vm_not_supported('Enable')\n\n    ensure = OrderedDict([\n        (\"azuremonitoragent\", False),\n        (\"azuremonitoragentmgr\", False)\n    ])\n\n    # Set traceFlags in publicSettings to enable mdsd tracing. 
For example, the EventIngest flag can be enabled via \"traceFlags\": \"0x2\"\n    flags = \"\"\n    if public_settings is not None and \"traceFlags\" in public_settings:\n        flags = \"-T {} \".format(public_settings.get(\"traceFlags\"))\n\n    # Use an Ordered Dictionary to ensure MDSD_OPTIONS (and other dependent variables) are written after their dependencies\n    default_configs = OrderedDict([\n        (\"MDSD_CONFIG_DIR\", \"/etc/opt/microsoft/azuremonitoragent\"),\n        (\"MDSD_LOG_DIR\", \"/var/opt/microsoft/azuremonitoragent/log\"),\n        (\"MDSD_ROLE_PREFIX\", \"/run/azuremonitoragent/default\"),\n        (\"MDSD_SPOOL_DIRECTORY\", \"/var/opt/microsoft/azuremonitoragent\"),\n        (\"MDSD_OPTIONS\", \"\\\"{}-A -R -c /etc/opt/microsoft/azuremonitoragent/mdsd.xml -d -r $MDSD_ROLE_PREFIX -S $MDSD_SPOOL_DIRECTORY/eh -L $MDSD_SPOOL_DIRECTORY/events\\\"\".format(flags)),\n        (\"MDSD_USE_LOCAL_PERSISTENCY\", \"true\"),\n        (\"MDSD_TCMALLOC_RELEASE_FREQ_SEC\", \"1\"),\n        (\"MONITORING_USE_GENEVA_CONFIG_SERVICE\", \"false\"),\n        (\"ENABLE_MCS\", \"false\")\n    ])\n\n    ssl_cert_var_name, ssl_cert_var_value = get_ssl_cert_info('Enable')\n    default_configs[ssl_cert_var_name] = ssl_cert_var_value\n\n    \"\"\"\n    Decide the mode and configuration. There are two supported configuration schema, mix-and-match between schemas is disallowed:\n        Legacy:          allows one of [MCS, GCS single tenant, or GCS multi tenant (\"Auto-Config\")] modes\n        Next-Generation: allows MCS, GCS multi tenant, or both\n    \"\"\"\n    is_gcs_single_tenant = False\n    GcsEnabled, McsEnabled = get_control_plane_mode()\n\n    # Next-generation schema\n    if public_settings is not None and (public_settings.get(GenevaConfigKey) or public_settings.get(AzureMonitorConfigKey)):\n\n        geneva_configuration = public_settings.get(GenevaConfigKey)\n        azure_monitor_configuration = public_settings.get(AzureMonitorConfigKey)\n\n        # Check for mix-and match of next-generation and legacy schema content\n        if len(public_settings) > 1 and ((geneva_configuration and not azure_monitor_configuration) or (azure_monitor_configuration and not geneva_configuration)):\n            log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Mixing genevaConfiguration or azureMonitorConfiguration with other configuration schemas is not allowed')\n\n        if geneva_configuration and geneva_configuration.get(\"enable\") == True:\n            hutil_log_info(\"Detected Geneva+ mode; azuremonitoragentmgr service will be started to handle Geneva tenants\")\n            ensure[\"azuremonitoragentmgr\"] = True\n            \n        if azure_monitor_configuration and azure_monitor_configuration.get(\"enable\") == True:\n            hutil_log_info(\"Detected Azure Monitor+ mode; azuremonitoragent service will be started to handle Azure Monitor tenant\")\n            ensure[\"azuremonitoragent\"] = True\n            azure_monitor_public_settings = azure_monitor_configuration.get(\"configuration\")\n            azure_monitor_protected_settings = protected_settings.get(AzureMonitorConfigKey) if protected_settings is not None else None\n            handle_mcs_config(azure_monitor_public_settings, azure_monitor_protected_settings, default_configs)\n\n    # Legacy schema\n    elif public_settings is not None and public_settings.get(\"GCS_AUTO_CONFIG\") == True:\n        hutil_log_info(\"Detected Auto-Config mode; azuremonitoragentmgr service will be started to handle Geneva 
tenants\")\n        ensure[\"azuremonitoragentmgr\"] = True\n                \n    elif (protected_settings is None or len(protected_settings) == 0) or (public_settings is not None and \"proxy\" in public_settings and \"mode\" in public_settings.get(\"proxy\") and public_settings.get(\"proxy\").get(\"mode\") == \"application\"):\n        hutil_log_info(\"Detected Azure Monitor mode; azuremonitoragent service will be started to handle Azure Monitor configuration\")\n        ensure[\"azuremonitoragent\"] = True\n        handle_mcs_config(public_settings, protected_settings, default_configs)\n\n    else:\n        hutil_log_info(\"Detected Geneva mode; azuremonitoragent service will be started to handle Geneva configuration\")\n        ensure[\"azuremonitoragent\"] = True\n        is_gcs_single_tenant = True\n        handle_gcs_config(public_settings, protected_settings, default_configs)\n        \n    # generate local syslog configuration files as in auto config syslog is not driven from DCR\n    # Note that internally AMCS with geneva config path can be used in which case syslog should be handled same way as default 1P\n    # generate local syslog configuration files as in 1P syslog is not driven from DCR\n    if GcsEnabled:\n        generate_localsyslog_configs(uses_gcs=True, uses_mcs=McsEnabled)\n\n    config_file = \"/etc/default/azuremonitoragent\"\n    temp_config_file = \"/etc/default/azuremonitoragent_temp\"\n\n    try:\n        if os.path.isfile(config_file):\n            new_config = \"\\n\".join([\"export {0}={1}\".format(key, value) for key, value in default_configs.items()]) + \"\\n\"\n\n            with open(temp_config_file, \"w\") as f:\n                f.write(new_config)\n\n            if not os.path.isfile(temp_config_file):\n                log_and_exit(\"Enable\", GenericErrorCode, \"Error while updating environment variables in {0}\".format(config_file))\n\n            os.remove(config_file)\n            os.rename(temp_config_file, config_file)            \n        else:\n            log_and_exit(\"Enable\", GenericErrorCode, \"Could not find the file {0}\".format(config_file))\n    except Exception as e:\n        log_and_exit(\"Enable\", GenericErrorCode, \"Failed to add environment variables to {0}: {1}\".format(config_file, e))\n\n    if \"ENABLE_MCS\" in default_configs and default_configs[\"ENABLE_MCS\"] == \"true\":\n        # enable processes for Custom Logs\n        ensure[\"azuremonitor-agentlauncher\"] = True\n        ensure[\"azuremonitor-coreagent\"] = True\n            \n        # start the metrics, agent transform and syslog watchers only in 3P mode\n        start_metrics_process()\n        start_syslogconfig_process()\n    elif ensure.get(\"azuremonitoragentmgr\") or is_gcs_single_tenant:\n        # In GCS scenarios, ensure that AMACoreAgent is running\n        ensure[\"azuremonitor-coreagent\"] = True\n\n    hutil_log_info('Handler initiating onboarding.')\n\n    if HUtilObject and HUtilObject.is_seq_smaller():\n        # Either upgrade has just happened (in which case we need to start), or enable was called with no change to extension config\n        hutil_log_info(\"Current sequence number, \" + HUtilObject._context._seq_no + \", is not greater than the LKG sequence number. 
Starting service(s) only if it is not yet running.\")\n        operations = [\"start\", \"enable\"]\n    else:\n        # Either this is a clean install (in which case restart is effectively start), or extension config has changed\n        hutil_log_info(\"Current sequence number, \" + HUtilObject._context._seq_no + \", is greater than the LKG sequence number. Restarting service(s) to pick up the new config.\")\n        operations = [\"restart\", \"enable\"]\n\n    output = \"\"\n\n    # Ensure non-required services are not running; do not block if this step fails\n    for service in [s for s in ensure.keys() if not ensure[s]]:\n        exit_code, disable_output = run_command_and_log(get_service_command(service, \"stop\", \"disable\"))\n        output += disable_output\n\n    for service in [s for s in ensure.keys() if ensure[s]]:\n        exit_code, enable_output = run_command_and_log(get_service_command(service, *operations))\n        output += enable_output\n\n        if exit_code != 0:\n            status_command = get_service_command(service, \"status\")\n            status_exit_code, status_output = run_command_and_log(status_command)\n\n            if status_exit_code != 0:\n                output += \"Output of '{0}':\\n{1}\".format(status_command, status_output)\n                return exit_code, output\n\n    if platform.machine() != 'aarch64':\n        if \"ENABLE_MCS\" in default_configs and default_configs[\"ENABLE_MCS\"] == \"true\":\n            # start/enable ast extension only in 3P mode and non aarch64\n            _, ast_output = run_command_and_log(get_service_command(\"azuremonitor-astextension\", *operations))\n            output += ast_output # do not block if ast start fails\n            # start transformation config watcher process\n            start_transformconfig_process()\n\n    # Service(s) were successfully configured and started; increment sequence number\n    HUtilObject.save_seq()\n\n    return exit_code, output\n\ndef handle_gcs_config(public_settings, protected_settings, default_configs):\n    \"\"\"\n    Populate the defaults for legacy-path GCS mode\n    \"\"\"\n    # look for LA protected settings\n    for var in list(protected_settings.keys()):\n        if \"_key\" in var or \"_id\" in var:\n            default_configs[var] = protected_settings.get(var)\n\n    # check if required GCS params are available\n    MONITORING_GCS_CERT_CERTFILE = None\n    if \"certificate\" in protected_settings:\n        MONITORING_GCS_CERT_CERTFILE = base64.standard_b64decode(protected_settings.get(\"certificate\"))\n\n    if \"certificatePath\" in protected_settings:\n        try:\n            with open(protected_settings.get(\"certificatePath\"), 'r') as f:\n                MONITORING_GCS_CERT_CERTFILE = f.read()\n        except Exception as ex:\n            log_and_exit('Enable', MissingorInvalidParameterErrorCode, 'Failed to read certificate {0}: {1}'.format(protected_settings.get(\"certificatePath\"), ex))\n\n    MONITORING_GCS_CERT_KEYFILE = None\n    if \"certificateKey\" in protected_settings:\n        MONITORING_GCS_CERT_KEYFILE = base64.standard_b64decode(protected_settings.get(\"certificateKey\"))\n\n    if \"certificateKeyPath\" in protected_settings:\n        try:\n            with open(protected_settings.get(\"certificateKeyPath\"), 'r') as f:\n                MONITORING_GCS_CERT_KEYFILE = f.read()\n        except Exception as ex:\n            log_and_exit('Enable', MissingorInvalidParameterErrorCode, 'Failed to read certificate key {0}: 
{1}'.format(protected_settings.get(\"certificateKeyPath\"), ex))\n\n    MONITORING_GCS_ENVIRONMENT = \"\"\n    if \"monitoringGCSEnvironment\" in protected_settings:\n        MONITORING_GCS_ENVIRONMENT = protected_settings.get(\"monitoringGCSEnvironment\")\n\n    MONITORING_GCS_NAMESPACE = \"\"\n    if \"namespace\" in protected_settings:\n        MONITORING_GCS_NAMESPACE = protected_settings.get(\"namespace\")\n\n    MONITORING_GCS_ACCOUNT = \"\"\n    if \"monitoringGCSAccount\" in protected_settings:\n        MONITORING_GCS_ACCOUNT = protected_settings.get(\"monitoringGCSAccount\")\n\n    MONITORING_GCS_REGION = \"\"\n    if \"monitoringGCSRegion\" in protected_settings:\n        MONITORING_GCS_REGION = protected_settings.get(\"monitoringGCSRegion\")\n\n    MONITORING_CONFIG_VERSION = \"\"\n    if \"configVersion\" in protected_settings:\n        MONITORING_CONFIG_VERSION = protected_settings.get(\"configVersion\")\n\n    MONITORING_GCS_AUTH_ID_TYPE = \"\"\n    if \"monitoringGCSAuthIdType\" in protected_settings:\n        MONITORING_GCS_AUTH_ID_TYPE = protected_settings.get(\"monitoringGCSAuthIdType\")\n\n    MONITORING_GCS_AUTH_ID = \"\"\n    if \"monitoringGCSAuthId\" in protected_settings:\n        MONITORING_GCS_AUTH_ID = protected_settings.get(\"monitoringGCSAuthId\")\n\n    MONITORING_TENANT = \"\"\n    if \"monitoringTenant\" in protected_settings:\n        MONITORING_TENANT = protected_settings.get(\"monitoringTenant\")\n\n    MONITORING_ROLE = \"\"\n    if \"monitoringRole\" in protected_settings:\n        MONITORING_ROLE = protected_settings.get(\"monitoringRole\")\n\n    MONITORING_ROLE_INSTANCE = \"\"\n    if \"monitoringRoleInstance\" in protected_settings:\n        MONITORING_ROLE_INSTANCE = protected_settings.get(\"monitoringRoleInstance\")\n\n\n    if ((MONITORING_GCS_CERT_CERTFILE is None or MONITORING_GCS_CERT_KEYFILE is None) and (MONITORING_GCS_AUTH_ID_TYPE == \"\")) or MONITORING_GCS_ENVIRONMENT == \"\" or MONITORING_GCS_NAMESPACE == \"\" or MONITORING_GCS_ACCOUNT == \"\" or MONITORING_GCS_REGION == \"\" or MONITORING_CONFIG_VERSION == \"\":\n        log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Not all required GCS parameters are provided')\n    else:\n        # set the values for GCS\n        default_configs[\"MONITORING_USE_GENEVA_CONFIG_SERVICE\"] = \"true\"\n        default_configs[\"MONITORING_GCS_ENVIRONMENT\"] = MONITORING_GCS_ENVIRONMENT\n        default_configs[\"MONITORING_GCS_NAMESPACE\"] = MONITORING_GCS_NAMESPACE\n        default_configs[\"MONITORING_GCS_ACCOUNT\"] = MONITORING_GCS_ACCOUNT\n        default_configs[\"MONITORING_GCS_REGION\"] = MONITORING_GCS_REGION\n        default_configs[\"MONITORING_CONFIG_VERSION\"] = MONITORING_CONFIG_VERSION\n\n        # write the certificate and key to disk\n        uid = pwd.getpwnam(\"syslog\").pw_uid\n        gid = grp.getgrnam(\"syslog\").gr_gid\n\n        if MONITORING_GCS_AUTH_ID_TYPE != \"\":\n            default_configs[\"MONITORING_GCS_AUTH_ID_TYPE\"] = MONITORING_GCS_AUTH_ID_TYPE\n\n        if MONITORING_GCS_AUTH_ID != \"\":\n            default_configs[\"MONITORING_GCS_AUTH_ID\"] = MONITORING_GCS_AUTH_ID\n\n        if MONITORING_GCS_CERT_CERTFILE is not None:\n            default_configs[\"MONITORING_GCS_CERT_CERTFILE\"] = \"/etc/opt/microsoft/azuremonitoragent/gcscert.pem\"\n            with open(\"/etc/opt/microsoft/azuremonitoragent/gcscert.pem\", \"wb\") as f:\n                f.write(MONITORING_GCS_CERT_CERTFILE)\n            
os.chown(\"/etc/opt/microsoft/azuremonitoragent/gcscert.pem\", uid, gid)\n            os.system('chmod {1} {0}'.format(\"/etc/opt/microsoft/azuremonitoragent/gcscert.pem\", 400))\n\n        if MONITORING_GCS_CERT_KEYFILE is not None:\n            default_configs[\"MONITORING_GCS_CERT_KEYFILE\"] = \"/etc/opt/microsoft/azuremonitoragent/gcskey.pem\"\n            with open(\"/etc/opt/microsoft/azuremonitoragent/gcskey.pem\", \"wb\") as f:\n                f.write(MONITORING_GCS_CERT_KEYFILE)\n            os.chown(\"/etc/opt/microsoft/azuremonitoragent/gcskey.pem\", uid, gid)\n            os.system('chmod {1} {0}'.format(\"/etc/opt/microsoft/azuremonitoragent/gcskey.pem\", 400))\n\n        if MONITORING_TENANT != \"\":\n            default_configs[\"MONITORING_TENANT\"] = MONITORING_TENANT\n\n        if MONITORING_ROLE != \"\":\n            default_configs[\"MONITORING_ROLE\"] = MONITORING_ROLE\n\n        if MONITORING_TENANT != \"\":\n            default_configs[\"MONITORING_ROLE_INSTANCE\"] = MONITORING_ROLE_INSTANCE\n\ndef handle_mcs_config(public_settings, protected_settings, default_configs):\n    \"\"\"\n    Populate the defaults for MCS mode\n    \"\"\"\n    default_configs[\"ENABLE_MCS\"] = \"true\"\n    default_configs[\"PA_GIG_BRIDGE_MODE\"] = \"true\"\n    # April 2022: PA_FLUENT_SOCKET_PORT setting is being deprecated in place of PA_DATA_PORT. Remove when AMA 1.17 and earlier no longer need servicing.\n    default_configs[\"PA_FLUENT_SOCKET_PORT\"] = \"13005\"\n    # this port will be dynamic in future\n    default_configs[\"PA_DATA_PORT\"] = \"13005\"\n    proxySet = False\n\n    # fetch proxy settings\n    if public_settings is not None and \"proxy\" in public_settings and \"mode\" in public_settings.get(\"proxy\") and public_settings.get(\"proxy\").get(\"mode\") == \"application\":\n        default_configs[\"MDSD_PROXY_MODE\"] = \"application\"\n\n        if \"address\" in public_settings.get(\"proxy\"):\n            default_configs[\"MDSD_PROXY_ADDRESS\"] = public_settings.get(\"proxy\").get(\"address\")\n        else:\n            log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Parameter \"address\" is required in proxy public setting')\n\n        if \"auth\" in public_settings.get(\"proxy\") and public_settings.get(\"proxy\").get(\"auth\") == True:\n            if protected_settings is not None and \"proxy\" in protected_settings and \"username\" in protected_settings.get(\"proxy\") and \"password\" in protected_settings.get(\"proxy\"):\n                default_configs[\"MDSD_PROXY_USERNAME\"] = protected_settings.get(\"proxy\").get(\"username\")\n                default_configs[\"MDSD_PROXY_PASSWORD\"] = protected_settings.get(\"proxy\").get(\"password\")\n                set_proxy(default_configs[\"MDSD_PROXY_ADDRESS\"], default_configs[\"MDSD_PROXY_USERNAME\"], default_configs[\"MDSD_PROXY_PASSWORD\"])\n                proxySet = True\n            else:\n                log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Parameter \"username\" and \"password\" not in proxy protected setting')\n        else:\n            set_proxy(default_configs[\"MDSD_PROXY_ADDRESS\"], \"\", \"\")\n            proxySet = True\n    \n    # is this Arc? 
If so, check for proxy\n    if os.path.isfile(ArcSettingsFile):\n        f = open(ArcSettingsFile, \"r\")\n        data = f.read()\n        f.close()\n\n        if (data != ''):\n            json_data = json.loads(data)\n            BypassProxy = False\n            if json_data is not None and \"proxy.bypass\" in json_data:\n                bypass = json_data[\"proxy.bypass\"]\n                # proxy.bypass is an array\n                if \"AMA\" in bypass:\n                    BypassProxy = True\n\n            if not BypassProxy and json_data is not None and \"proxy.url\" in json_data:\n                url = json_data[\"proxy.url\"]\n                # only non-authenticated proxy config is supported\n                if url != '':\n                    default_configs[\"MDSD_PROXY_ADDRESS\"] = url\n                    set_proxy(default_configs[\"MDSD_PROXY_ADDRESS\"], \"\", \"\")\n                    proxySet = True\n\n    if not proxySet:\n        unset_proxy()\n\n    # set arc autonomous endpoints\n    az_environment, _ = get_azure_environment_and_region()\n    if az_environment == me_handler.ArcACloudName:\n        try:\n            _, mcs_endpoint = me_handler.get_arca_endpoints_from_himds()\n        except Exception as ex:\n            log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Failed to get Arc autonomous endpoints. {0}'.format(ex))\n\n        default_configs[\"customRegionalEndpoint\"] = mcs_endpoint\n        default_configs[\"customGlobalEndpoint\"] = mcs_endpoint\n        default_configs[\"customResourceEndpoint\"] = \"https://monitoring.azs\"\n\n    # add managed identity settings if they were provided\n    identifier_name, identifier_value, error_msg = get_managed_identity()\n\n    if error_msg:\n        log_and_exit(\"Enable\", MissingorInvalidParameterErrorCode, 'Failed to determine managed identity settings. 
{0}.'.format(error_msg))\n\n    if identifier_name and identifier_value:\n        default_configs[\"MANAGED_IDENTITY\"] = \"{0}#{1}\".format(identifier_name, identifier_value)\n\ndef get_control_plane_mode():\n    \"\"\"\n    Identify which control plane is in use\n    \"\"\"\n    public_settings, protected_settings = get_settings()\n\n    GcsEnabled = False\n    McsEnabled = False\n\n    if public_settings is not None and (public_settings.get(GenevaConfigKey) or public_settings.get(AzureMonitorConfigKey)):\n        geneva_configuration = public_settings.get(GenevaConfigKey)\n        azure_monitor_configuration = public_settings.get(AzureMonitorConfigKey)\n\n        if geneva_configuration and geneva_configuration.get(\"enable\") == True:\n            GcsEnabled = True\n        if azure_monitor_configuration and azure_monitor_configuration.get(\"enable\") == True:\n            McsEnabled = True\n    # Legacy schema\n    elif public_settings is not None and public_settings.get(\"GCS_AUTO_CONFIG\") == True:\n        GcsEnabled = True\n    elif (protected_settings is None or len(protected_settings) == 0) or (public_settings is not None and \"proxy\" in public_settings and \"mode\" in public_settings.get(\"proxy\") and public_settings.get(\"proxy\").get(\"mode\") == \"application\"):\n        McsEnabled = True\n    else:\n        GcsEnabled = True\n    \n    return GcsEnabled, McsEnabled\n\ndef disable():\n    \"\"\"\n    Disable Azure Monitor Linux Agent process on the VM.\n    Note: disable operation times out from WAAgent at 15 minutes\n    \"\"\"\n\n    #stop the metrics process\n    stop_metrics_process()\n\n    #stop syslog config watcher process\n    stop_syslogconfig_process()\n\n    #stop agent transform config watcher process\n    stop_transformconfig_process()\n\n    # stop amacoreagent and agent launcher\n    hutil_log_info('Handler stopping Core Agent and agent launcher')\n    if is_systemd():\n        exit_code, output = run_command_and_log('systemctl stop azuremonitor-coreagent && systemctl disable azuremonitor-coreagent')\n        exit_code, output = run_command_and_log('systemctl stop azuremonitor-agentlauncher && systemctl disable azuremonitor-agentlauncher')\n        # in case AL is not cleaning up properly\n        check_kill_process('/opt/microsoft/azuremonitoragent/bin/fluent-bit')\n\n    # Stop and disable systemd services so they are not started after system reboot.\n    for service in [\"azuremonitoragent\", \"azuremonitoragentmgr\"]:\n        exit_code, output = run_command_and_log(get_service_command(service, \"stop\", \"disable\"))\n\n        if exit_code != 0:\n            status_command = get_service_command(service, \"status\")\n            status_exit_code, status_output = run_command_and_log(status_command)\n\n            if status_exit_code != 0:\n                output += \"Output of '{0}':\\n{1}\".format(status_command, status_output)\n\n    if platform.machine() != 'aarch64':\n        # stop ast extension so that it is not started after system reboot. 
Do not block if it fails.\n        ast_exit_code, disable_output = run_command_and_log(get_service_command(\"azuremonitor-astextension\", \"stop\", \"disable\"))\n        if ast_exit_code != 0:\n            hutil_log_info(disable_output)\n            status_command = get_service_command(\"azuremonitor-astextension\", \"status\")\n            _, ast_status_output = run_command_and_log(status_command)\n            hutil_log_info(ast_status_output)\n\n    return exit_code, output\n\ndef update():\n    \"\"\"\n    This function is called when the extension is updated.\n    It marks the uninstall context to indicate that the next run should be treated as an update rather than a clean install.\n\n    Always returns 0\n    \"\"\"\n\n    hutil_log_info(\"Update operation called for Azure Monitor Agent\")\n\n    try:\n        state_dir = os.path.dirname(AmaUninstallContextFile)\n        if not os.path.exists(state_dir):\n            os.makedirs(state_dir)\n        with open(AmaUninstallContextFile, 'w') as f:\n            f.write('update\\n')\n            f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) # Timestamp for debugging\n        hutil_log_info(\"Marked uninstall context as 'update'\")\n    except Exception as ex:\n        hutil_log_error(\"Failed to set uninstall context: {0}\\n The uninstall operation will not behave as expected with the uninstall context file missing, defaulting to an uninstall that removes {1}.\".format(ex, AmaDataPath))\n\n    return 0, \"Update succeeded\"\n\ndef _get_uninstall_context():\n    \"\"\"\n    Determine the context of this uninstall operation\n\n    Returns the context as a string:\n        'complete' - if this is a clean uninstall\n        'update' - if this is an update operation\n    Also returns as 'complete' if it fails to read the context file.\n    \"\"\"\n\n    try:\n        if os.path.exists(AmaUninstallContextFile):\n            with open(AmaUninstallContextFile, 'r') as f:\n                context = f.read().strip().split('\\n')[0]\n                hutil_log_info(\"Found uninstall context: {0}\".format(context))\n                return context\n        else:\n            hutil_log_info(\"Uninstall context file does not exist, defaulting to 'complete'\")\n    except Exception as ex:\n        hutil_log_error(\"Failed to read uninstall context file: {0}\\n The uninstall operation will not behave as expected with the uninstall context file missing, defaulting to an uninstall that removes {1}.\".format(ex, AmaDataPath))\n\n    return 'complete'\n\ndef _cleanup_uninstall_context():\n    \"\"\"\n    Clean up uninstall context marker\n    \"\"\"\n\n    try:\n        if os.path.exists(AmaUninstallContextFile):\n            os.remove(AmaUninstallContextFile)\n            hutil_log_info(\"Removed uninstall context file\")\n        else:\n            hutil_log_info(\"Uninstall context file does not exist, nothing to remove\")\n    except Exception as ex:\n        hutil_log_error(\"Failed to cleanup uninstall context: {0}\\n This may result in unintended behavior as described.\\nIf the marker file exists and cannot be removed, uninstall will continue to keep the {1} path, leading users to have to remove it manually.\".format(ex, AmaDataPath))\n\ndef restart_launcher():\n    # start agent launcher\n    hutil_log_info('Handler initiating agent launcher')\n    if is_systemd():\n        exit_code, output = run_command_and_log('systemctl restart azuremonitor-agentlauncher && systemctl enable azuremonitor-agentlauncher')\n\ndef 
restart_astextension():\n    # start agent transformation extension process\n    hutil_log_info('Handler initiating agent transformation extension (AstExtension) restart and enable')\n    if is_systemd():\n        exit_code, output = run_command_and_log('systemctl restart azuremonitor-astextension && systemctl enable azuremonitor-astextension')\n\ndef set_proxy(address, username, password):\n    \"\"\"\n    # Set proxy http_proxy env var in dependent services\n    \"\"\"\n    \n    try:\n        http_proxy = address\n        address = address.replace(\"http://\",\"\")\n\n        if username:\n            http_proxy = \"http://\" + username + \":\" + password + \"@\" + address\n\n        # Update Coreagent\n        run_command_and_log(\"mkdir -p /etc/systemd/system/azuremonitor-coreagent.service.d\")\n        run_command_and_log(\"echo '[Service]' > /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\")\n        run_command_and_log(\"echo 'Environment=\\\"http_proxy={0}\\\"' >> /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\".format(http_proxy))\n        run_command_and_log(\"echo 'Environment=\\\"https_proxy={0}\\\"' >> /etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\".format(http_proxy))\n        os.system('chmod {1} {0}'.format(\"/etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\", 400))\n\n        # Update ME\n        run_command_and_log(\"mkdir -p /etc/systemd/system/metrics-extension.service.d\")\n        run_command_and_log(\"echo '[Service]' > /etc/systemd/system/metrics-extension.service.d/proxy.conf\")\n        run_command_and_log(\"echo 'Environment=\\\"http_proxy={0}\\\"' >> /etc/systemd/system/metrics-extension.service.d/proxy.conf\".format(http_proxy))\n        run_command_and_log(\"echo 'Environment=\\\"https_proxy={0}\\\"' >> /etc/systemd/system/metrics-extension.service.d/proxy.conf\".format(http_proxy))\n        os.system('chmod {1} {0}'.format(\"/etc/systemd/system/metrics-extension.service.d/proxy.conf\", 400))\n\n        run_command_and_log(\"systemctl daemon-reload\")\n        run_command_and_log('systemctl restart azuremonitor-coreagent')\n        run_command_and_log('systemctl restart metrics-extension')\n        \n    except:\n        log_and_exit(\"enable\", MissingorInvalidParameterErrorCode, \"Failed to update /etc/systemd/system/azuremonitor-coreagent.service.d and /etc/systemd/system/metrics-extension.service.d\" )\n\ndef unset_proxy():\n    \"\"\"\n    # Unset proxy http_proxy env var in dependent services\n    \"\"\"\n    \n    try:\n        hasSettings=False\n        \n        # Update Coreagent\n        if os.path.exists(\"/etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\"):\n            os.remove(\"/etc/systemd/system/azuremonitor-coreagent.service.d/proxy.conf\")\n            hasSettings=True\n            \n        # Update ME\n        if os.path.exists(\"/etc/systemd/system/metrics-extension.service.d/proxy.conf\"):\n            os.remove(\"/etc/systemd/system/metrics-extension.service.d/proxy.conf\")\n            hasSettings=True\n            \n        if hasSettings:\n            run_command_and_log(\"systemctl daemon-reload\")\n            run_command_and_log('systemctl restart azuremonitor-coreagent')\n            run_command_and_log('systemctl restart metrics-extension')\n        \n        \n    except:\n        log_and_exit(\"enable\", MissingorInvalidParameterErrorCode, \"Failed to remove /etc/systemd/system/azuremonitor-coreagent.service.d and 
/etc/systemd/system/metrics-extension.service.d\" )\n\ndef get_managed_identity():\n    \"\"\"\n    # Determine Managed Identity (MI) settings\n    # Nomenclature: Managed System Identity (MSI), System-Assigned Identity (SAI), User-Assigned Identity (UAI)\n    # Unspecified MI scenario: MSI returns SAI token if exists, otherwise returns UAI token if exactly one UAI exists, otherwise failure\n    # Specified MI scenario: MSI returns token for specified MI\n    # Returns identifier_name, identifier_value, and error message (if any)\n    \"\"\"\n    identifier_name = identifier_value = \"\"\n    public_settings, _ = get_settings()\n\n    if public_settings is not None and public_settings.get(AzureMonitorConfigKey):\n        azure_monitor_configuration = public_settings.get(AzureMonitorConfigKey)\n\n        if azure_monitor_configuration and azure_monitor_configuration.get(\"enable\") == True:\n            public_settings = azure_monitor_configuration.get(\"configuration\")\n\n    if public_settings is not None and \"authentication\" in public_settings and \"managedIdentity\" in public_settings.get(\"authentication\"):\n        managedIdentity = public_settings.get(\"authentication\").get(\"managedIdentity\")\n\n        if \"identifier-name\" not in managedIdentity or \"identifier-value\" not in managedIdentity:\n            return identifier_name, identifier_value, 'Parameters \"identifier-name\" and \"identifier-value\" are both required in authentication.managedIdentity public setting'\n\n        identifier_name = managedIdentity.get(\"identifier-name\")\n        identifier_value = managedIdentity.get(\"identifier-value\")\n\n        if identifier_name not in [\"client_id\", \"mi_res_id\", \"object_id\"]:\n            return identifier_name, identifier_value, 'Invalid identifier-name provided; must be \"client_id\" or \"mi_res_id\" or \"object_id\"'\n\n        if not identifier_value:\n            return identifier_name, identifier_value, 'Invalid identifier-value provided; cannot be empty'\n\n        if identifier_name in [\"object_id\", \"client_id\"]:\n            guid_re = re.compile(r'[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')\n            if not guid_re.search(identifier_value):\n                return identifier_name, identifier_value, 'Invalid identifier-value provided for {0}; must be a GUID'.format(identifier_name)\n\n    return identifier_name, identifier_value, \"\"\n\n\ndef azureotelcollector_is_active():\n    \"\"\"\n    Checks if `azureotelcollector` is installed to run as a systemd service.\n    \"\"\"\n    if is_systemd():\n        try:\n            rc = subprocess.call([\"systemctl\", \"is-active\", \"--quiet\", \"azureotelcollector-watcher.path\"])\n            return rc == 0\n        except OSError:\n            return False\n\n    return False\n\n\ndef install_azureotelcollector():\n    \"\"\"\n    This method will install the azureotelcollector package and start a systemd file watcher service that watches for configuration file changes.\n    MetricsExtension is responsible for writing the configuration file.\n    Only if configuration is present, otelcollector process will start to run, the watcher service is responsible to monitor the configuration file.\n    \"\"\"\n    if is_systemd():\n        find_package_manager(\"Install\")\n        azureotelcollector_install_command = get_otelcollector_installation_command()\n        hutil_log_info('Running command \"{0}\"'.format(azureotelcollector_install_command))\n\n        # Retry, since 
install can fail due to concurrent package operations\n        exit_code, output = run_command_with_retries_output(\n            azureotelcollector_install_command,\n            retries = 5,\n            retry_check = retry_if_dpkg_or_rpm_locked,\n            final_check = final_check_if_dpkg_or_rpm_locked\n        )\n\n        if exit_code == 0:\n            hutil_log_info('Successfully installed azureotelcollector')\n            return True\n\n        hutil_log_error('Error installing azureotelcollector \"{0}\"'.format(output))\n\n    return False\n\n\ndef get_otelcollector_installation_command():\n    \"\"\"\n    This method provides the installation command to install an azureotelcollector package as a systemd service\n    \"\"\"\n    find_package_manager(\"Install\")\n    dir_path = os.getcwd() + \"/azureotelcollector/\"\n    if PackageManager == \"dpkg\":\n        package_path = find_otelcollector_package_file(dir_path, \"deb\")\n    elif PackageManager == \"rpm\":\n        package_path = find_otelcollector_package_file(dir_path, \"rpm\")\n    else:\n        raise Exception(\"Unsupported package manager to install azureotelcollector: {0}.\".format(PackageManager))\n\n    return \"{0} {1} --install {2}\".format(PackageManager, PackageManagerOptions, package_path)\n\n\ndef find_otelcollector_package_file(directory, pkg_type):\n    \"\"\"\n    Finds the otelcollector package in a given path for a given package type using name globbing.\n    \"\"\"\n    arch = platform.machine()\n\n    # Create pattern based on type and arch\n    if pkg_type == \"deb\":\n        if arch == \"x86_64\":\n            pattern = \"azureotelcollector_*_amd64.deb\"\n        elif arch == \"aarch64\":\n            pattern = \"azureotelcollector_*_arm64.deb\"\n        else:\n            raise Exception(\"Unsupported architecture for deb package: {0}\".format(arch))\n    elif pkg_type == \"rpm\":\n        pattern = \"azureotelcollector-*{0}.rpm\".format(arch)\n    else:\n        raise Exception(\"Unsupported package type to install azureotelcollector: {0}\".format(pkg_type))\n\n    search_pattern = os.path.join(directory, pattern)\n    matches = glob.glob(search_pattern)\n\n    if not matches:\n        raise IOError(\"No {0} package found for arch '{1}' in {2} with pattern '{3}'\".format(pkg_type, arch, directory, pattern))\n\n    # Return the most recently modified match\n    return max(matches, key=os.path.getmtime)\n\n\ndef uninstall_azureotelcollector():\n    \"\"\"\n    This method will uninstall azureotelcollector services.\n    No need to stop it separately as the package maintainer script handles it upon uninstalling.\n    \"\"\"\n    if is_feature_enabled(\"enableAzureOTelCollector\"):\n        # Only remove azureotelcollector if file exists\n        if os.path.exists(\"/lib/systemd/system/azureotelcollector-watcher.path\"):\n            azureotelcollector_uninstall_command = \"\"\n            find_package_manager(\"Uninstall\")\n\n            if PackageManager == \"dpkg\":\n                azureotelcollector_uninstall_command = \"dpkg --purge azureotelcollector\"\n            elif PackageManager == \"rpm\":\n                azureotelcollector_uninstall_command = \"rpm --erase azureotelcollector\"\n            else:\n                log_and_exit(\"Uninstall\", UnsupportedOperatingSystem, \"The OS has neither rpm nor dpkg\" )\n\n            hutil_log_info('Running command \"{0}\"'.format(azureotelcollector_uninstall_command))\n\n            exit_code, output = run_command_with_retries_output(\n              
  azureotelcollector_uninstall_command,\n                retries = 5,\n                retry_check = retry_if_dpkg_or_rpm_locked,\n                final_check = final_check_if_dpkg_or_rpm_locked\n            )\n\n            if exit_code == 0:\n                hutil_log_info('Successfully removed azureotelcollector')\n            else:\n                hutil_log_error('Error removing azureotelcollector \"{0}\"'.format(output))\n\n\ndef stop_metrics_process():\n\n    if telhandler.is_running(is_lad=False):\n        #Stop the telegraf and ME services\n        tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)\n        if tel_out:\n            hutil_log_info(tel_msg)\n        else:\n            hutil_log_error(tel_msg)\n\n        #Delete the telegraf and ME services\n        tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service(is_lad=False)\n        if tel_rm_out:\n            hutil_log_info(tel_rm_msg)\n        else:\n            hutil_log_error(tel_rm_msg)\n\n    if me_handler.is_running(is_lad=False):\n        me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)\n        if me_out:\n            hutil_log_info(me_msg)\n        else:\n            hutil_log_error(me_msg)\n\n        me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)\n        if me_rm_out:\n            hutil_log_info(me_rm_msg)\n        else:\n            hutil_log_error(me_rm_msg)\n\n    pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')\n\n    # kill existing metrics watcher\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA metrics watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-metrics\") >= 0:\n                            kill_cmd = \"kill \" + pid\n                            run_command_and_log(kill_cmd)\n\n        run_command_and_log(\"rm \"+pids_filepath)\n\ndef stop_syslogconfig_process():\n    \n    pids_filepath = os.path.join(os.getcwd(),'amasyslogconfig.pid')\n\n    # kill existing syslog config watcher\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA syslog watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-syslogconfig\") >= 0:\n                            kill_cmd = \"kill \" + pid\n                            run_command_and_log(kill_cmd)\n\n        run_command_and_log(\"rm \"+ pids_filepath)\n\ndef is_metrics_process_running():\n    pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA metrics watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n   
             if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-metrics\") >= 0:\n                            return True\n\n    return False\n\ndef is_syslogconfig_process_running():\n    pids_filepath = os.path.join(os.getcwd(),'amasyslogconfig.pid')\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA syslog watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-syslogconfig\") >= 0:\n                            return True\n\n    return False\n\ndef is_transformconfig_process_running():\n    pids_filepath = os.path.join(os.getcwd(),'amatransformconfig.pid')\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA transform config watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-transformconfig\") >= 0:\n                            return True\n\n    return False\n\ndef start_metrics_process():\n    \"\"\"\n    Start metrics process that performs periodic monitoring activities\n    :return: None\n    \"\"\"\n\n    # if metrics process is already running, it should manage lifecycle of telegraf, ME, \n    # process to refresh ME MSI token and look for new config changes if counters change, etc, so this is no-op\n    if not is_metrics_process_running():\n        stop_metrics_process()\n\n        # Start metrics watcher\n        ama_path = os.path.join(os.getcwd(), 'agent.py')\n        args = [sys.executable, ama_path, '-metrics']\n        log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')\n        hutil_log_info('start watcher process '+str(args))\n        subprocess.Popen(args, stdout=log, stderr=log)\n\ndef start_syslogconfig_process():\n    \"\"\"\n    Start syslog check process that performs periodic DCR monitoring activities and looks for syslog config changes\n    :return: None\n    \"\"\"\n\n    # if the syslog config watcher is already running, this is a no-op\n    if not is_syslogconfig_process_running():\n        stop_syslogconfig_process()\n\n        # Start syslog config watcher\n        ama_path = os.path.join(os.getcwd(), 'agent.py')\n        args = [sys.executable, ama_path, '-syslogconfig']\n        log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')\n        hutil_log_info('start syslog watcher process '+str(args))\n        subprocess.Popen(args, stdout=log, stderr=log)\n\ndef start_transformconfig_process():\n    \"\"\"\n    Start agent transform check process that performs periodic DCR monitoring activities and looks for agent transformation config changes\n    :return: None\n    \"\"\"\n\n    # if the transform config watcher is already running, this is a no-op\n    if not is_transformconfig_process_running():\n        
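# Clean up any stale watcher process and PID file before spawning a fresh one.\n        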
stop_transformconfig_process()\n\n        # Start agent transform config watcher\n        ama_path = os.path.join(os.getcwd(), 'agent.py')\n        args = [sys.executable, ama_path, '-transformconfig']\n        log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')\n        hutil_log_info('start agent transform config watcher process '+str(args))\n        subprocess.Popen(args, stdout=log, stderr=log)\n\ndef stop_transformconfig_process():\n\n    pids_filepath = os.path.join(os.getcwd(),'amatransformconfig.pid')\n\n    # kill existing agent transform config watcher\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to AMA transform config watcher.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if len(cmdline) > 0 and cmdline[0].find(\"agent.py\") >= 0 and cmdline[0].find(\"-transformconfig\") >= 0:\n                            kill_cmd = \"kill \" + pid\n                            run_command_and_log(kill_cmd)\n\n        run_command_and_log(\"rm \"+ pids_filepath)\n\ndef metrics_watcher(hutil_error, hutil_log):\n    \"\"\"\n    Watcher thread to monitor metric configuration changes and to take action on them\n    \"\"\"\n    global MDSDFluentPort\n    # Check every 30 seconds\n    sleepTime = 30\n\n    # Retrieve managed identity info that may be needed for token retrieval\n    identifier_name, identifier_value, error_msg = get_managed_identity()\n    if error_msg:\n        hutil_error('Failed to determine managed identity settings; MSI token retrieval will rely on default identity, if any. 
{0}.'.format(error_msg))\n    if identifier_name and identifier_value:\n        managed_identity_str = \"uai#{0}#{1}\".format(identifier_name, identifier_value)\n    else:\n        managed_identity_str = \"sai\"\n\n    # Sleep before starting the monitoring\n    time.sleep(sleepTime)\n    last_crc = None\n    last_crc_fluent = None\n    me_msi_token_expiry_epoch = None\n    enabled_me_CMv2_mode = False\n    log_messages = \"\"\n\n    while True:\n        try:\n            if not azureotelcollector_is_active():\n                install_azureotelcollector()\n\n            if not me_handler.is_running(is_lad=False):\n                me_service_template_path = os.getcwd() + \"/services/metrics-extension.service\"\n\n                try:\n                    if is_feature_enabled(\"enableAzureOTelCollector\"):\n                        if os.path.exists(me_service_template_path):\n                            os.remove(me_service_template_path)\n                        copyfile(os.getcwd() + \"/services/metrics-extension-cmv2.service\", me_service_template_path)\n                        me_handler.setup_me(\n                            is_lad=False,\n                            managed_identity=managed_identity_str,\n                            HUtilObj=HUtilObject,\n                            is_local_control_channel=False,\n                            user=\"azuremetricsext\",\n                            group=\"azuremonitoragent\")\n                        enabled_me_CMv2_mode, log_messages = me_handler.start_metrics_cmv2()\n                    elif is_feature_enabled(\"enableCMV2\"):\n                        if os.path.exists(me_service_template_path):\n                            os.remove(me_service_template_path)\n                        copyfile(os.getcwd() + \"/services/metrics-extension-otlp.service\", me_service_template_path)\n                        me_handler.setup_me(\n                            is_lad=False,\n                            managed_identity=managed_identity_str,\n                            HUtilObj=HUtilObject,\n                            is_local_control_channel=False)\n                        enabled_me_CMv2_mode, log_messages = me_handler.start_metrics_cmv2()\n                except Exception as e:\n                    hutil_log_error(\"Error in setting up metrics-extension.service in CMv2 mode. 
Exception={0}\".format(e))\n\n                if enabled_me_CMv2_mode:\n                    hutil_log_info(\"Successfully started metrics-extension.\")\n                elif log_messages:\n                    hutil_log_error(log_messages)\n\n            # update fluent config for fluent port if needed\n            fluent_port = ''\n            if os.path.isfile(AMAFluentPortFilePath):\n                f = open(AMAFluentPortFilePath, \"r\")\n                fluent_port = validate_port_number(f.read(), \"fluent\")\n                f.close()\n            \n            if fluent_port != '' and os.path.isfile(FluentCfgPath) and fluent_port != MDSDFluentPort:\n                portSetting = \"    Port                       \"  + fluent_port + \"\\n\"\n                defaultPortSetting = 'Port'\n                portUpdated = True                \n                with open(FluentCfgPath, 'r') as f:                    \n                    for line in f:                        \n                        found = re.search(r'^\\s{0,}Port\\s{1,}' + fluent_port + '$', line)\n                        if found:\n                            portUpdated = False\n\n                if portUpdated == True:\n                    with contextlib.closing(fileinput.FileInput(FluentCfgPath, inplace=True, backup='.bak')) as file:\n                        for line in file:\n                            if defaultPortSetting in line:\n                                print(portSetting, end='')\n                            else:\n                                print(line, end='')\n                    os.chmod(FluentCfgPath, stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n                    MDSDFluentPort = fluent_port\n\n                    # add SELinux rules if needed\n                    if os.path.exists('/etc/selinux/config') and fluent_port != '':\n                        sedisabled, _ = run_command_and_log('getenforce | grep -i \"Disabled\"',log_cmd=False, log_output=False)\n                        if sedisabled != 0:                        \n                            check_semanage, _ = run_command_and_log(\"which semanage\",log_cmd=False, log_output=False)\n                            if check_semanage == 0:\n                                fluentPortEnabled, _ = run_command_and_log('grep -Rnw /var/lib/selinux -e ' + fluent_port,log_cmd=False, log_output=False)\n                                if fluentPortEnabled != 0:                    \n                                    # also check SELinux config paths for Oracle/RH\n                                    fluentPortEnabled, _ = run_command_and_log('grep -Rnw /etc/selinux -e ' + fluent_port,log_cmd=False, log_output=False)\n                                    if fluentPortEnabled != 0:                    \n                                        # allow the fluent port in SELinux\n                                        run_command_and_log('semanage port -a -t http_port_t -p tcp ' + fluent_port,log_cmd=False, log_output=False)\n\n            if os.path.isfile(FluentCfgPath):\n                f = open(FluentCfgPath, \"r\")\n                data = f.read()\n\n                if (data != ''):\n                    crc_fluent = hashlib.sha256(data.encode('utf-8')).hexdigest()\n\n                    if (crc_fluent != last_crc_fluent):                        \n                        restart_launcher()\n                        last_crc_fluent = crc_fluent\n           \n            if os.path.isfile(MdsdCounterJsonPath):\n                f = 
open(MdsdCounterJsonPath, \"r\")\n                data = f.read()\n\n                if (data != ''):\n                    json_data = json.loads(data)\n\n                    if len(json_data) == 0:\n                        last_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()\n                        if telhandler.is_running(is_lad=False):\n                            # Stop the telegraf and ME services\n                            tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)\n                            if tel_out:\n                                hutil_log(tel_msg)\n                            else:\n                                hutil_error(tel_msg)\n\n                            # Delete the telegraf and ME services\n                            tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service(is_lad=False)\n                            if tel_rm_out:\n                                hutil_log(tel_rm_msg)\n                            else:\n                                hutil_error(tel_rm_msg)\n\n                        if not enabled_me_CMv2_mode and me_handler.is_running(is_lad=False):\n                            me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)\n                            if me_out:\n                                hutil_log(me_msg)\n                            else:\n                                hutil_error(me_msg)\n\n                            me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)\n                            if me_rm_out:\n                                hutil_log(me_rm_msg)\n                            else:\n                                hutil_error(me_rm_msg)\n\n                    else:\n                        crc = hashlib.sha256(data.encode('utf-8')).hexdigest()\n\n                        if(crc != last_crc):\n                            # Resetting the me_msi_token_expiry_epoch variable if we set up ME again.\n                            me_msi_token_expiry_epoch = None\n                            hutil_log(\"Start processing metric configuration\")\n                            hutil_log(data)\n\n                            telegraf_config, telegraf_namespaces = telhandler.handle_config(\n                                json_data,\n                                \"unix:///run/azuremetricsext/mdm_influxdb.socket\",\n                                \"unix:///run/azuremonitoragent/default_influx.socket\",\n                                is_lad=False)\n\n                            start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)\n                            if start_telegraf_res:\n                                hutil_log(\"Successfully started metrics-sourcer.\")\n                            else:\n                                hutil_error(log_messages)\n\n                            if not enabled_me_CMv2_mode:\n                                me_service_template_path = os.getcwd() + \"/services/metrics-extension.service\"\n                                if os.path.exists(me_service_template_path):\n                                    os.remove(me_service_template_path)\n\n                                copyfile(os.getcwd() + \"/services/metrics-extension-cmv1.service\", me_service_template_path)\n                                me_handler.setup_me(is_lad=False, managed_identity=managed_identity_str, HUtilObj=HUtilObject)\n\n                                start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False, 
managed_identity=managed_identity_str)\n                                if start_metrics_out:\n                                    hutil_log(\"Successfully started metrics-extension.\")\n                                else:\n                                    hutil_error(log_messages)\n\n                            last_crc = crc\n\n                        generate_token = False\n                        # Use a relative second argument; an absolute path would make os.path.join discard getcwd()\n                        me_token_path = os.path.join(os.getcwd(), \"config/metrics_configs/AuthToken-MSI.json\")\n\n                        if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == \"\":\n                            if os.path.isfile(me_token_path):\n                                with open(me_token_path, \"r\") as f:\n                                    authtoken_content = f.read()\n                                    if authtoken_content and \"expires_on\" in authtoken_content:\n                                        me_msi_token_expiry_epoch = json.loads(authtoken_content)[\"expires_on\"]\n                                    else:\n                                        generate_token = True\n                            else:\n                                generate_token = True\n\n                        if me_msi_token_expiry_epoch:\n                            currentTime = datetime.datetime.now()\n                            token_expiry_time = datetime.datetime.fromtimestamp(int(me_msi_token_expiry_epoch))\n                            if token_expiry_time - currentTime < datetime.timedelta(minutes=30):\n                                # The MSI Token will expire within 30 minutes. We need to refresh the token\n                                generate_token = True\n\n                        if generate_token:\n                            generate_token = False\n                            msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token(identifier_name, identifier_value, is_lad=False)\n                            if msi_token_generated:\n                                hutil_log(\"Successfully refreshed metrics-extension MSI Auth token.\")\n                            else:\n                                hutil_error(log_messages)\n\n                        telegraf_restart_retries = 0\n                        me_restart_retries = 0\n                        max_restart_retries = 10\n\n                        # Check if telegraf is running, if not, then restart\n                        if not telhandler.is_running(is_lad=False):\n                            if telegraf_restart_retries < max_restart_retries:\n                                telegraf_restart_retries += 1\n                                hutil_log(\"Telegraf binary process is not running. Restarting telegraf now. 
Retry count - {0}\".format(telegraf_restart_retries))\n                                tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)\n                                if tel_out:\n                                    hutil_log(tel_msg)\n                                else:\n                                    hutil_error(tel_msg)\n                                start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)\n                                if start_telegraf_res:\n                                    hutil_log(\"Successfully started metrics-sourcer.\")\n                                else:\n                                    hutil_error(log_messages)\n                            else:\n                                hutil_error(\"Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log\".format(max_restart_retries))\n                        else:\n                            telegraf_restart_retries = 0\n\n                        # Check if ME is running, if not, then restart\n                        if not me_handler.is_running(is_lad=False):\n                            if me_restart_retries < max_restart_retries:\n                                me_restart_retries += 1\n                                hutil_log(\"MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}\".format(me_restart_retries))\n                                me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)\n                                if me_out:\n                                    hutil_log(me_msg)\n                                else:\n                                    hutil_error(me_msg)\n                                start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False, managed_identity=managed_identity_str)\n\n                                if start_metrics_out:\n                                    hutil_log(\"Successfully started metrics-extension.\")\n                                else:\n                                    hutil_error(log_messages)\n                            else:\n                                hutil_error(\"MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs\".format(max_restart_retries))\n                        else:\n                            me_restart_retries = 0\n\n        except IOError as e:\n            hutil_error('I/O error in setting up or monitoring metrics. Exception={0}'.format(e))\n\n        except Exception as e:\n            hutil_error('Error in setting up or monitoring metrics. 
Exception={0}'.format(e))\n\n        finally:\n            time.sleep(sleepTime)\n\ndef syslogconfig_watcher(hutil_error, hutil_log):\n    \"\"\"\n    Watcher thread to monitor syslog configuration changes and to take action on them\n    \"\"\"\n    syslog_enabled = False\n    # Check for config changes every 30 seconds\n    sleepTime = 30\n\n    # Sleep before starting the monitoring\n    time.sleep(sleepTime)\n\n    GcsEnabled, McsEnabled = get_control_plane_mode()\n\n    while True:\n        try:\n            if os.path.isfile(AMASyslogConfigMarkerPath):\n                f = open(AMASyslogConfigMarkerPath, \"r\")\n                data = f.read()\n\n                if (data != ''):\n                    if \"true\" in data:\n                        syslog_enabled = True\n                f.close()\n            elif GcsEnabled:\n                # 1P syslog is always enabled, as each tenant may have a different mdsd.xml configuration\n                syslog_enabled = True\n\n            if syslog_enabled:\n                # place syslog local configs\n                syslog_enabled = False\n                generate_localsyslog_configs(uses_gcs=GcsEnabled, uses_mcs=McsEnabled)\n            else:\n                # remove syslog local configs\n                remove_localsyslog_configs()\n\n        except IOError as e:\n            hutil_error('I/O error in setting up syslog config watcher. Exception={0}'.format(e))\n\n        except Exception as e:\n            hutil_error('Error in setting up syslog config watcher. Exception={0}'.format(e))\n\n        finally:\n            time.sleep(sleepTime)\n\ndef transformconfig_watcher(hutil_error, hutil_log):\n    \"\"\"\n    Watcher thread to monitor agent transformation configuration changes and to take action on them\n    \"\"\"\n    # Check for config changes every 30 seconds\n    sleepTime = 30\n\n    # Sleep before starting the monitoring\n    time.sleep(sleepTime)\n    last_crc = None\n\n    while True:\n        try:\n            if os.path.isfile(AMAAstTransformConfigMarkerPath):\n                f = open(AMAAstTransformConfigMarkerPath, \"r\")\n                data = f.read()\n                if (data != ''):\n                    crc = hashlib.sha256(data.encode('utf-8')).hexdigest()\n\n                    if (crc != last_crc):\n                        restart_astextension()\n                        last_crc = crc\n                f.close()\n\n        except IOError as e:\n            hutil_error('I/O error in setting up agent transform config watcher. Exception={0}'.format(e))\n\n        except Exception as e:\n            hutil_error('Error in setting up agent transform config watcher. 
Exception={0}'.format(e))\n\n        finally:\n            time.sleep(sleepTime)\n\ndef generate_localsyslog_configs(uses_gcs = False, uses_mcs = False):\n    \"\"\"\n    Install local syslog configuration files if not present and restart syslog\n    \"\"\"\n    global MDSDSyslogPort\n    \n    # don't deploy any configuration if no control plane is configured\n    if not uses_gcs and not uses_mcs:\n        return\n    \n    public_settings, _ = get_settings()\n    syslog_port = ''\n    if os.path.isfile(AMASyslogPortFilePath):\n        f = open(AMASyslogPortFilePath, \"r\")\n        syslog_port = validate_port_number(f.read(), \"syslog\")\n        f.close()\n        \n    useSyslogTcp = False\n\n    if syslog_port == MDSDSyslogPort:\n        return\n    \n    # always use syslog tcp port, unless \n    # - the distro is Red Hat based and doesn't have semanage\n    #   these distros seem to have SELinux on by default and we shouldn't be installing semanage ourselves\n    if not os.path.exists('/etc/selinux/config'):\n        useSyslogTcp = True\n    else:        \n        sedisabled, _ = run_command_and_log('getenforce | grep -i \"Disabled\"',log_cmd=False, log_output=False)\n        if sedisabled == 0:\n            useSyslogTcp = True\n        else:            \n            check_semanage, _ = run_command_and_log(\"which semanage\",log_cmd=False, log_output=False)\n            if check_semanage == 0 and syslog_port != '':\n                syslogPortEnabled, _ = run_command_and_log('grep -Rnw /var/lib/selinux -e ' + syslog_port,log_cmd=False, log_output=False)\n                if syslogPortEnabled != 0:                    \n                    # also check SELinux config paths for Oracle/RH\n                    syslogPortEnabled, _ = run_command_and_log('grep -Rnw /etc/selinux -e ' + syslog_port,log_cmd=False, log_output=False)\n                    if syslogPortEnabled != 0:                    \n                        # allow the syslog port in SELinux\n                        run_command_and_log('semanage port -a -t syslogd_port_t -p tcp ' + syslog_port,log_cmd=False, log_output=False)\n                useSyslogTcp = True   \n\n    if syslog_port != '':\n        MDSDSyslogPort = syslog_port\n    \n    # 1P tenants use omuxsock, so keep using that for customers using 1P\n    if useSyslogTcp == True and syslog_port != '':\n        if os.path.exists('/etc/rsyslog.d/'):            \n            restartRequired = False\n            if uses_gcs and not os.path.exists('/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf'):\n                copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/05-azuremonitoragent-loadomuxsock.conf\",\"/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf\")\n                restartRequired = True\n            \n            if not os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):\n                if os.path.exists('/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf'):\n                    os.remove(\"/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf\")\n                if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):\n                    os.remove(\"/etc/rsyslog.d/10-azuremonitoragent.conf\")\n                copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent-omfwd.conf\",\"/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf\")\n                os.chmod('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n                
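# A fresh omfwd forwarding config was deployed, so rsyslog must be restarted to load it.\n                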
restartRequired = True                \n            \n            portSetting = 'Port=\"' + syslog_port + '\"'\n            defaultPortSetting = 'Port=\"28330\"'\n            portUpdated = False\n            with open('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf') as f:\n                if portSetting not in f.read():\n                    portUpdated = True\n\n            if portUpdated == True:\n                copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent-omfwd.conf\",\"/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf\")\n                with contextlib.closing(fileinput.FileInput('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', inplace=True, backup='.bak')) as file:\n                    for line in file:\n                        print(line.replace(defaultPortSetting, portSetting), end='')\n                os.chmod('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n                restartRequired = True\n            \n            if restartRequired == True:\n                run_command_and_log(get_service_command(\"rsyslog\", \"restart\"))\n                hutil_log_info(\"Installed local syslog configuration files and restarted syslog\")\n\n        if os.path.exists('/etc/syslog-ng/syslog-ng.conf'):\n            restartRequired = False\n            if not os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):\n                if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):\n                    os.remove(\"/etc/syslog-ng/conf.d/azuremonitoragent.conf\")\n                syslog_ng_confpath = os.path.join('/etc/syslog-ng/', 'conf.d')\n                if not os.path.exists(syslog_ng_confpath):\n                    os.makedirs(syslog_ng_confpath)\n                copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent-tcp.conf\",\"/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf\")\n                os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n                restartRequired = True\n\n            portSetting = \"port(\" + syslog_port + \")\"\n            defaultPortSetting = \"port(28330)\"\n            portUpdated = False\n            with open('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf') as f:\n                if portSetting not in f.read():\n                    portUpdated = True\n\n            if portUpdated == True:\n                copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent-tcp.conf\",\"/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf\")\n                with contextlib.closing(fileinput.FileInput('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', inplace=True, backup='.bak')) as file:\n                    for line in file:\n                        print(line.replace(defaultPortSetting, portSetting), end='')\n                os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n                restartRequired = True\n            \n            if restartRequired == True:\n                run_command_and_log(get_service_command(\"syslog-ng\", \"restart\"))\n                hutil_log_info(\"Installed local syslog configuration files and restarted syslog\")    \n    else:\n        if os.path.exists('/etc/rsyslog.d/') and not os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):\n            if 
os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):\n                os.remove(\"/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf\")\n            copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/05-azuremonitoragent-loadomuxsock.conf\",\"/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf\")\n            copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/rsyslogconf/10-azuremonitoragent.conf\",\"/etc/rsyslog.d/10-azuremonitoragent.conf\")\n            os.chmod('/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n            os.chmod('/etc/rsyslog.d/10-azuremonitoragent.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n            run_command_and_log(get_service_command(\"rsyslog\", \"restart\"))\n            hutil_log_info(\"Installed local syslog configuration files and restarted syslog\")\n\n        if os.path.exists('/etc/syslog-ng/syslog-ng.conf') and not os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):\n            if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):\n                os.remove(\"/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf\")\n            syslog_ng_confpath = os.path.join('/etc/syslog-ng/', 'conf.d')\n            if not os.path.exists(syslog_ng_confpath):\n                os.makedirs(syslog_ng_confpath)\n            copyfile(\"/etc/opt/microsoft/azuremonitoragent/syslog/syslog-ngconf/azuremonitoragent.conf\",\"/etc/syslog-ng/conf.d/azuremonitoragent.conf\")\n            os.chmod('/etc/syslog-ng/conf.d/azuremonitoragent.conf', stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)\n            run_command_and_log(get_service_command(\"syslog-ng\", \"restart\"))\n            hutil_log_info(\"Installed local syslog configuration files and restarted syslog\")\n\ndef remove_localsyslog_configs():\n    \"\"\"\n    Remove local syslog configuration files if present and restart syslog\n    \"\"\"    \n    if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf') or os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):\n        if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf'):\n            os.remove(\"/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf\")\n        if os.path.exists('/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf'):\n            os.remove(\"/etc/rsyslog.d/05-azuremonitoragent-loadomuxsock.conf\")\n        if os.path.exists('/etc/rsyslog.d/10-azuremonitoragent.conf'):            \n            os.remove(\"/etc/rsyslog.d/10-azuremonitoragent.conf\")\n        run_command_and_log(get_service_command(\"rsyslog\", \"restart\"))\n        hutil_log_info(\"Removed local syslog configuration files if found and restarted syslog\")\n\n    if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf') or os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):\n        if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf'):\n            os.remove(\"/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf\")\n        if os.path.exists('/etc/syslog-ng/conf.d/azuremonitoragent.conf'):\n            os.remove(\"/etc/syslog-ng/conf.d/azuremonitoragent.conf\")\n        run_command_and_log(get_service_command(\"syslog-ng\", \"restart\"))\n        hutil_log_info(\"Removed local syslog configuration files if found and restarted syslog\")\n\ndef metrics():\n    \"\"\"\n    Take care of setting up telegraf and ME for metrics if configuration is present\n    \"\"\"\n    
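# Record this process's PID so is_metrics_process_running() and stop_metrics_process() can find it later.\n    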
pids_filepath = os.path.join(os.getcwd(), 'amametrics.pid')\n    py_pid = os.getpid()\n    with open(pids_filepath, 'w') as f:\n        f.write(str(py_pid) + '\\n')\n\n    watcher_thread = Thread(target = metrics_watcher, args = [hutil_log_error, hutil_log_info])\n    watcher_thread.start()\n    watcher_thread.join()\n\n    return 0, \"\"\n\ndef syslogconfig():\n    \"\"\"\n    Take care of setting up syslog configuration change watcher\n    \"\"\"\n    pids_filepath = os.path.join(os.getcwd(), 'amasyslogconfig.pid')\n    py_pid = os.getpid()\n    with open(pids_filepath, 'w') as f:\n        f.write(str(py_pid) + '\\n')\n\n    watcher_thread = Thread(target = syslogconfig_watcher, args = [hutil_log_error, hutil_log_info])\n    watcher_thread.start()\n    watcher_thread.join()\n\n    return 0, \"\"\n\ndef transformconfig():\n    \"\"\"\n    Take care of setting up agent transformation configuration change watcher\n    \"\"\"\n    pids_filepath = os.path.join(os.getcwd(), 'amatransformconfig.pid')\n    py_pid = os.getpid()\n    with open(pids_filepath, 'w') as f:\n        f.write(str(py_pid) + '\\n')\n\n    watcher_thread = Thread(target = transformconfig_watcher, args = [hutil_log_error, hutil_log_info])\n    watcher_thread.start()\n    watcher_thread.join()\n\n    return 0, \"\"\n\n# Dictionary of operations strings to methods\noperations = {'Disable' : disable,\n              'Uninstall' : uninstall,\n              'Install' : install,\n              'Enable' : enable,\n              'Update' : update,\n              'Metrics' : metrics,\n              'Syslogconfig' : syslogconfig,\n              'Transformconfig' : transformconfig\n}\n\n\ndef parse_context(operation):\n    \"\"\"\n    Initialize a HandlerUtil object for this operation.\n    If the required modules have not been imported, this will return None.\n    \"\"\"\n    hutil = None\n    if ('Utils.WAAgentUtil' in sys.modules\n            and 'Utils.HandlerUtil' in sys.modules):\n        try:\n\n            logFileName = 'extension.log'\n            hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)\n            hutil.do_parse_context(operation)\n\n            # As per VM extension team, we have to manage rotation for our extension.log\n            # for now, this is our extension code, but to be moved to HUtil library.\n            if os.path.exists(WAGuestAgentLogRotateFilePath):      \n                if os.path.exists(AMAExtensionLogRotateFilePath):\n                    try:\n                        os.remove(AMAExtensionLogRotateFilePath)\n                    except Exception as ex:\n                        output = 'Logrotate removal failed with error: {0}\\nStacktrace: {1}'.format(ex, traceback.format_exc())\n                        hutil_log_info(output)\n            else:\n                if not os.path.exists(AMAExtensionLogRotateFilePath):      \n                    logrotateFilePath = os.path.join(os.getcwd(), 'azuremonitoragentextension.logrotate')\n                    copyfile(logrotateFilePath,AMAExtensionLogRotateFilePath)\n            \n        # parse_context may throw KeyError if necessary JSON key is not\n        # present in settings\n        except KeyError as e:\n            waagent_log_error('Unable to parse context with error: ' \\\n                              '{0}'.format(e))\n            raise ParameterMissingException\n    return hutil\n\ndef set_os_arch(operation):\n    \"\"\"\n    Checks if the current system architecture is present in the SupportedArch set and 
replaces \n    the package names accordingly\n    \"\"\"\n    global BundleFileName, SupportedArch\n    current_arch = platform.machine()\n\n    if current_arch in SupportedArch:\n\n        # Replace the AMA package name according to architecture\n        BundleFileName = BundleFileName.replace('x86_64', current_arch)\n    \n\ndef find_package_manager(operation):\n    \"\"\"\n    Checks whether the distro is dpkg-based (Debian family) or rpm-based and assigns the package manager accordingly\n    \"\"\"\n    global PackageManager, PackageManagerOptions, BundleFileName\n    dist, _ = find_vm_distro(operation)\n\n    dpkg_set = set([\"debian\", \"ubuntu\"])\n    rpm_set = set([\"oracle\", \"ol\", \"redhat\", \"centos\", \"red hat\", \"suse\", \"sles\", \"opensuse\", \"cbl-mariner\", \"mariner\", \"azurelinux\", \"rhel\", \"rocky\", \"alma\", \"amzn\"])\n    for dpkg_dist in dpkg_set:\n        if dist.startswith(dpkg_dist):\n            PackageManager = \"dpkg\"\n            # OK to replace the /etc/default/azuremonitoragent, since the placeholders get replaced again.\n            # Otherwise, the package manager prompts for action (Y/I/N/O/D/Z) [default=N]\n            PackageManagerOptions = \"--force-overwrite --force-confnew\"\n            BundleFileName = BundleFileNameDeb\n            break\n\n    for rpm_dist in rpm_set:\n        if dist.startswith(rpm_dist):\n            PackageManager = \"rpm\"\n            # Same as above.\n            PackageManagerOptions = \"--force\"\n            BundleFileName = BundleFileNameRpm\n            break\n\n    if PackageManager == \"\":\n        log_and_exit(operation, UnsupportedOperatingSystem, \"The OS has neither rpm nor dpkg\")\n\n\ndef find_vm_distro(operation):\n    \"\"\"\n    Finds the Linux distribution this VM is running on by directly parsing\n    distribution-specific files for reliable detection.\n    \"\"\"\n    vm_dist = vm_ver = \"\"\n    detection_files_checked = []\n    \n    # Try to read from /etc/os-release first (most modern distributions)\n    if os.path.exists('/etc/os-release'):\n        detection_files_checked.append('/etc/os-release')\n        try:\n            with open('/etc/os-release', 'r') as fp:\n                os_release = {}\n                for line in fp:\n                    if line.strip() and '=' in line:\n                        k, v = line.strip().split('=', 1)\n                        os_release[k] = v.strip('\"\\'').strip()\n                \n                if 'ID' in os_release:\n                    vm_dist = os_release['ID'].lower()\n                    # Clean up the ID by removing any vendor-specific suffixes\n                    vm_dist = vm_dist.split('-')[0]\n                \n                if 'VERSION_ID' in os_release:\n                    vm_ver = os_release['VERSION_ID'].lower()\n                \n                # Fallback for ID_LIKE if direct ID isn't recognized\n                if not vm_dist and 'ID_LIKE' in os_release:\n                    # Get first value from ID_LIKE\n                    vm_dist = os_release['ID_LIKE'].lower().split()[0].strip('\"\\'')\n                    vm_dist = vm_dist.split('-')[0]\n                \n                hutil_log_info(\"OS detected from /etc/os-release: {0} {1}\".format(vm_dist, vm_ver))\n        except Exception as e:\n            hutil_log_error(\"Error reading /etc/os-release: {0}\".format(str(e)))\n    \n    # If we couldn't get the distribution from /etc/os-release, try other files\n    if not vm_dist or not vm_ver:\n        # Try /etc/system-release first 
(used by Amazon Linux and others)\n        if os.path.exists('/etc/system-release'):\n            detection_files_checked.append('/etc/system-release')\n            try:\n                with open('/etc/system-release', 'r') as fp:\n                    content = fp.read().lower()\n                    if 'amazon' in content:\n                        vm_dist = 'amzn'\n                        # Try to extract version\n                        version_match = re.search(r'release\\s+(\\d+(\\.\\d+)?)', content)\n                        if version_match:\n                            vm_ver = version_match.group(1)\n                        hutil_log_info(\"OS detected from /etc/system-release: {0} {1}\".format(vm_dist, vm_ver))\n            except Exception as e:\n                hutil_log_error(\"Error reading /etc/system-release: {0}\".format(str(e)))\n        \n        # SUSE specific detection\n        if not vm_dist and os.path.exists('/etc/SuSE-release'):\n            detection_files_checked.append('/etc/SuSE-release')\n            try:\n                with open('/etc/SuSE-release', 'r') as fp:\n                    content = fp.read()\n                    if 'SUSE Linux Enterprise Server' in content:\n                        vm_dist = 'sles'\n                    elif 'openSUSE' in content:\n                        vm_dist = 'opensuse'\n                    else:\n                        vm_dist = 'suse'\n                    \n                    # Try to extract the version\n                    version_match = re.search(r'VERSION\\s*=\\s*(\\d+)', content)\n                    if version_match:\n                        vm_ver = version_match.group(1)\n                    \n                    # Also look for service pack level\n                    sp_match = re.search(r'PATCHLEVEL\\s*=\\s*(\\d+)', content)\n                    if sp_match and vm_ver:\n                        vm_ver = '{0}.{1}'.format(vm_ver, sp_match.group(1))\n                    \n                    hutil_log_info(\"OS detected from /etc/SuSE-release: {0} {1}\".format(vm_dist, vm_ver))\n            except Exception as e:\n                hutil_log_error(\"Error reading /etc/SuSE-release: {0}\".format(str(e)))\n        \n        # Red Hat based systems\n        if not vm_dist and os.path.exists('/etc/redhat-release'):\n            detection_files_checked.append('/etc/redhat-release')\n            try:\n                with open('/etc/redhat-release', 'r') as fp:\n                    content = fp.read().lower()\n                    if 'red hat' in content:\n                        vm_dist = 'redhat'\n                    elif 'centos' in content:\n                        vm_dist = 'centos'\n                    elif 'oracle' in content:\n                        vm_dist = 'oracle'\n                    elif 'fedora' in content:\n                        vm_dist = 'fedora'\n                    elif 'rocky' in content:\n                        vm_dist = 'rocky'\n                    elif 'alma' in content:\n                        vm_dist = 'alma'\n                    else:\n                        vm_dist = 'redhat'  # Default to redhat for RHEL-based systems\n                    \n                    # Try to extract version using a more flexible pattern\n                    # This handles formats like \"release 8.6\" or \"release 7.9.2009\"\n                    version_match = re.search(r'release\\s+(\\d+(\\.\\d+){0,2})', content)\n                    if version_match:\n                        vm_ver = version_match.group(1)\n     
               \n                    hutil_log_info(\"OS detected from /etc/redhat-release: {0} {1}\".format(vm_dist, vm_ver))\n            except Exception as e:\n                hutil_log_error(\"Error reading /etc/redhat-release: {0}\".format(str(e)))\n        \n        # Debian based systems with lsb-release\n        if not vm_dist and os.path.exists('/etc/lsb-release'):\n            detection_files_checked.append('/etc/lsb-release')\n            try:\n                lsb_data = {}\n                with open('/etc/lsb-release', 'r') as fp:\n                    for line in fp:\n                        if line.strip() and '=' in line:\n                            k, v = line.strip().split('=', 1)\n                            lsb_data[k] = v.strip('\"\\'')\n                \n                if 'DISTRIB_ID' in lsb_data:\n                    vm_dist = lsb_data['DISTRIB_ID'].lower()\n                if 'DISTRIB_RELEASE' in lsb_data:\n                    vm_ver = lsb_data['DISTRIB_RELEASE'].lower()\n                \n                hutil_log_info(\"OS detected from /etc/lsb-release: {0} {1}\".format(vm_dist, vm_ver))\n            except Exception as e:\n                hutil_log_error(\"Error reading /etc/lsb-release: {0}\".format(str(e)))\n        \n        # Debian specific detection\n        if not vm_dist and os.path.exists('/etc/debian_version'):\n            detection_files_checked.append('/etc/debian_version')\n            try:\n                with open('/etc/debian_version', 'r') as fp:\n                    vm_ver = fp.read().strip()\n                vm_dist = 'debian'\n                hutil_log_info(\"OS detected from /etc/debian_version: {0} {1}\".format(vm_dist, vm_ver))\n            except Exception as e:\n                hutil_log_error(\"Error reading /etc/debian_version: {0}\".format(str(e)))\n    \n    # Final fallback - try /proc/version\n    if not vm_dist and os.path.exists('/proc/version'):\n        detection_files_checked.append('/proc/version')\n        try:\n            with open('/proc/version', 'r') as fp:\n                content = fp.read().lower()\n                if 'debian' in content:\n                    vm_dist = 'debian'\n                elif 'ubuntu' in content:\n                    vm_dist = 'ubuntu'\n                elif 'red hat' in content or 'redhat' in content:\n                    vm_dist = 'redhat'\n                elif 'suse' in content:\n                    vm_dist = 'suse'\n                \n                # Try to extract version - not always reliable from /proc/version\n                hutil_log_info(\"OS detected from /proc/version: {0}\".format(vm_dist))\n        except Exception as e:\n            hutil_log_error(\"Error reading /proc/version: {0}\".format(str(e)))\n    \n    # If we still couldn't determine the OS, log what we tried and throw an error\n    if not vm_dist:\n        error_msg = 'Indeterminate operating system. Files checked: {0}'.format(\", \".join(detection_files_checked))\n        log_and_exit(operation, IndeterminateOperatingSystem, error_msg)\n    \n    # Normalize distribution names\n    if vm_dist == 'rhel' or vm_dist == 'red hat':\n        vm_dist = 'redhat'\n    elif vm_dist == 'ol':\n        vm_dist = 'oracle'\n\n    if vm_ver and '.' 
in vm_ver and vm_dist != 'ubuntu':\n        # For Ubuntu, keep major.minor format (e.g., \"18.04\")\n        # For other distributions, extract only the major version\n        # This is needed for matching with supported_distros.py\n        vm_ver = vm_ver.split('.')[0]\n    \n    # Add debugging info\n    hutil_log_info(\"Final OS detection result: {0} {1}\".format(vm_dist.lower(), vm_ver.lower()))\n    \n    return vm_dist.lower(), vm_ver.lower()\n\ndef is_vm_supported_for_extension(operation):\n    \"\"\"\n    Checks if the VM this extension is running on is supported by AzureMonitorAgent\n    Detected distribution version strings vary widely in format (e.g.\n    '7.3.1611' on a VM with CentOS 7), so only the leading digits listed for\n    a supported version must match\n    The supported distros of the AzureMonitorLinuxAgent are allowed to utilize\n    this VM extension. All other distros will get error code 51\n    \"\"\"\n\n    if platform.machine() == 'aarch64':\n        supported_dists = supported_distros.supported_dists_aarch64\n    else:\n        supported_dists = supported_distros.supported_dists_x86_64\n\n    vm_supported = False\n    vm_dist, vm_ver = find_vm_distro(operation)\n    # Find this VM distribution in the supported list\n    for supported_dist in list(supported_dists.keys()):\n        if not vm_dist.startswith(supported_dist):\n            continue\n\n        # Check if this VM distribution version is supported\n        vm_ver_split = vm_ver.split('.')\n        for supported_ver in supported_dists[supported_dist]:\n            supported_ver_split = supported_ver.split('.')\n\n            # If vm_ver is at least as precise (at least as many digits) as\n            # supported_ver and matches all the supported_ver digits, then\n            # this VM is guaranteed to be supported\n            vm_ver_match = True\n            for idx, supported_ver_num in enumerate(supported_ver_split):\n                try:\n                    supported_ver_num = int(supported_ver_num)\n                    vm_ver_num = int(vm_ver_split[idx])\n                except IndexError:\n                    vm_ver_match = False\n                    break\n                if vm_ver_num != supported_ver_num:\n                    vm_ver_match = False\n                    break\n            if vm_ver_match:\n                vm_supported = True\n                break\n\n        if vm_supported:\n            break\n\n    return vm_supported, vm_dist, vm_ver\n\n\ndef exit_if_vm_not_supported(operation):\n    \"\"\"\n    Check if this VM distro and version are supported by the AzureMonitorLinuxAgent.\n    If VM is supported, find the package manager present in this distro\n    If this VM is not supported, log the proper error code and exit.\n    \"\"\"\n    vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension(operation)\n    if not vm_supported:\n        log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \\\n                                    '{0} {1}'.format(vm_dist, vm_ver))\n    return 0\n\ndef is_feature_enabled(feature):\n    \"\"\"\n    Checks if the feature is enabled in the current region\n    \"\"\"\n    feature_support_matrix = {\n        'useDynamicSSL'             : ['all'],\n        'enableCMV2'                : ['all'],\n        'enableAzureOTelCollector'  : ['all']\n    }\n    \n    featurePreviewFlagPath = PreviewFeaturesDirectory + feature\n    if os.path.exists(featurePreviewFlagPath):\n        return True\n    \n    featurePreviewDisabledFlagPath = 
PreviewFeaturesDirectory + feature + 'Disabled'\n    if os.path.exists(featurePreviewDisabledFlagPath):\n        return False\n    \n    _, region = get_azure_environment_and_region()\n\n    if feature in feature_support_matrix.keys():\n        if region in feature_support_matrix[feature] or \"all\" in feature_support_matrix[feature]:\n            return True\n    \n    return False\n\n\ndef get_ssl_cert_info(operation):\n    \"\"\"\n    Get the appropriate SSL_CERT_DIR / SSL_CERT_FILE based on the Linux distro\n    \"\"\"\n    name = value = None\n\n    distro, version = find_vm_distro(operation)\n\n    for name in ['ubuntu', 'debian']:\n        if distro.startswith(name):\n            return 'SSL_CERT_DIR', '/etc/ssl/certs'\n\n    for name in ['centos', 'redhat', 'red hat', 'oracle', 'ol', 'cbl-mariner', 'mariner', 'azurelinux', 'rhel', 'rocky', 'alma', 'amzn']:\n        if distro.startswith(name):\n            return 'SSL_CERT_FILE', '/etc/pki/tls/certs/ca-bundle.crt'\n\n    for name in ['suse', 'sles', 'opensuse']:\n        if distro.startswith(name):\n            if version.startswith('12'):\n                return 'SSL_CERT_DIR', '/var/lib/ca-certificates/openssl'\n            elif version.startswith('15') or version.startswith('16'):\n                return 'SSL_CERT_DIR', '/etc/ssl/certs'\n\n    log_and_exit(operation, GenericErrorCode, 'Unable to determine values for SSL_CERT_DIR or SSL_CERT_FILE')\n\ndef copy_astextension_binaries():\n    astextension_bin_local_path = os.getcwd() + \"/AstExtensionBin/\"\n    astextension_bin = \"/opt/microsoft/azuremonitoragent/bin/astextension/\"\n    astextension_runtimesbin = \"/opt/microsoft/azuremonitoragent/bin/astextension/runtimes/\"\n    if os.path.exists(astextension_runtimesbin):\n        # only for versions of AMA with .NET runtimes\n        rmtree(astextension_runtimesbin)\n    # For versions of AMA that shipped AstExtension with .NET: clean up the .NET files, as they cause issues with the AOT runtime\n    for f in os.listdir(astextension_bin):\n        if f != 'AstExtension' and f != 'appsettings.json':\n            os.remove(os.path.join(astextension_bin, f))\n\n    for f in os.listdir(astextension_bin_local_path):\n        compare_and_copy_bin(astextension_bin_local_path + f, astextension_bin + f)\n\n\ndef is_arc_installed():\n    \"\"\"\n    Check if this is an Arc machine\n    \"\"\"\n    # Using systemctl to check this since Arc only supports VMs that have systemd\n    check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')\n    return check_arc == 0\n\n\ndef get_arc_endpoint():\n    \"\"\"\n    Find the endpoint for Arc IMDS\n    \"\"\"\n    endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'\n    endpoint = ''\n    try:\n        with open(endpoint_filepath, 'r') as f:\n            data = f.read()\n        endpoint = data.split(\"\\\"IMDS_ENDPOINT=\")[1].split(\"\\\"\\n\")[0]\n    except:\n        hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))\n    return endpoint\n\n\ndef get_imds_endpoint():\n    \"\"\"\n    Find the appropriate endpoint (Azure or Arc) for IMDS\n    \"\"\"\n    azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'\n    if (is_arc_installed()):\n        hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')\n        imds_endpoint = get_arc_endpoint()\n        if imds_endpoint:\n            imds_endpoint += '/metadata/instance?api-version=2019-08-15'\n        else:\n            # Fall back to the traditional IMDS endpoint; the 
cloud domain and VM\n            # resource id detection logic are resilient to failed queries to IMDS\n            imds_endpoint = azure_imds_endpoint\n            hutil_log_info('Falling back to default Azure IMDS endpoint')\n    else:\n        imds_endpoint = azure_imds_endpoint\n\n    hutil_log_info('Using IMDS endpoint \"{0}\"'.format(imds_endpoint))\n    return imds_endpoint\n\n\ndef get_azure_environment_and_region():\n    \"\"\"\n    Retrieve the Azure environment and region from Azure or Arc IMDS\n    \"\"\"\n    imds_endpoint = get_imds_endpoint()\n    req = urllib.Request(imds_endpoint)\n    req.add_header('Metadata', 'True')\n\n    environment = region = None\n\n    try:\n        response = json.loads(urllib.urlopen(req).read().decode('utf-8', 'ignore'))\n\n        if ('compute' in response):\n            if ('azEnvironment' in response['compute']):\n                environment = response['compute']['azEnvironment'].lower()\n            if ('location' in response['compute']):\n                region = response['compute']['location'].lower()\n    except urlerror.HTTPError as e:\n        hutil_log_error('Request to Metadata service URL failed with an HTTPError: {0}'.format(e))\n        hutil_log_error('Response from Metadata service: {0}'.format(e.read()))\n    except Exception as e:\n        hutil_log_error('Unexpected error from Metadata service: {0}'.format(e))\n\n    hutil_log_info('Detected environment: {0}, region: {1}'.format(environment, region))\n\n    return environment, region\n\n\ndef run_command_and_log(cmd, check_error = True, log_cmd = True, log_output = True):\n    \"\"\"\n    Run the provided shell command and log its output, including stdout and\n    stderr.\n    The output should not contain any PII, but the command might. In this case,\n    log_cmd should be set to False.\n    \"\"\"\n    exit_code, output = run_get_output(cmd, check_error, log_cmd)\n    if log_cmd:\n        hutil_log_info('Output of command \"{0}\": \\n{1}'.format(cmd.rstrip(), output))\n    elif log_output:\n        hutil_log_info('Output: \\n{0}'.format(output))\n\n    if \"cannot open Packages database\" in output:\n        # Install failures\n        # External issue. Package manager db is either corrupt or needs cleanup\n        # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n        exit_code = MissingDependency\n        output += \"Package manager database is in a bad state. Please recover package manager, db cache and try install again later.\"\n    elif \"Permission denied\" in output:\n        # Enable failures\n        # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n        exit_code = MissingDependency\n\n    return exit_code, output\n\ndef run_command_with_retries_output(cmd, retries, retry_check, final_check = None,\n                             check_error = True, log_cmd = True,\n                             initial_sleep_time = InitialRetrySleepSeconds,\n                             sleep_increase_factor = 1):\n    \"\"\"\n    Caller provides a method, retry_check, to use to determine if a retry\n    should be performed. 
This must be a function with two parameters:\n    exit_code and output\n    The final_check can be provided as a method to perform a final check after\n    retries have been exhausted\n    Logic used: will retry up to retries times with initial_sleep_time in\n    between tries\n    If the retry_check returns True for retry_verbosely, we will try cmd with\n    the standard -v verbose flag added\n    \"\"\"\n    try_count = 0\n    sleep_time = initial_sleep_time\n    run_cmd = cmd\n    run_verbosely = False\n\n    while try_count <= retries:\n        if run_verbosely:\n            run_cmd = cmd + ' -v'\n        exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)\n        should_retry, retry_message, run_verbosely = retry_check(exit_code,\n                                                                 output)\n        if not should_retry:\n            break\n        try_count += 1\n        hutil_log_info(retry_message)\n        time.sleep(sleep_time)\n        sleep_time *= sleep_increase_factor\n\n    if final_check is not None:\n        exit_code = final_check(exit_code, output)\n\n    return exit_code, output\n\n\ndef is_dpkg_or_rpm_locked(exit_code, output):\n    \"\"\"\n    If dpkg is locked, the output will contain a message similar to 'dpkg\n    status database is locked by another process'\n    \"\"\"\n    if exit_code != 0:\n        dpkg_locked_search = r'^.*dpkg.+lock.*$'\n        dpkg_locked_re = re.compile(dpkg_locked_search, re.M)\n        if dpkg_locked_re.search(output):\n            return True\n\n        rpm_locked_search = r'^.*rpm.+lock.*$'\n        rpm_locked_re = re.compile(rpm_locked_search, re.M)\n        if rpm_locked_re.search(output):\n            return True\n    return False\n\n\ndef retry_if_dpkg_or_rpm_locked(exit_code, output):\n    \"\"\"\n    Some commands fail because the package manager is locked (apt-get/dpkg\n    only); this will allow retries on failing commands.\n    \"\"\"\n    retry_verbosely = False\n    dpkg_or_rpm_locked = is_dpkg_or_rpm_locked(exit_code, output)\n    if dpkg_or_rpm_locked:\n        return True, 'Retrying command because package manager is locked.', \\\n               retry_verbosely\n    else:\n        return False, '', False\n\n\ndef final_check_if_dpkg_or_rpm_locked(exit_code, output):\n    \"\"\"\n    If dpkg or rpm is still locked after the retries, we want to return a specific\n    error code\n    \"\"\"\n    dpkg_or_rpm_locked = is_dpkg_or_rpm_locked(exit_code, output)\n    if dpkg_or_rpm_locked:\n        exit_code = DPKGOrRPMLockedErrorCode\n    return exit_code\n\ndef get_settings():\n    \"\"\"\n    Retrieve the configuration for this extension operation\n    \"\"\"\n    global SettingsDict\n    public_settings = None\n    protected_settings = None\n\n    if HUtilObject is not None:\n        public_settings = HUtilObject.get_public_settings()\n        protected_settings = HUtilObject.get_protected_settings()\n    elif SettingsDict is not None:\n        public_settings = SettingsDict['public_settings']\n        protected_settings = SettingsDict['protected_settings']\n    else:\n        SettingsDict = {}\n        handler_env = get_handler_env()\n        try:\n            config_dir = str(handler_env['handlerEnvironment']['configFolder'])\n        except:\n            config_dir = os.path.join(os.getcwd(), 'config')\n\n        seq_no = get_latest_seq_no()\n        settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))\n        try:\n            with open(settings_path, 'r') as 
settings_file:\n                settings_txt = settings_file.read()\n            settings = json.loads(settings_txt)\n            h_settings = settings['runtimeSettings'][0]['handlerSettings']\n            public_settings = h_settings['publicSettings']\n            SettingsDict['public_settings'] = public_settings\n        except:\n            h_settings = {}  # avoid a NameError in the protectedSettings check below\n            hutil_log_error('Unable to load handler settings from ' \\\n                            '{0}'.format(settings_path))\n\n        if ('protectedSettings' in h_settings\n                and 'protectedSettingsCertThumbprint' in h_settings\n                and h_settings['protectedSettings'] is not None\n                and h_settings['protectedSettingsCertThumbprint'] is not None):\n            encoded_settings = h_settings['protectedSettings']\n            settings_thumbprint = h_settings['protectedSettingsCertThumbprint']\n            encoded_cert_path = os.path.join('/var/lib/waagent',\n                                             '{0}.crt'.format(\n                                                       settings_thumbprint))\n            encoded_key_path = os.path.join('/var/lib/waagent',\n                                            '{0}.prv'.format(\n                                                      settings_thumbprint))\n            decoded_settings = base64.standard_b64decode(encoded_settings)\n\n            # FIPS 140-3: use 'openssl cms' (supports AES256 & DES_EDE3_CBC) with fallback to legacy 'openssl smime'\n            cms_cmd = 'openssl cms -inform DER -decrypt -recip {0} -inkey {1}'.format(encoded_cert_path, encoded_key_path)\n            smime_cmd = 'openssl smime -inform DER -decrypt -recip {0} -inkey {1}'.format(encoded_cert_path, encoded_key_path)\n\n            protected_settings_str = None\n            for decrypt_cmd in [cms_cmd, smime_cmd]:\n                try:\n                    session = subprocess.Popen([decrypt_cmd], shell=True,\n                                               stdin=subprocess.PIPE,\n                                               stderr=subprocess.STDOUT,\n                                               stdout=subprocess.PIPE)\n                    output = session.communicate(decoded_settings)\n                    # success only if return code is 0 and we have output\n                    if session.returncode == 0 and output[0]:\n                        protected_settings_str = output[0]\n                        if decrypt_cmd == cms_cmd:\n                            hutil_log_info('Decrypted protectedSettings using openssl cms.')\n                        else:\n                            hutil_log_info('Decrypted protectedSettings using openssl smime fallback.')\n                        break\n                    else:\n                        hutil_log_info('Attempt to decrypt protectedSettings with \"{0}\" failed (rc={1}).'.format(decrypt_cmd, session.returncode))\n                except OSError:\n                    pass\n\n            if protected_settings_str is None:\n                log_and_exit('Enable', GenericErrorCode, 'Failed decrypting protectedSettings')\n            protected_settings = ''\n            try:\n                protected_settings = json.loads(protected_settings_str)\n            except:\n                hutil_log_error('JSON exception decoding protected settings')\n            SettingsDict['protected_settings'] = protected_settings\n\n    return public_settings, protected_settings\n\n\ndef update_status_file(operation, exit_code, exit_status, message):\n    \"\"\"\n    Mimic 
HandlerUtil method do_status_report in case hutil method is not\n    available\n    Write status to status file\n    \"\"\"\n    handler_env = get_handler_env()\n    try:\n        extension_version = str(handler_env['version'])\n        status_dir = str(handler_env['handlerEnvironment']['statusFolder'])\n    except:\n        extension_version = \"1.0\"\n        status_dir = os.path.join(os.getcwd(), 'status')\n\n    status_txt = [{\n        \"version\" : extension_version,\n        \"timestampUTC\" : time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n        \"status\" : {\n            \"name\" : \"Microsoft.Azure.Monitor.AzureMonitorLinuxAgent\",\n            \"operation\" : operation,\n            \"status\" : exit_status,\n            \"code\" : exit_code,\n            \"formattedMessage\" : {\n                \"lang\" : \"en-US\",\n                \"message\" : message\n            }\n        }\n    }]\n\n    status_json = json.dumps(status_txt)\n\n    # Find the most recently changed config file and then use the\n    # corresponding status file\n    latest_seq_no = get_latest_seq_no()\n\n    status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))\n    status_tmp = '{0}.tmp'.format(status_path)\n    with open(status_tmp, 'w+') as tmp_file:\n        tmp_file.write(status_json)\n    os.rename(status_tmp, status_path)\n\n\ndef get_handler_env():\n    \"\"\"\n    Set and retrieve the contents of HandlerEnvironment.json as JSON\n    \"\"\"\n    global HandlerEnvironment\n    if HandlerEnvironment is None:\n        handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')\n        try:\n            with open(handler_env_path, 'r') as handler_env_file:\n                handler_env_txt = handler_env_file.read()\n            handler_env = json.loads(handler_env_txt)\n            if type(handler_env) == list:\n                handler_env = handler_env[0]\n            HandlerEnvironment = handler_env\n        except Exception as e:\n            waagent_log_error(str(e))\n    return HandlerEnvironment\n\n\ndef get_latest_seq_no():\n    \"\"\"\n    Determine the latest operation settings number to use\n    \"\"\"\n    global SettingsSequenceNumber\n    if SettingsSequenceNumber is None:\n        handler_env = get_handler_env()\n        try:\n            config_dir = str(handler_env['handlerEnvironment']['configFolder'])\n        except:\n            config_dir = os.path.join(os.getcwd(), 'config')\n\n        latest_seq_no = -1\n        cur_seq_no = -1\n        latest_time = None\n        try:\n            for dir_name, sub_dirs, file_names in os.walk(config_dir):\n                for file_name in file_names:\n                    file_basename = os.path.basename(file_name)\n                    match = re.match(r'[0-9]{1,10}\\.settings', file_basename)\n                    if match is None:\n                        continue\n                    cur_seq_no = int(file_basename.split('.')[0])\n                    file_path = os.path.join(config_dir, file_name)\n                    cur_time = os.path.getmtime(file_path)\n                    if latest_time is None or cur_time > latest_time:\n                        latest_time = cur_time\n                        latest_seq_no = cur_seq_no\n        except:\n            pass\n        if latest_seq_no < 0:\n            latest_seq_no = 0\n        SettingsSequenceNumber = latest_seq_no\n\n    return SettingsSequenceNumber\n\n\ndef run_get_output(cmd, chk_err = False, log_cmd = True):\n    \"\"\"\n    Mimic waagent method 
RunGetOutput in case waagent is not available\n    Run shell command and return exit code and output\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        # WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput\n        # If checking the number of parameters fails, pass 2\n        try:\n            sig = inspect.signature(waagent.RunGetOutput)\n            params = sig.parameters\n            waagent_params = len(params)\n        except:\n            try:\n                spec = inspect.getargspec(waagent.RunGetOutput)\n                params = spec.args\n                waagent_params = len(params)\n            except:\n                waagent_params = 2\n        if waagent_params >= 3:\n            exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)\n        else:\n            exit_code, output = waagent.RunGetOutput(cmd, chk_err)\n    else:\n        try:\n            output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,\n                                             shell = True)\n            exit_code = 0\n        except subprocess.CalledProcessError as e:\n            exit_code = e.returncode\n            output = e.output\n    \n    # Python 2: encode unicode -> UTF-8 bytes (str). Python 3: decode bytes -> str.\n    try:  # Python 2\n        if isinstance(output, unicode):  # type: ignore  # noqa: F821\n            output = output.encode('utf-8', 'ignore')\n    except NameError:  # Python 3\n        if isinstance(output, (bytes, bytearray)):\n            output = bytes(output).decode('utf-8', 'ignore')\n\n    return exit_code, output.strip()\n\n\ndef init_waagent_logger():\n    \"\"\"\n    Initialize waagent logger\n    If waagent has not been imported, catch the exception\n    \"\"\"\n    try:\n        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)\n    except Exception as e:\n        print('Unable to initialize waagent log because of exception ' \\\n              '{0}'.format(e))\n\n\ndef waagent_log_info(message):\n    \"\"\"\n    Log informational message, being cautious of possibility that waagent may\n    not be imported\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        waagent.Log(message)\n    else:\n        print('Info: {0}'.format(message))\n\n\ndef waagent_log_error(message):\n    \"\"\"\n    Log error message, being cautious of possibility that waagent may not be\n    imported\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        waagent.Error(message)\n    else:\n        print('Error: {0}'.format(message))\n\n\ndef hutil_log_info(message):\n    \"\"\"\n    Log informational message, being cautious of possibility that hutil may\n    not be imported and configured\n    \"\"\"\n    if HUtilObject is not None:\n        HUtilObject.log(message)\n    else:\n        print('Info: {0}'.format(message))\n\n\ndef hutil_log_error(message):\n    \"\"\"\n    Log error message, being cautious of possibility that hutil may not be\n    imported and configured\n    \"\"\"\n    if HUtilObject is not None:\n        HUtilObject.error(message)\n    else:\n        print('Error: {0}'.format(message))\n\n\ndef log_and_exit(operation, exit_code = GenericErrorCode, message = ''):\n    \"\"\"\n    Log the exit message and perform the exit\n    \"\"\"\n    if exit_code == 0:\n        waagent_log_info(message)\n        hutil_log_info(message)\n        exit_status = 'success'\n    else:\n        waagent_log_error(message)\n        hutil_log_error(message)\n        exit_status = 'failed'\n\n    if HUtilObject is not 
None:\n        HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),\n                            message)\n    else:\n        update_status_file(operation, str(exit_code), exit_status, message)\n        sys.exit(exit_code)\n\n\ndef validate_port_number(port_value, port_name):\n    \"\"\"\n    Validates that a port value is a valid integer within the range 1-65535.\n\n    Args:\n        port_value: The port value to validate (string)\n        port_name: The name of the port for error messages (e.g., \"fluent\", \"syslog\")\n\n    Returns:\n        The validated port number as a string, or empty string if invalid\n    \"\"\"\n    if not port_value:\n        return ''\n\n    try:\n        port_int = int(port_value.strip())\n        if port_int < 1 or port_int > 65535:\n            hutil_log_error('Invalid {0} port number: {1}. Must be between 1-65535.'.format(port_name, port_int))\n            return ''\n        return str(port_int)\n    except ValueError:\n        hutil_log_error('Invalid {0} port value: {1}. Must be an integer.'.format(port_name, port_value))\n        return ''\n\n\n# Exceptions\n# If these exceptions are expected to be caught by the main method, they\n# include an error_code field with an integer with which to exit from main\n\nclass AzureMonitorAgentForLinuxException(Exception):\n    \"\"\"\n    Base exception class for all exceptions; as such, its error code is the\n    basic error code traditionally returned in Linux: 1\n    \"\"\"\n    error_code = GenericErrorCode\n    def get_error_message(self, operation):\n        \"\"\"\n        Return a descriptive error message based on this type of exception\n        \"\"\"\n        return '{0} failed with exit code {1}'.format(operation,\n                                                      self.error_code)\n\n\nclass ParameterMissingException(AzureMonitorAgentForLinuxException):\n    \"\"\"\n    There is a missing parameter for the AzureMonitorLinuxAgent Extension\n    \"\"\"\n    error_code = MissingorInvalidParameterErrorCode\n    def get_error_message(self, operation):\n        return '{0} failed due to a missing parameter: {1}'.format(operation,\n                                                                   self.error_code)\n\nif __name__ == '__main__' :\n    main()\n"
  },
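  {
    "path": "AzureMonitorAgent/examples/retry_check_sketch.py",
    "content": "# Hypothetical sketch -- this file is not shipped with the extension.\n# It illustrates the retry_check contract that run_command_with_retries_output()\n# in the handler above expects: a callable taking (exit_code, output) and\n# returning (should_retry, retry_message, retry_verbosely), mirroring\n# retry_if_dpkg_or_rpm_locked().\nimport re\n\ndef retry_if_locked(exit_code, output):\n    # Retry while the package manager database is held by another process.\n    if exit_code != 0 and re.search(r'^.*dpkg.+lock.*$', output, re.M):\n        return True, 'Retrying command because package manager is locked.', False\n    return False, '', False\n\nif __name__ == '__main__':\n    # Simulated dpkg failure while another install holds the lock:\n    # prints (True, 'Retrying command because package manager is locked.', False)\n    print(retry_if_locked(1, 'E: dpkg was interrupted; could not get lock /var/lib/dpkg/lock'))\n"
  },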
  {
    "path": "AzureMonitorAgent/agent.version",
    "content": "AGENT_VERSION=\"1.12.0\"\nAGENT_VERSION_DATE=\"\"\nMDSD_DEB_PACKAGE_NAME=\"azuremonitoragent_1.12.0-build.master.89_x86_64.deb\"\nMDSD_RPM_PACKAGE_NAME=\"azuremonitoragent-1.12.0-build.master.89_x86_64.rpm\"\n"
  },
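  {
    "path": "AzureMonitorAgent/examples/parse_agent_version_sketch.py",
    "content": "# Hypothetical sketch -- this file is not shipped with the extension.\n# It shows one way to parse the KEY=\"value\" pairs in agent.version above;\n# the function name and default path are illustrative only.\ndef parse_agent_version(path='agent.version'):\n    values = {}\n    with open(path) as fp:\n        for line in fp:\n            line = line.strip()\n            if line and '=' in line and not line.startswith('#'):\n                key, _, value = line.partition('=')\n                values[key] = value.strip('\"')\n    return values\n\nif __name__ == '__main__':\n    # Expected keys: AGENT_VERSION, AGENT_VERSION_DATE,\n    # MDSD_DEB_PACKAGE_NAME, MDSD_RPM_PACKAGE_NAME\n    print(parse_agent_version())\n"
  },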
  {
    "path": "AzureMonitorAgent/ama_tst/AMA-Troubleshooting-Tool.md",
    "content": "# Troubleshooting Tool for Azure Monitor Linux Agent\nThe following document provides quick information on the AMA Troubleshooting Tool, including how to use it and its checks.\n\n# Table of Contents\n- [Troubleshooter Basics](#troubleshooter-basics)\n- [Using the Troubleshooter](#using-the-troubleshooter)\n- [Requirements](#requirements)\n- [Scenarios Covered](#scenarios-covered)\n\n## Troubleshooter Basics\n\nThe Azure Monitor Linux Agent Troubleshooter is designed in order to help find and diagnose issues with the agent, as well as general health checks. At the current moment, the AMA TST can run checks to verify agent installation, connection, and general heartbeat, as well as collect AMA-related logs automatically from the affected Linux VM. In addition, more checks are being added regularly, to help increase the number of scenarios the AMA TST can catch.\n\n## Using the Troubleshooter\n\nThe AMA Linux Troubleshooter is automatically installed upon installation of AMA, and can be located and run by the following commands:\n1. Go to the troubleshooter's installed location: `cd /var/lib/waagent/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-<version>/ama_tst`\n2. Run the troubleshooter: `sudo sh ama_troubleshooter.sh`\n\nIf the troubleshooter isn't properly installed, or needs to be updated, the newest version can be downloaded and run by following the steps below.\n\n1. Copy the troubleshooter bundle onto your machine: `wget https://github.com/Azure/azure-linux-extensions/raw/master/AzureMonitorAgent/ama_tst/ama_tst.tgz`\n2. Unpack the bundle: `tar -xzvf ama_tst.tgz`\n3. Run the troubleshooter: `sudo sh ama_troubleshooter.sh`\n\n## Requirements\n\nThe AMA Linux Troubleshooter requires Python 2.6+ installed on the machine, but will work with either Python2 or Python3. In addition, the following Python packages are required to run (all should be present on a default install of Python2 or Python3):\n| Python Package | Required for Python2? | Required for Python3? |\n| --- | --- | --- |\n| copy | **yes** | **yes** |\n| datetime | **yes** | **yes** |\n| json | **yes** | **yes** |\n| os | **yes** | **yes** |\n| platform | **yes** | **yes** |\n| re | **yes** | **yes** |\n| requests | no | **yes** |\n| shutil | **yes** | **yes** |\n| subprocess | **yes** | **yes** |\n| urllib | **yes** | no |\n| xml.dom.minidom | **yes** | **yes** |\n\n## Scenarios Covered\n\n1. Agent having installation issues\n\t* Supported OS / version\n\t* Available disk space\n\t* Package manager is available (dpkg/rpm)\n\t* Submodules are installed successfully\n\t* AMA installed properly\n\t* Syslog available (rsyslog/syslog-ng)\n\t* Using newest version of AMA\n\t* Syslog user generated successfully\n2. Agent doesn't start, can't connect to Log Analytics\n  \t* AMA parameters set up\n  \t* AMA DCR created successfully\n  \t* Connectivity to endpoints\n  \t* Submodules started\n  \t* IMDS/HIMDS metadata and MSI tokens available\n3. Agent is unhealthy, heartbeat doesn't work properly\n  \t* Submodule status\n  \t* Parse error files\n4. Agent has high CPU / memory usage\n\t* Check logrotate\n\t* Monitor CPU/memory usage in 5 minutes (interaction mode only)\n5.  Agent syslog collection doesn't work properly\n\t* Rsyslog / syslog-ng set up and running\n\t* Syslog configuration being pulled / used\n\t* Syslog socket is accessible\n6. Agent custom log collection doesn't work properly\n\t* Custom log configuration being pulled / used\n\t* Log file paths is valid\n7. 
Agent metrics collection doesn't work properly\n\t* Runs the metrics troubleshooter script\n\t* Produces `MdmDataCollectionOutput_*.tar.gz` for investigation\n8. (A) Run all scenarios\n\t* Run through scenarios 1-7 in order\n9. (L) Collect logs\n\t* Collects all of the logs needed to troubleshoot AMA in a zip file\n\t* Includes MDSD and AMACoreAgent environment variables\n"
  },
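  {
    "path": "AzureMonitorAgent/examples/run_tst_noninteractive_sketch.py",
    "content": "# Hypothetical sketch -- this file is not shipped with the extension.\n# It invokes the troubleshooter described in AMA-Troubleshooting-Tool.md above\n# in non-interactive mode ('-A' runs all checks, '-L' collects logs).\n# Assumes Python 3.5+ for subprocess.run and that it is executed from the\n# ama_tst directory with sudo available.\nimport subprocess\n\nresult = subprocess.run(['sudo', 'sh', 'ama_troubleshooter.sh', '-A'],\n                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n                        universal_newlines=True)\nprint('exit code: {0}'.format(result.returncode))\nprint(result.stdout)\n"
  },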
  {
    "path": "AzureMonitorAgent/ama_tst/__init__.py",
    "content": "# AMA troubleshooter modules"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/ama_troubleshooter.sh",
    "content": "#!/usr/bin/env bash\n\nCOMMAND=\"./modules/main.py\"\nPYTHON=\"\"\nTST_VERSION=\"1.7\"  # update when changes are made to TST\nARG=\"$@\"\n\ndisplay_help() {\n    echo \"OPTIONS\"\n    echo \"  -A              Run All Troubleshooting Tool checks\"\n    echo \"  -L              Run Log Collector\"\n    echo \"  -v, --version   Print Troubleshooting Tool version\"\n}\n\nfind_python() {\n    local python_exec_command=$1\n\n    if command -v python3 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python3\"\n    elif command -v python2 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python2\"\n    elif command -v /usr/libexec/platform-python >/dev/null 2>&1 ; then\n        # If a user-installed python isn't available, check for a platform-python. This is typically only used in RHEL 8.0.\n        echo \"User-installed python not found. Using /usr/libexec/platform-python as the python interpreter.\"\n        eval ${python_exec_command}=\"/usr/libexec/platform-python\"\n    fi\n}\n\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ] # If python is not installed, we will fail the install with the following error, requiring cx to have python pre-installed\nthen\n    echo \"No Python interpreter found, which is an AMA extension dependency. Please install Python 3, or Python 2 if the former is unavailable.\" >&2\n    exit 1\nelse\n    echo \"Python version being used is:\"\n    ${PYTHON} --version 2>&1\n    echo \"\"\nfi\n\nif [ \"$1\" = \"--help\" ] || [ \"$1\" = \"-h\" ]\nthen\n    display_help\nelif [ \"$1\" = \"--version\" ] || [ \"$1\" = \"-v\" ]\nthen\n    echo \"AMA Troubleshooting Tool v.$TST_VERSION\"\nelse\n    echo \"Starting AMA Troubleshooting Tool v.$TST_VERSION...\"\n    echo \"\"\n    PYTHONPATH=${PYTHONPATH} ${PYTHON} ${COMMAND} ${ARG}\nfi\nexit $?\n"
  },
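  {
    "path": "AzureMonitorAgent/examples/find_python_sketch.py",
    "content": "# Hypothetical sketch -- this file is not shipped with the extension.\n# It expresses the same interpreter-discovery order as find_python() in\n# ama_troubleshooter.sh above, using shutil.which (Python 3.3+).\nimport shutil\n\ndef find_python():\n    # Prefer python3, then python2, then RHEL 8's platform-python.\n    for candidate in ('python3', 'python2', '/usr/libexec/platform-python'):\n        if shutil.which(candidate):\n            return candidate\n    return None\n\nif __name__ == '__main__':\n    print(find_python() or 'No Python interpreter found')\n"
  },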
  {
    "path": "AzureMonitorAgent/ama_tst/modules/__init__.py",
    "content": "# AMA troubleshooter modules"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/connect/__init__.py",
    "content": "# Connection check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/connect/check_endpts.py",
    "content": "import subprocess\nimport traceback\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import geninfo_lookup, find_dce\n\nSSL_CMD = \"echo | openssl s_client -connect {0}:443 -brief\"\nCURL_CMD = \"curl -s -S -k https://{0}/ping\"\n\nGLOBAL_HANDLER_URL = \"global.handler.control.monitor.azure.com\"\nREGION_HANDLER_URL = \"{0}.handler.control.monitor.azure.com\"\nODS_URL = \"{0}.ods.opinsights.azure.com\"\nME_URL = \"management.azure.com\"\nME_REGION_URL = \"{0}.monitoring.azure.com\"\n\n\ndef _log_ssl_error(context, exception, show_traceback=True):\n    \"\"\"Helper function to log SSL errors cleanly\"\"\"\n    print(\"{0}:\".format(context))\n    print(\"  Type: {0}\".format(type(exception).__name__))\n    print(\"  Message: {0}\".format(str(exception)))\n    \n    # For CalledProcessError, show command details\n    if isinstance(exception, subprocess.CalledProcessError):\n        print(\"  Command: {0}\".format(getattr(exception, 'cmd', 'Unknown')))\n        print(\"  Return code: {0}\".format(getattr(exception, 'returncode', 'Unknown')))\n        if hasattr(exception, 'output') and exception.output:\n            print(\"  Output: {0}\".format(exception.output.strip()))\n    \n    # Show traceback if requested\n    if show_traceback:\n        print(\"  Traceback:\")\n        print(traceback.format_exc())\n\n\ndef check_endpt_ssl(ssl_cmd, endpoint):\n    \"\"\"\n    openssl connect to specific endpoint\n    \"\"\"\n    try:\n        ssl_output = subprocess.check_output(ssl_cmd.format(endpoint), shell=True,\\\n                     stderr=subprocess.STDOUT, universal_newlines=True)\n        ssl_output_lines = ssl_output.split('\\n')\n        \n        (connected, verified) = (False, False)\n        for line in ssl_output_lines:\n            if (line == \"CONNECTION ESTABLISHED\"):\n                connected = True\n                continue\n            if (line == \"Verification: OK\"):\n                verified = True\n                continue\n\n        # If connection established but no explicit verification status in brief mode,\n        # try a verification check to determine if SSL cert is valid\n        if connected and not verified:\n            try:\n                # Use verify_return_error flag to test certificate verification\n                verify_cmd = ssl_cmd.replace('-brief', '-verify_return_error -brief')\n                verify_output = subprocess.check_output(verify_cmd.format(endpoint), shell=True,\\\n                               stderr=subprocess.STDOUT, universal_newlines=True)\n                # If verify command succeeds (no exception), verification is OK\n                if \"CONNECTION ESTABLISHED\" in verify_output:\n                    verified = True\n            except subprocess.CalledProcessError as e:\n                # Verification failed - certificate issues\n                _log_ssl_error(\"SSL verification failed\", e, show_traceback=False)\n                verified = False\n            except Exception as e:\n                # Other error - assume verified if basic connection worked\n                # This handles cases where verify_return_error isn't supported\n                _log_ssl_error(\"SSL verification exception\", e, show_traceback=True)\n                verified = False\n\n        return (connected, verified, ssl_output)\n    except Exception as e:\n        _log_ssl_error(\"SSL connection failed\", e, show_traceback=True)\n        return (False, False, str(e))\n\n\ndef 
check_internet_connect():\n    \"\"\"\n    check general internet connectivity\n    \"\"\"\n    (connected_docs, verified_docs, e) = check_endpt_ssl(SSL_CMD, \"docs.microsoft.com\")\n    if (connected_docs and verified_docs):\n        return NO_ERROR\n    elif (connected_docs and not verified_docs):\n        error_info.append((SSL_CMD.format(\"docs.microsoft.com\"),))\n        return WARN_INTERNET\n    else:\n        error_info.append((SSL_CMD.format(\"docs.microsoft.com\"),))\n        return WARN_INTERNET_CONN\n\n\ndef resolve_ip(endpoint):\n    try:\n        result = subprocess.call(['nslookup', endpoint], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        if result != 0:\n            return False, \"nslookup {0}\".format(endpoint)\n        else:\n            return (True, None)\n    except Exception as e:\n        return (False, e)\n\n\ndef check_endpt_curl(endpoint):\n    command = CURL_CMD.format(endpoint)\n    try:\n        # check proxy\n        proxy = geninfo_lookup('MDSD_PROXY_ADDRESS')\n        username = geninfo_lookup('MDSD_PROXY_USERNAME')\n        if proxy is not None:\n            command = command + ' -x {0}'.format(proxy)\n        if username is not None:\n            password = geninfo_lookup('MDSD_PROXY_PASSWORD')\n            command = command + ' -U {0}:{1}'.format(username, password)\n        output = subprocess.check_output(command, shell=True,\\\n                     stderr=subprocess.STDOUT, universal_newlines=True)\n        if output == \"Healthy\":\n            return NO_ERROR\n        else:\n            if proxy is None:\n                error_info.append((endpoint, command, output))\n                return ERR_ENDPT\n            else:\n                error_info.append((endpoint, command, output))\n                return ERR_ENDPT_PROXY\n    except Exception as e:\n        error_info.append((endpoint, command, e))\n        return ERR_ENDPT\n\n\ndef check_ama_endpts():\n    # compose URLs to check\n    endpoints = [GLOBAL_HANDLER_URL]\n    regions = geninfo_lookup('DCR_REGION')\n    workspace_ids = geninfo_lookup('DCR_WORKSPACE_ID')\n    \n    if regions is None or workspace_ids is None:\n        return ERR_INFO_MISSING\n    for region in regions:\n        endpoints.append(REGION_HANDLER_URL.format(region))\n        \n    for id in workspace_ids:\n        endpoints.append(ODS_URL.format(id))\n    \n    # guard against a missing ME_REGION before iterating over it\n    me_regions = geninfo_lookup('ME_REGION')\n    if me_regions is not None:\n        endpoints.append(ME_URL)\n        for me_region in me_regions:\n            endpoints.append(ME_REGION_URL.format(me_region))\n\n    # modify URLs if URL suffix is .us (Azure Government) or .cn (Azure China)\n    url_suffix = geninfo_lookup('URL_SUFFIX')\n    if url_suffix != '.com':\n        # str.replace() returns a new string, so rebuild the list instead of\n        # discarding the results\n        endpoints = [endpoint.replace('.com', url_suffix) for endpoint in endpoints]\n\n    dce, e = find_dce()\n    if e is not None:\n        error_info.append((e,))\n        return ERR_DCE\n    for endpoint in dce:\n        endpoints.append(endpoint)\n        \n    for endpoint in endpoints:\n        # check if IP address can be resolved using nslookup\n        resolved, e = resolve_ip(endpoint)\n        if not resolved:\n            error_info.append((endpoint, e))\n            return ERR_RESOLVE_IP\n        \n        # check ssl handshake\n        command = SSL_CMD\n        \n        # skip openssl check with authenticated proxy\n        if geninfo_lookup('MDSD_PROXY_USERNAME') is not None:\n            return WARN_OPENSSL_PROXY\n        proxy = geninfo_lookup('MDSD_PROXY_ADDRESS')\n        if proxy is not None:\n            proxy = proxy.replace('http://', '')\n            command = command + ' -proxy {0}'.format(proxy)\n        if geninfo_lookup('SSL_CERT_DIR') is not None:\n            command = command + \" -CApath \" + geninfo_lookup('SSL_CERT_DIR')\n        if geninfo_lookup('SSL_CERT_FILE') is not None:\n            command = command + \" -CAfile \" + geninfo_lookup('SSL_CERT_FILE')\n        (connected, verified, e) = check_endpt_ssl(command, endpoint)\n        if not connected or not verified:\n            error_info.append((endpoint, command.format(endpoint), e))\n            return ERR_ENDPT\n        \n        # check AMCS ping results\n        if \"handler.control.monitor\" in endpoint:\n            checked_curl = check_endpt_curl(endpoint)\n            if checked_curl != NO_ERROR:\n                return checked_curl\n    return NO_ERROR"
  },
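  {
    "path": "AzureMonitorAgent/examples/parse_s_client_brief_sketch.py",
    "content": "# Hypothetical sketch -- this file is not shipped with the extension.\n# It shows the two markers that check_endpt_ssl() in check_endpts.py above\n# looks for in 'openssl s_client ... -brief' output; the sample output below\n# is fabricated for illustration.\nSAMPLE_BRIEF_OUTPUT = 'CONNECTION ESTABLISHED\\nProtocol version: TLSv1.3\\nVerification: OK\\n'\n\ndef parse_brief(ssl_output):\n    connected = verified = False\n    for line in ssl_output.split('\\n'):\n        if line == 'CONNECTION ESTABLISHED':\n            connected = True\n        elif line == 'Verification: OK':\n            verified = True\n    return connected, verified\n\nif __name__ == '__main__':\n    print(parse_brief(SAMPLE_BRIEF_OUTPUT))  # -> (True, True)\n"
  },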
  {
    "path": "AzureMonitorAgent/ama_tst/modules/connect/check_imds.py",
    "content": "import subprocess\nimport json\n\nfrom error_codes    import *\nfrom errors         import error_info\nfrom helpers        import general_info, geninfo_lookup, is_arc_installed\n\nMETADATA_CMD = 'curl -s -H Metadata:true --noproxy \"*\" \"http://{0}/metadata/instance/compute?api-version=2020-06-01\"'\nAZURE_IP = \"169.254.169.254\"\nARC_IP = \"127.0.0.1:40342\"\n\nAZURE_TOKEN_CMD = \"curl 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fmanagement.azure.com%2F' -H Metadata:true -s\"\nARC_TOKEN_CMD = 'ChallengeTokenPath=$(curl -s -D - -H Metadata:true \"http://127.0.0.1:40342/metadata/identity/oauth2/token?api-version=2019-11-01&resource=https%3A%2F%2Fmanagement.azure.com\"'\\\n                    '| grep Www-Authenticate | cut -d \"=\" -f 2 | tr -d \"[:cntrl:]\") ; ' \\\n                    'ChallengeToken=$(cat $ChallengeTokenPath) ; ' \\\n                    'curl -s -H Metadata:true -H \"Authorization: Basic $ChallengeToken\" \"http://127.0.0.1:40342/metadata/identity/oauth2/token?api-version=2019-11-01&resource=https%3A%2F%2Fmanagement.azure.com\"'\n\n\ndef check_metadata():\n    global general_info\n    \n    type = \"Azure\"\n    if is_arc_installed():\n        command = METADATA_CMD.format(ARC_IP)\n        type = \"Hybrid\"\n    else: \n        command = METADATA_CMD.format(AZURE_IP)\n    try:\n        output = subprocess.check_output(command, shell=True,\\\n                     stderr=subprocess.STDOUT, universal_newlines=True)\n        output_json = json.loads(output)\n        attributes = ['azEnvironment', 'resourceId', 'location']\n        for attr in attributes:\n            if not attr in output_json:\n                error_info.append((type, command, output))\n                return ERR_IMDS_METADATA\n            else:\n                attr_result = output_json[attr]\n                general_info[attr] = attr_result\n    except Exception as e:\n        error_info.append((type, command, e))\n        return ERR_IMDS_METADATA\n    return NO_ERROR\n\n\ndef check_token():\n    if is_arc_installed():\n        command = ARC_TOKEN_CMD\n    else: \n        command = AZURE_TOKEN_CMD\n    try:\n        # check AMA use UAI\n        managed_identity = geninfo_lookup('MANAGED_IDENTITY')\n        if not managed_identity == None:\n            managed_identity = managed_identity.replace('mi_res_id#', 'mi_res_id=')\n            command = command.replace('token?', 'token?{0}&'.format(managed_identity))\n        \n        output = subprocess.check_output(command, shell=True,\\\n                     stderr=subprocess.STDOUT, universal_newlines=True)\n        output_json = json.loads(output)\n        if not 'access_token' in output_json:\n            error_info.append((command, output))\n            return ERR_ACCESS_TOKEN\n    except Exception as e:\n        error_info.append((command, e))\n        return ERR_ACCESS_TOKEN\n    return NO_ERROR\n     \n     \ndef check_imds_api():\n    # check metadata\n    checked_metadata = check_metadata()\n    if not checked_metadata == NO_ERROR:\n        return checked_metadata\n    \n    # check access token\n    checked_token = check_token()\n    if not checked_token == NO_ERROR:\n        return checked_token\n    return NO_ERROR"
  },
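  {
    "path": "AzureMonitorAgent/ama_tst/modules/connect/example_imds_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It restates\nthe endpoint selection and response validation done by check_imds.py, run\nagainst a canned response instead of a live IMDS endpoint. The sample payload\nbelow is hypothetical.\n\"\"\"\nimport json\n\nMETADATA_CMD = 'curl -s -H Metadata:true --noproxy \"*\" \"http://{0}/metadata/instance/compute?api-version=2020-06-01\"'\nAZURE_IP = \"169.254.169.254\"\nARC_IP = \"127.0.0.1:40342\"\n\ndef build_metadata_cmd(arc_installed):\n    # Arc machines reach IMDS through the local HIMDS proxy instead of the Azure IP\n    return METADATA_CMD.format(ARC_IP if arc_installed else AZURE_IP)\n\ndef validate_metadata(output):\n    # check_metadata() requires these three attributes in the response\n    output_json = json.loads(output)\n    return all(attr in output_json for attr in ('azEnvironment', 'resourceId', 'location'))\n\nif __name__ == '__main__':\n    print(build_metadata_cmd(False))\n    sample = '{\"azEnvironment\": \"AzurePublicCloud\", \"resourceId\": \"/subscriptions/xxx\", \"location\": \"eastus\"}'\n    print(validate_metadata(sample))  # True\n"
  },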
  {
    "path": "AzureMonitorAgent/ama_tst/modules/connect/connect.py",
    "content": "import os\nimport json\nimport subprocess\nimport platform\n\nfrom error_codes       import *\nfrom errors            import error_info, is_error, print_errors\nfrom helpers           import general_info, is_metrics_configured, find_dcr_workspace\nfrom .check_endpts     import check_internet_connect, check_ama_endpts\nfrom .check_imds       import check_imds_api\n\ntry:\n    FileNotFoundError\nexcept NameError:\n    FileNotFoundError = IOError\n\ndef check_parameters():\n    global general_info\n    try:\n        with open('/etc/default/azuremonitoragent', 'r') as fp:\n            for line in fp:\n                line = line.split('export')[1].strip()\n                key = line.split('=')[0]\n                value = line.split('=')[1]\n                general_info[key] = value\n    except (FileNotFoundError, AttributeError) as e:\n        error_info.append((e,))\n        return ERR_AMA_PARAMETERS\n    return NO_ERROR\n   \ndef check_workspace():\n    wkspc_id, wkspc_region, agent_settings, e = find_dcr_workspace()\n    if e != None:\n        error_info.append((e,))\n        return ERR_NO_DCR \n    return NO_ERROR\n\ndef check_subcomponents(): \n\n    services = ['azuremonitoragent']\n    services.append('azuremonitor-coreagent')\n    services.append('azuremonitor-agentlauncher')\n\n    if is_metrics_configured():\n        services.append('metrics-sourcer')\n        services.append('metrics-extension')\n        \n    for service in services:\n        try:\n            status = subprocess.check_output(['systemctl', 'status', service],\\\n                                    universal_newlines=True, stderr=subprocess.STDOUT)\n            status_lines = status.split('\\n')\n            for line in status_lines:\n                line = line.strip()\n                if line.startswith('Active:'):\n                    if not line.split()[1] == 'active':\n                        error_info.append((service, status))\n                        return ERR_SUBCOMPONENT_STATUS\n        except subprocess.CalledProcessError as e:\n            error_info.append((e,))\n            return ERR_CHECK_STATUS\n            \n    return NO_ERROR\n\ndef check_connection(interactive, err_codes=True, prev_success=NO_ERROR):\n    print(\"CHECKING CONNECTION...\")\n\n    success = prev_success\n    \n    # check /etc/default/azuremonitoragent file\n    print(\"Checking AMA parameters in /etc/default/azuremonitoragent...\")\n    checked_parameters = check_parameters()\n    if (is_error(checked_parameters)):\n        return print_errors(checked_parameters)\n    else:\n        success = print_errors(checked_parameters)\n        \n    # check DCR\n    print(\"Checking DCR...\")\n    checked_workspace = check_workspace()\n    if (is_error(checked_workspace)):\n        return print_errors(checked_workspace)\n    else:\n        success = print_errors(checked_workspace)\n\n    # check general internet connectivity\n    print(\"Checking if machine is connected to the internet...\")\n    checked_internet_connect = check_internet_connect()\n    if (is_error(checked_internet_connect)):\n        return print_errors(checked_internet_connect)\n    else:\n        success = print_errors(checked_internet_connect)\n\n\n    # check if AMA endpoints connected\n    print(\"Checking if machine can connect to Azure Monitor control-plane and data ingestion endpoints...\")\n    checked_ama_endpts = check_ama_endpts()\n    if (is_error(checked_ama_endpts)):\n        return print_errors(checked_ama_endpts)\n    else:\n        
success = print_errors(checked_ama_endpts)\n\n    # check if subcomponents are active (e.g. mdsd, telegraf, etc)\n    print(\"Checking if subcomponents have been started...\")\n    checked_subcomponents = check_subcomponents()\n    if (is_error(checked_subcomponents)):\n        return print_errors(checked_subcomponents)\n    else:\n        success = print_errors(checked_subcomponents)\n        \n    print(\"Checking if IMDS metadata and MSI tokens are available...\")\n    checked_imds_api = check_imds_api()\n    if (is_error(checked_imds_api)):\n        return print_errors(checked_imds_api)\n    else:\n        success = print_errors(checked_imds_api)\n    return success\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/custom_logs/__init__.py",
    "content": "# Custom logs check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/custom_logs/check_clconf.py",
    "content": "import os\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import general_info, geninfo_lookup, run_cmd_output\n\nCLCONF_PATH = \"/etc/opt/microsoft/azuremonitoragent/config-cache/fluentbit/td-agent.conf\"\n\ndef check_customlog_input():\n    cl_input = geninfo_lookup('CL_INPUT')\n    if (cl_input == None or len(cl_input) == 0):\n        error_info.append((\"No custom logs file path\",))\n        return ERR_CL_INPUT\n    # cl_input is a list, not a dictionary - iterate over the paths directly\n    for path in cl_input:\n        # Skip malformed entries that don't look like valid file paths\n        if not path or not path.startswith('/'):\n            continue\n        try: \n            check_path = run_cmd_output('ls {0}'.format(path)).strip()\n            if check_path.endswith('No such file or directory'):\n                error_info.append((check_path,))\n                return ERR_CL_INPUT\n        except Exception as e:\n            error_info.append((e,))\n            return ERR_CL_INPUT\n\n    return NO_ERROR\n        \n\ndef check_customlog_conf():\n    global general_info\n    # verify td-agent.conf exists / not empty\n    if (not os.path.isfile(CLCONF_PATH)):\n        error_info.append(('file', CLCONF_PATH))\n        return ERR_FILE_MISSING\n    if (os.stat(CLCONF_PATH).st_size == 0):\n        error_info.append((CLCONF_PATH,))\n        return ERR_FILE_EMPTY\n    general_info['CL_INPUT'] = []\n    try:    \n        with open(CLCONF_PATH, 'r') as cl_file:\n            cl_lines = cl_file.readlines()\n            for cl_line in cl_lines: \n                if (cl_line.strip().startswith('log_file')):\n                    cl_log_file = cl_line.strip().split('log_file')[1]\n                    general_info['CL_LOG'] =  cl_log_file\n                    \n                # Only match exact \"Path\" lines (not \"Path_Key\" or other variants)\n                if (cl_line.strip().startswith('Path ') or cl_line.strip().startswith('Path\\t')):\n                    # Extract the path value after the whitespace\n                    parts = cl_line.strip().split(None, 1)  # Split on any whitespace, max 1 split\n                    if len(parts) > 1:\n                        cl_input_path = parts[1].strip()\n                        # Only add valid file paths (should start with /)\n                        if cl_input_path.startswith('/'):\n                            general_info['CL_INPUT'].append(cl_input_path)\n\n    except Exception as e:\n        error_info.append((e,))\n        return ERR_CL_CONF\n\n    print('cl_input value: {0}'.format(general_info['CL_INPUT']))\n\n    return NO_ERROR"
  },
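  {
    "path": "AzureMonitorAgent/ama_tst/modules/custom_logs/example_path_parse_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It restates\nthe Path-line extraction that check_clconf.check_customlog_conf performs,\nrun on a canned fluent-bit snippet. The snippet is hypothetical.\n\"\"\"\n\nSAMPLE_CONF = \"\"\"\\\n[INPUT]\n    Name tail\n    Path /var/log/custom/app.log\n    Path_Key filepath\n\"\"\"\n\ndef extract_paths(text):\n    paths = []\n    for line in text.split('\\n'):\n        stripped = line.strip()\n        # match the exact 'Path' keyword only; 'Path_Key' must not match\n        if stripped.startswith('Path ') or stripped.startswith('Path\\t'):\n            parts = stripped.split(None, 1)\n            if len(parts) > 1 and parts[1].strip().startswith('/'):\n                paths.append(parts[1].strip())\n    return paths\n\nif __name__ == '__main__':\n    print(extract_paths(SAMPLE_CONF))  # ['/var/log/custom/app.log']\n"
  },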
  {
    "path": "AzureMonitorAgent/ama_tst/modules/custom_logs/custom_logs.py",
    "content": "from error_codes          import *\nfrom errors               import is_error, get_input, print_errors\nfrom .check_clconf        import check_customlog_conf, check_customlog_input\n\ndef check_custom_logs(interactive, prev_success=NO_ERROR):\n    if (interactive):\n        using_cl = get_input(\"Are you currently using custom logs? (y/n)\",\\\n                            (lambda x : x.lower() in ['y','yes','n','no']),\\\n                            \"Please type either 'y'/'yes' or 'n'/'no' to proceed.\")\n        # not using custom logs\n        if (using_cl in ['n','no']):\n            print(\"Continuing on with the rest of the troubleshooter...\")\n            print(\"================================================================================\")\n            return prev_success\n        # using custom logs\n        else:\n            print(\"Continuing on with troubleshooter...\")\n            print(\"--------------------------------------------------------------------------------\")\n\n    print(\"CHECKING FOR CUSTOM LOG ISSUES...\")\n\n    success = prev_success\n\n\n    # check td-agent.conf\n    print(\"Checking for custom logs configuration files...\")\n    checked_clconf = check_customlog_conf()\n    if (is_error(checked_clconf)):\n        return print_errors(checked_clconf)\n    else:\n        success = print_errors(checked_clconf)\n\n    # check custom logs input file path\n    print(\"Checking for custom logs input files...\")\n    checked_customlog_input = check_customlog_input()\n    if (is_error(checked_customlog_input)):\n        return print_errors(checked_customlog_input)\n    else:\n        success = print_errors(checked_customlog_input)\n    return success"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/error_codes.py",
    "content": "# # General Errors\nNO_ERROR = 0\nUSER_EXIT = 1\nERR_SUDO_PERMS = 100\nERR_FOUND = 101\n\n# Warnings\nWARN_INTERNET_CONN = 10\nWARN_INTERNET = 11\nWARN_OPENSSL_PROXY = 12\nWARN_MDSD_ERR_FILE = 13\nWARN_RESTART_LOOP = 14\n\n# Installation Errors\nERR_BITS = 102\nERR_OS_VER = 103\nERR_OS = 104\nERR_FINDING_OS = 105\nERR_FREE_SPACE = 106\nERR_PKG_MANAGER = 107\nERR_SUBCOMPONENT_INSTALL = 108\nERR_MULTIPLE_AMA = 109\nERR_AMA_INSTALL = 110\nERR_LOG_DAEMON = 111\nERR_SYSLOG_USER = 112\nERR_OLD_AMA_VER = 113\nERR_GETTING_AMA_VER = 114\nERR_COUNTER_FILE_MISSING = 115\n\n# Onboarding Errors\nERR_AMA_PARAMETERS = 200\nERR_NO_DCR = 201\nERR_INFO_MISSING = 202\nERR_ENDPT = 203\nERR_SUBCOMPONENT_STATUS = 204\nERR_CHECK_STATUS = 205\nERR_RESOLVE_IP = 206\nERR_IMDS_METADATA = 207\nERR_ACCESS_TOKEN = 208\nERR_ENDPT_PROXY = 209\nERR_DCE = 210\n\n# CPU/Memory Errors\nERR_FILE_MISSING = 300\nERR_LOGROTATE_SIZE = 301\nWARN_LOGROTATE = 302\nERR_FILE_ACCESS = 303\n\n# Syslog Errors\nERR_SYSLOG = 400\nERR_SERVICE_STATUS = 401\nERR_FILE_EMPTY = 402\nERR_CONF_FILE_PERMISSION = 403\n\n# Custom Logs Errors\nERR_CL_CONF = 500\nERR_CL_INPUT = 501\n\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/errors.py",
    "content": "import copy\nimport subprocess\n\nfrom error_codes import *\n\n# backwards compatible input() function for Python 2 vs 3\ntry:\n    input = raw_input\nexcept NameError:\n    pass\n\n# error info edited when error occurs\nerror_info = []\n\n# list of all errors called when script ran\nerr_summary = []\n\n\n\n# set of all errors which are actually warnings\nwarnings = set([WARN_INTERNET_CONN, WARN_INTERNET, WARN_OPENSSL_PROXY, WARN_MDSD_ERR_FILE, WARN_RESTART_LOOP, WARN_LOGROTATE])\n\n# dictionary correlating error codes to error messages\nerror_messages = {\n    WARN_INTERNET : \"SSL connection couldn't be verified. Please run the command below for more information on this warning:\\n\"\\\n          \"\\n  $ {0}\\n\",\n    WARN_INTERNET_CONN : \"Machine is not connected to the internet: openssl command failed. \"\\\n          \"Please run the command below for more information on the failure:\\n\"\\\n          \"\\n  $ {0}\\n\",\n    ERR_SUDO_PERMS : \"Couldn't access {0} due to inadequate permissions. Please run the troubleshooter \"\\\n          \"as root in order to allow access.\",\n    ERR_FOUND : \"Please go through the output above to find the errors caught by the troubleshooter.\",\n    ERR_BITS : \"Couldn't get AMA if CPU is not 64-bit.\",\n    ERR_OS_VER : \"This version of {0} ({1}) is not supported. Please download {2}. To see all \"\\\n          \"supported Operating Systems, please go to:\\n\"\\\n          \"\\n   https://docs.microsoft.com/en-us/azure/azure-monitor/agents/agents-overview#linux\\n\",\n    ERR_OS : \"{0} is not a supported Operating System. To see all supported Operating \"\\\n          \"Systems, please go to:\\n\"\\\n          \"\\n   https://docs.microsoft.com/en-us/azure/azure-monitor/agents/agents-overview#linux\\n\",\n    ERR_FINDING_OS : \"Coudln't determine Operating System. To see all supported Operating \"\\\n          \"Systems, please go to:\\n\"\\\n          \"\\n   https://docs.microsoft.com/en-us/azure/azure-monitor/agents/agents-overview#linux\\n\" \\\n          \"\\n\\nError Details: \\n{0}\",\n    ERR_FREE_SPACE : \"There isn't enough space in directory {0} to install AMA - there needs to be at least 500MB free, \"\\\n          \"but {0} has {1}MB free. Please free up some space and try installing again.\",\n    ERR_PKG_MANAGER : \"This system does not have a supported package manager. Please install 'dpkg' or 'rpm' \"\\\n          \"and run this troubleshooter again.\",\n    ERR_MULTIPLE_AMA : \"There is more than one instance of AMA installed, please remove the extra AMA packages.\",\n    ERR_AMA_INSTALL : \"AMA package isn't installed correctly.\\n\\nError Details: \\n{0}\",\n    ERR_SUBCOMPONENT_INSTALL : \"Subcomponents(s) {0} not installed correctly.\",\n    ERR_LOG_DAEMON : \"No logging daemon found. Please install rsyslog or syslog-ng.\",\n    ERR_SYSLOG_USER : \"Syslog user is not created successfully.\",\n    ERR_OLD_AMA_VER : \"You are currently running AMA Version {0}. This troubleshooter only \"\\\n          \"supports versions 1.9 and newer. Please upgrade to the newest version. You can find \"\\\n          \"more information at the link below:\\n\"\\\n          \"\\n    https://docs.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-manage\\n\",\n    ERR_GETTING_AMA_VER : \"Couldn't get most current released version of AMA.\\n\\nError Details: \\n{0}\",\n    ERR_COUNTER_FILE_MISSING : \"metricCounters.json file is not found. 
Please check your perf counters configuration.\",\n    ERR_AMA_PARAMETERS : \"Couldn't read and parse AMA configuration in /etc/default/azuremonitoragent.\\n\\nError Details:\\n{0}\",\n    ERR_NO_DCR : \"Couldn't parse DCR information on this VM. Please check your DCR configuration.\\n\\nError Details:{0}\",\n    ERR_INFO_MISSING: \"NO DCR workspace id or region is found. Please check if DCR is configured correctly and match the information in\"\\\n            \"/etc/opt/microsoft/azuremonitoragent/config-cache/configchunks.*.json\",\n    ERR_ENDPT : \"Machine couldn't connect to {0}: curl/openssl command failed. \"\\\n          \"\\n\\nError Details:\\n $ {1} \\n\\n{2}\",\n    ERR_SUBCOMPONENT_STATUS : \"Subcomponent {0} has not been started. Status details: {1}\",\n    ERR_CHECK_STATUS : \"Couldn't get the status of subcomponents.\\n\\nError Details:{0}\",\n    ERR_RESOLVE_IP : \"The endpoint {0} cannot be resolved. Please run the command below for more information on the failure:\\n\\n $ {1}\",\n    ERR_IMDS_METADATA : \"Couldn't access {0} Instance Metadata Service when executing command\\n $ {1}\\n\\nError Details:\\n{2}\",\n    ERR_ACCESS_TOKEN : \"Couldn't use managed identities to acquire an access token when executing command\\n $ {0}\\n\\nError Details:\\n{1}\",\n    ERR_ENDPT_PROXY : \"Machine couldn't connect to {0} with proxy: curl/openssl command failed. Please check your proxy configuration.\"\\\n          \"\\n\\nError Details:\\n $ {1} \\n\\n{2}\",\n    ERR_DCE : \"Couldn't parse DCE information on this VM. Please check your DCE configuration.\\n\\nError Details:{0}\",\n    WARN_OPENSSL_PROXY : \"Skip SSL handshake checks because AMA is configured with authenticated proxy.\",\n    WARN_MDSD_ERR_FILE : \"Found errors in log file {0}, displaying last few lines of error messages:\\n {1}\",\n    WARN_RESTART_LOOP : \"Subcomponents might be in a restart loop. Details:\\n\\n{0}\",\n    ERR_FILE_MISSING : \"{0} {1} doesn't exist.\",\n    ERR_LOGROTATE_SIZE : \"Logrotate size limit for log {0} has invalid formatting. Please see {1} for more \"\\\n          \"information.\",\n    WARN_LOGROTATE : \"Logrotate isn't rotating log {0}: its current size is {1}, and it should have \"\\\n          \"been rotated at {2}. Please see {3} for more information.\",\n    ERR_FILE_ACCESS : \"Couldn't access or run {0} due to the following reason: {1}.\",\n    ERR_SYSLOG : \"Couldn't find either 'rsyslog' or 'syslog-ng' on machine. Please download \"\\\n          \"one of the two services and try again.\",\n    ERR_SERVICE_STATUS : \"{0} current status is the following: '{1}'. Please check the status of {0} \"\\\n          \"using {2} for more information.\",\n    ERR_FILE_EMPTY : \"File {0} is empty.\",\n    ERR_CONF_FILE_PERMISSION : \"{0} {1} is not accesible by syslog user. Please grant syslog user {2} permission.\",\n    ERR_CL_CONF : \"Custom logs configuration file /etc/opt/microsoft/azuremonitoragent/config-cache/fluentbit/td-agent.conf \"\\\n                        \"cannot be parsed.\\n\\nError Details:\\n{0}\",\n    ERR_CL_INPUT : \"Custom logs input file path is either empty or invalid. 
Please check your input path in \"\\\n                        \"/etc/opt/microsoft/azuremonitoragent/config-cache/fluentbit/td-agent.conf.\\n\\nError Details:\\n{0}\"\n}\n\n\n\n# check if either has no error or is warning\ndef is_error(err_code):\n    not_errs = warnings.copy()\n    not_errs.add(NO_ERROR)\n    return (err_code not in not_errs)\n\n\n\n# for getting inputs from the user\ndef get_input(question, check_ans, no_fit):\n    answer = input(\" {0}: \".format(question))\n    while (not check_ans(answer.lower())):\n        print(\"Unclear input. {0}\".format(no_fit))\n        answer = input(\" {0}: \".format(question))\n    return answer\n\ndef print_errors(err_code):\n    not_errors = set([NO_ERROR, USER_EXIT])\n    if (err_code in not_errors):\n        return err_code\n\n    warning = False\n    if (err_code in warnings):\n        warning = True\n\n    err_string = error_messages[err_code]\n    # no formatting\n    if (error_info == []):\n        err_string = \"ERROR FOUND: {0}\".format(err_string)\n        err_summary.append(err_string)\n    # needs input\n    else:\n        while (len(error_info) > 0):\n            tup = error_info.pop(0)\n            temp_err_string = err_string.format(*tup)\n            if (warning):\n                final_err_string = \"WARNING FOUND: {0}\".format(temp_err_string)\n            else:\n                final_err_string = \"ERROR FOUND: {0}\".format(temp_err_string)\n            err_summary.append(final_err_string)\n    if (warning):\n        print(\"WARNING(S) FOUND.\")\n        return NO_ERROR\n    else:\n        print(\"ERROR(S) FOUND.\")\n        return ERR_FOUND"
  },
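  {
    "path": "AzureMonitorAgent/ama_tst/modules/example_error_flow_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It is a toy\nreproduction of the reporting contract in errors.py: a check appends a tuple\nof format arguments to error_info and returns an error code, and print_errors\npops each tuple into the matching message template. The code and message here\nare hypothetical stand-ins.\n\"\"\"\n\nERR_DEMO = 900\nerror_info = []\nerror_messages = {ERR_DEMO: \"Couldn't reach {0}.\\n\\nError Details:\\n{1}\"}\n\ndef failing_check():\n    # a real check would run a command here and inspect its output\n    error_info.append(('example.endpoint', 'connection refused'))\n    return ERR_DEMO\n\ndef print_errors(err_code):\n    # mirror errors.print_errors: one formatted message per queued tuple\n    while error_info:\n        print('ERROR FOUND: ' + error_messages[err_code].format(*error_info.pop(0)))\n\nif __name__ == '__main__':\n    print_errors(failing_check())\n"
  },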
  {
    "path": "AzureMonitorAgent/ama_tst/modules/general_health/__init__.py",
    "content": "# General health check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/general_health/check_status.py",
    "content": "import subprocess\nimport re\nimport platform\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import run_cmd_output, get_input, is_metrics_configured\n\ndef check_restart_status(interactive):\n    \"\"\"\n    check if the subcomponents restart in a given time interval\n    \"\"\"\n    subcomponents = {'azuremonitoragent': 'azuremonitoragent'}\n\n    subcomponents['azuremonitor-agentlauncher'] = 'agentlauncher'\n    subcomponents['azuremonitor-coreagent'] = 'amacoreagent'\n    if is_metrics_configured():\n        subcomponents['metrics-extension'] = 'MetricsExtension'\n        subcomponents['metrics-sourcer'] = 'Telegraf'\n    restart_logs = \"\"\n    start = \"yesterday\"\n    end = \"now\"\n    since = \"--since={0}\".format(start)\n    until = \"--until={0}\".format(end)\n    \n    if interactive:\n        print(\"--------------------------------------------------------------------------------\")\n        print(\"Please enter a certain time range that you want to filter logs (default time range: from yesterday to now):\\n\")\n        print(\"(e.g. Since: <yyyy-mm-dd hh:mm:ss>) or <yyyy-mm-dd>\")\n        start_input = get_input(\"Since: \")\n        end_input = get_input(\"Until: \")\n        print(\"--------------------------------------------------------------------------------\")\n        if start_input != \"\":\n            since = '--since=\\\"{0}\\\"'.format(start_input)\n            start = start_input\n        if end_input != \"\":\n            until = '--until=\\\"{0}\\\"'.format(end_input)\n            end = end_input\n    for key in subcomponents.keys():\n        cmd = 'journalctl -n 100 --no-pager -u {0} {1} {2}'.format(key, since, until)\n        output = run_cmd_output(cmd)\n        lines = output.split('\\n')\n        process_logs = {}\n        for line in lines:\n            match = re.findall(\".*{0}\\[.*\\].*\".format(subcomponents[key]), line)\n            if len(match) == 0:\n                continue\n            log = match[0]\n            pid = log.split('[')[1].split(']')[0]\n            if pid not in process_logs:\n                process_logs[pid] = log\n        \n        # add to warning if restart more than 10 times recently\n        if len(process_logs) > 10:\n            logs = '\\n'.join(process_logs.values())\n            restart_logs = restart_logs + \"Possible restart loop in {0} detected ({1} restarts from {2} to {3}):\\n{4}\".format(key, len(process_logs), start, end, logs)\n            restart_logs = restart_logs + \"\\n--------------------------------------------------------------------------------\\n\"\n    \n    if restart_logs:\n        error_info.append((restart_logs,))\n        return WARN_RESTART_LOOP\n    return NO_ERROR\n"
  },
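  {
    "path": "AzureMonitorAgent/ama_tst/modules/general_health/example_restart_count_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It restates\nthe idea behind check_status.check_restart_status: journald stamps each line\nwith the writing process's PID, so counting distinct PIDs for a unit\napproximates how often it restarted. The sample lines are hypothetical.\n\"\"\"\nimport re\n\nSAMPLE = \"\"\"\\\nJan 01 00:00:01 vm azuremonitoragent[101]: starting\nJan 01 00:00:05 vm azuremonitoragent[101]: running\nJan 01 00:01:00 vm azuremonitoragent[202]: starting\n\"\"\"\n\ndef distinct_pids(output, tag):\n    # keep the first matching log line per PID, like check_restart_status does\n    pids = {}\n    for line in output.split('\\n'):\n        match = re.findall(r\".*{0}\\[.*\\].*\".format(tag), line)\n        if match:\n            pid = match[0].split('[')[1].split(']')[0]\n            pids.setdefault(pid, match[0])\n    return pids\n\nif __name__ == '__main__':\n    print(len(distinct_pids(SAMPLE, 'azuremonitoragent')))  # 2 distinct PIDs\n"
  },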
  {
    "path": "AzureMonitorAgent/ama_tst/modules/general_health/general_health.py",
    "content": "import os\n\nfrom error_codes  import *\nfrom errors       import error_info, is_error, print_errors\nfrom .check_status import check_restart_status\n\nERR_FILE_PATH = \"/var/opt/microsoft/azuremonitoragent/log/mdsd.err\"\n\ndef check_err_file():\n    \"\"\"\n    output mdsd.err contents if the file is not empty\n    \"\"\"\n    tail_size = -50\n    pattern = ' [DAEMON] '\n    err_logs = []\n    with open(ERR_FILE_PATH) as f:\n        lines = f.readlines(10000)\n        lines = lines[tail_size:]\n        for line in lines:\n            line = line.rstrip('\\n')\n            # skip empty lines, daemon start/exit logs\n            if line == '':\n                continue\n            elif pattern in line:\n                continue\n            else:\n                err_logs.append(line)\n                \n    if len(err_logs) > 0:\n        err_logs_str = '\\n' + ('\\n'.join(err_logs))\n        error_info.append((ERR_FILE_PATH, err_logs_str))\n        return WARN_MDSD_ERR_FILE\n    return NO_ERROR\n\ndef check_general_health(interactive, err_codes=True, prev_success=NO_ERROR):\n    print(\"CHECKING IF THE AGENT IS HEALTHY...\")\n    success = prev_success\n\n    print(\"Checking status of subcomponents\")\n    checked_restart_status = check_restart_status(interactive)\n    if (is_error(checked_restart_status)):\n        return print_errors(checked_restart_status)\n    else:\n        success = print_errors(checked_restart_status)\n    \n    print(\"Checking mdsd.err file\")\n    checked_err_file = check_err_file()\n    if (is_error(checked_err_file)):\n        return print_errors(checked_err_file)\n    else:\n        success = print_errors(checked_err_file)\n    \n    print(\"============================================\")\n    return success"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/helpers.py",
    "content": "import os\nimport json\nimport platform\nimport subprocess\nfrom errors         import error_info\nfrom error_codes    import *\n\nCONFIG_DIR = '/etc/opt/microsoft/azuremonitoragent/config-cache/configchunks'\nMETRICS_FILE = \"/etc/opt/microsoft/azuremonitoragent/config-cache/metricCounters.json\"\n\n# backwards compatible input() function for Python 2 vs 3\ntry:\n    input = raw_input\nexcept NameError:\n    pass\n    \ntry:\n    FileNotFoundError\nexcept NameError:\n    FileNotFoundError = IOError\n\n# backwards compatible JSONDecodeError for Python 2 vs 3\ntry:\n    json.JSONDecodeError\nexcept AttributeError:\n    # Python 2 doesn't have json.JSONDecodeError, use ValueError instead\n    json.JSONDecodeError = ValueError\n\n# backwards compatible devnull variable for Python 3.3 vs earlier\ntry:\n    DEVNULL = subprocess.DEVNULL\nexcept:\n    DEVNULL = open(os.devnull)\n\ngeneral_info = dict()\n\ndef geninfo_lookup(key):\n    try:\n        val = general_info[key]\n    except KeyError:\n        return None\n    return val\n\ndef get_input(question, check_ans=None, no_fit=None):\n    if check_ans == None and no_fit == None:\n        return input(question)\n    answer = input(\" {0}: \".format(question))\n    while (not check_ans(answer.lower())):\n        print(\"Unclear input. {0}\".format(no_fit))\n        answer = input(\" {0}: \".format(question))\n    return answer\n\ndef is_arc_installed():\n    \"\"\"\n    Check if this is an Arc machine\n    \"\"\"\n    # Using systemctl to check this since Arc only supports VMs that have systemd\n    check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')\n    return check_arc == 0\n\ndef find_vm_bits():\n    cpu_info = subprocess.check_output(['lscpu'], universal_newlines=True)\n    cpu_opmodes = (cpu_info.split('\\n'))[1]\n    cpu_bits = cpu_opmodes[-6:]\n    return cpu_bits\n\ndef find_vm_distro():\n    \"\"\"\n    Finds the Linux Distribution this vm is running on.\n    \"\"\"\n    vm_dist = vm_id = vm_ver =  None\n    parse_manually = False\n    try:\n        vm_dist, vm_ver, vm_id = platform.linux_distribution()\n    except AttributeError:\n        try:\n            vm_dist, vm_ver, vm_id = platform.dist()\n        except AttributeError:\n            # Falling back to /etc/os-release distribution parsing\n            pass\n\n    # Some python versions *IF BUILT LOCALLY* (ex 3.5) give string responses (ex. 'bullseye/sid') to platform.dist() function\n    # This causes exception in the method below. 
Thus adding a check to switch to manual parsing in this case\n    try:\n        temp_vm_ver = int(vm_ver.split('.')[0])\n    except:\n        parse_manually = True\n\n    if (not vm_dist and not vm_ver) or parse_manually: # SLES 15 and others\n        try:\n            with open('/etc/os-release', 'r') as fp:\n                for line in fp:\n                    if line.startswith('ID='):\n                        vm_dist = line.split('=')[1]\n                        vm_dist = vm_dist.split('-')[0]\n                        vm_dist = vm_dist.replace('\\\"', '').replace('\\n', '')\n                        vm_dist = vm_dist.lower()\n                    elif line.startswith('VERSION_ID='):\n                        vm_ver = line.split('=')[1]\n                        vm_ver = vm_ver.replace('\\\"', '').replace('\\n', '')\n                        vm_ver = vm_ver.lower()\n        except (FileNotFoundError, AttributeError) as e:  # indeterminate OS\n            return (None, None, e)\n    return (vm_dist, vm_ver, None)\n\n\ndef find_package_manager():\n    global general_info\n    \"\"\"\n    Checks which package manager is on the system\n    \"\"\"\n    pkg_manager = \"\"\n    \n    # check if debian system\n    if (os.path.isfile(\"/etc/debian_version\")):\n        try:\n            subprocess.check_output(\"command -v dpkg\", shell=True)\n            pkg_manager = \"dpkg\"\n        except subprocess.CalledProcessError:\n            pass\n    # check if redhat system\n    elif (os.path.isfile(\"/etc/redhat_version\")):\n        try:\n            subprocess.check_output(\"command -v rpm\", shell=True)\n            pkg_manager = \"rpm\"\n        except subprocess.CalledProcessError:\n            pass\n\n    # likely SUSE or modified VM, just check dpkg and rpm\n    if (pkg_manager == \"\"):\n        try:\n            subprocess.check_output(\"command -v dpkg\", shell=True)\n            pkg_manager = \"dpkg\"\n        except subprocess.CalledProcessError:\n            try:\n                subprocess.check_output(\"command -v rpm\", shell=True)\n                pkg_manager = \"rpm\"\n            except subprocess.CalledProcessError:\n                pass\n    general_info['PKG_MANAGER'] = pkg_manager\n    return pkg_manager\n\ndef get_package_version(pkg):\n    pkg_mngr = geninfo_lookup('PKG_MANAGER')\n    # dpkg\n    if (pkg_mngr == 'dpkg'):\n        return get_dpkg_pkg_version(pkg)\n    # rpm\n    elif (pkg_mngr == 'rpm'):\n        return get_rpm_pkg_version(pkg)\n    else:\n        return (None, None)\n    \n# Package Info\ndef get_dpkg_pkg_version(pkg):\n    try:\n        dpkg_info = subprocess.check_output(['dpkg', '-s', pkg], universal_newlines=True,\\\n                                            stderr=subprocess.STDOUT)\n        dpkg_lines = dpkg_info.split('\\n')\n        for line in dpkg_lines:\n            if (line.startswith('Package: ') and not line.endswith(pkg)):\n                # wrong package\n                return (None, None)\n            if (line.startswith('Status: ') and not line.endswith('installed')):\n                # not properly installed\n                return (None, None)\n            if (line.startswith('Version: ')):\n                version = (line.split())[-1]\n                return (version, None)\n        return (None, None)\n    except subprocess.CalledProcessError as e:\n        return (None, e.output)\n\ndef get_rpm_pkg_version(pkg):\n    try:\n        rpm_info = subprocess.check_output(['rpm', '-qi', pkg], universal_newlines=True,\\\n                       
                     stderr=subprocess.STDOUT)\n        if (\"package {0} is not installed\".format(pkg) in rpm_info):\n            # didn't find package\n            return (None, None)\n        rpm_lines = rpm_info.split('\\n')\n        for line in rpm_lines:\n            parsed_line = line.split()\n            if (parsed_line[0] == 'Name'):\n                # ['Name', ':', name]\n                name = parsed_line[2]\n                if (name != pkg):\n                    # wrong package\n                    return (None, None)\n            if (parsed_line[0] == 'Version'):\n                # ['Version', ':', version]\n                version = parsed_line[2]\n                return (version, None)\n        return (None, None)\n    except subprocess.CalledProcessError as e:\n        return (None, e.output)\n\ndef find_ama_version():\n    \"\"\"\n    Gets a list of all AMA versions installed on the VM\n    \"\"\"\n    try:\n        config_dirs = filter((lambda x : x.startswith(\"Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-\")), os.listdir(\"/var/lib/waagent\"))\n        ama_vers = list(map((lambda x : (x.split('-'))[-1]), config_dirs))\n    except FileNotFoundError as e:\n        return (None, e)\n    return (ama_vers, None)\n\n\ndef check_ama_installed(ama_vers):\n    \"\"\"\n    Checks to verify AMA is installed and only has one version installed at a time\n    \"\"\"\n    ama_exists = ((ama_vers != None) and (len(ama_vers) > 0))\n    ama_unique = (ama_exists and (len(ama_vers) == 1))\n    return (ama_exists, ama_unique)\n\ndef run_cmd_output(cmd):\n    \"\"\"\n    Common logic to run any command and check/get its output for further use\n    \"\"\"\n    try:\n        out = subprocess.check_output(cmd, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)\n        return out\n    except subprocess.CalledProcessError as e:\n        return (e.output)\n\n\ndef find_dcr_workspace():\n    \"\"\"\n    Parse DCR configuration files to find workspace IDs and regions.\n    \"\"\"\n    global general_info\n    \n    if 'DCR_WORKSPACE_ID' in general_info and 'DCR_REGION' in general_info:\n        return (general_info['DCR_WORKSPACE_ID'], general_info['DCR_REGION'], None)\n    dcr_workspace = set()\n    dcr_region = set()\n    me_region = set()\n    agent_settings = {}\n    general_info['URL_SUFFIX'] = '.com'\n    try:\n        for file in os.listdir(CONFIG_DIR):\n            file_path = CONFIG_DIR + \"/\" + file\n            with open(file_path) as f:\n                result = json.load(f)\n                \n                # Check if this is an AgentSettings DCR - parse its settings\n                if 'kind' in result and result['kind'] == 'AgentSettings' and 'channels' not in result:\n                    if 'settings' in result:\n                        settings_str = result['settings']\n                        try:\n                            # The settings field is a JSON string, so parse it\n                            if isinstance(settings_str, str):\n                                settings_list = json.loads(settings_str)\n                            else:\n                                settings_list = settings_str\n                            \n                            # Process each setting\n                            for setting in settings_list:\n                                name = setting['name']\n                                value = setting['value']\n                                if name:\n                                    agent_settings[name] = value\n  
                      except (json.JSONDecodeError, TypeError) as e:\n                            # If parsing fails, skip this AgentSettings DCR\n                            print(\"Error parsing settings key in AgentSettings DCR\")\n                    continue\n                channels = result['channels']\n                for channel in channels:\n                    if channel['protocol'] == 'ods':\n                        # parse dcr workspace id\n                        endpoint_url = channel['endpoint']\n                        workspace_id = endpoint_url.split('https://')[1].split('.ods')[0]\n                        dcr_workspace.add(workspace_id)\n                        # parse dcr region\n                        token_endpoint_uri = channel['tokenEndpointUri']\n                        region = token_endpoint_uri.split('Location=')[1].split('&')[0]\n                        dcr_region.add(region)\n                        # parse url suffix\n                        if '.us' in endpoint_url:\n                            general_info['URL_SUFFIX'] = '.us'\n                        if '.cn' in endpoint_url:\n                            general_info['URL_SUFFIX'] = '.cn'                            \n                    if channel['protocol'] == 'me':\n                        # parse ME region\n                        endpoint_url = channel['endpoint']\n                        region = endpoint_url.split('https://')[1].split('.monitoring')[0]\n                        me_region.add(region)\n    except Exception as e:\n        return (None, None, None, e)\n\n    general_info['DCR_WORKSPACE_ID'] = dcr_workspace\n    general_info['DCR_REGION'] = dcr_region\n    general_info['ME_REGION'] = me_region\n    return (dcr_workspace, dcr_region, agent_settings, None)\n\ndef find_dce():\n    \"\"\"\n    Parse DCR configuration files to find Data Collection Endpoints (DCE).\n    \"\"\"\n    global general_info\n    \n    dce = set()\n    try:\n        for file in os.listdir(CONFIG_DIR):\n            file_path = CONFIG_DIR + \"/\" + file\n            with open(file_path) as f:\n                result = json.load(f)\n                # Check if this is an AgentSettings DCR, if so skip it\n                if 'kind' in result and result['kind'] == 'AgentSettings' and 'channels' not in result:\n                    continue\n                channels = result['channels']\n                for channel in channels:\n                    if channel['protocol'] == 'gig':\n                        # parse dce logs ingestion endpoint\n                        ingest_endpoint_url = channel['endpointUriTemplate']\n                        ingest_endpoint = ingest_endpoint_url.split('https://')[1].split('/')[0]\n                        dce.add(ingest_endpoint)\n                        # parse dce configuration access endpoint\n                        configuration_endpoint_url = channel['tokenEndpointUri']\n                        configuration_endpoint = configuration_endpoint_url.split('https://')[1].split('/')[0]\n                        dce.add(configuration_endpoint)\n    except Exception as e:\n        return (None, None, e)\n\n    general_info['DCE'] = dce\n    return (dce, None)\n\ndef is_metrics_configured():\n    global general_info\n    if 'metrics' in general_info:\n        return general_info['metrics']\n    \n    with open(METRICS_FILE) as f:\n        output = f.read(2)\n        if output != '[]':\n            general_info['metrics'] = True\n        else:\n            general_info['metrics'] = False\n    
return general_info['metrics']\n    \n    \n"
  },
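  {
    "path": "AzureMonitorAgent/ama_tst/modules/example_dcr_parse_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It exercises\nthe same field extraction that helpers.find_dcr_workspace performs on each\n'ods' channel, but against an in-memory sample chunk rather than the files\nunder config-cache/configchunks. All sample values are hypothetical.\n\"\"\"\nimport json\n\nSAMPLE_CHUNK = json.dumps({\n    'channels': [{\n        'protocol': 'ods',\n        'endpoint': 'https://00000000-0000-0000-0000-000000000000.ods.opinsights.azure.com',\n        'tokenEndpointUri': 'https://example.control.monitor.azure.com/auth?Location=eastus&v=1'\n    }]\n})\n\ndef parse_chunk(raw):\n    result = json.loads(raw)\n    for channel in result['channels']:\n        if channel['protocol'] == 'ods':\n            # workspace id is the host prefix; region rides in the token URI query\n            workspace_id = channel['endpoint'].split('https://')[1].split('.ods')[0]\n            region = channel['tokenEndpointUri'].split('Location=')[1].split('&')[0]\n            return (workspace_id, region)\n\nif __name__ == '__main__':\n    print(parse_chunk(SAMPLE_CHUNK))  # ('00000000-0000-0000-0000-000000000000', 'eastus')\n"
  },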
  {
    "path": "AzureMonitorAgent/ama_tst/modules/high_cpu_mem/__init__.py",
    "content": "# CPU/memory check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/high_cpu_mem/check_logrot.py",
    "content": "import errno\nimport os\nimport re\n\nfrom error_codes import *\nfrom errors      import error_info\n\nLR_CONFIG_PATH = \"/etc/logrotate.d/azuremonitoragent\"\n\ndef hr2bytes(hr_size):\n    if (hr_size.isdigit()):\n        return int(hr_size)\n    hr_digits = hr_size[:-1]\n    hr_units = hr_size[-1]\n    if (hr_digits.isdigit()):\n        # kilobytes\n        if (hr_units == 'k'):\n            return int(hr_digits) * 1000\n        # megabytes\n        elif (hr_units == 'M'):\n            return int(hr_digits) * 1000000\n        # gigabytes\n        elif (hr_units == 'G'):\n            return int(hr_digits) * 1000000000\n    # wrong formatting\n    return None\n\n\n\ndef check_size_config(logrotate_configs):\n    for k in list(logrotate_configs.keys()):\n        # grab size limit if exists\n        size_config = next((x for x in logrotate_configs[k] if x.startswith('size ')), None)\n        if (size_config == None):\n            continue\n        size_limit = hr2bytes(size_config.split()[1])\n        if (size_limit == None):\n            error_info.append((k, LR_CONFIG_PATH))\n            return ERR_LOGROTATE_SIZE\n\n        # get current size of file\n        try:\n            size_curr = os.path.getsize(k)\n            if (size_curr > size_limit):\n                error_info.append((k, size_curr, size_limit, LR_CONFIG_PATH))\n                return WARN_LOGROTATE\n\n        # couldn't get current size of file\n        except os.error as e:\n            if (e.errno == errno.EACCES):\n                error_info.append((k,))\n                return ERR_SUDO_PERMS\n            elif (e.errno == errno.ENOENT):\n                if ('missingok' in logrotate_configs[k]):\n                    continue\n                else:\n                    error_info.append(('log file', k))\n                    return ERR_FILE_MISSING\n            else:\n                error_info.append((k, e.strerror))\n                return ERR_FILE_ACCESS\n    return NO_ERROR\n\n\n\n\ndef check_log_rotation():\n    # check logrotate config file exists\n    if (not os.path.isfile(LR_CONFIG_PATH)):\n        error_info.append(('logrotate config file', LR_CONFIG_PATH))\n        return ERR_FILE_MISSING\n    \n    # go through logrotate config file\n    logrotate_configs = dict()\n    with open(LR_CONFIG_PATH, 'r') as f:\n        lr_lines = f.readlines()\n        in_file = None\n        for lr_line in lr_lines:\n            lr_line = lr_line.rstrip('\\n')\n\n            # start of log rotation config\n            lr_start = re.match(\"^/(\\S+)\", lr_line)\n            if (lr_start != None):\n                in_file = lr_start.group()\n                logrotate_configs[in_file] = set()\n                continue\n            # log rotation config info\n            elif (in_file != None):\n                logrotate_configs[in_file].add(lr_line.lstrip())\n                continue\n            # end of log rotation config\n            elif (lr_line == '}'):\n                in_file = None\n                continue\n\n    # check size rotation working\n    checked_size_config = check_size_config(logrotate_configs)\n    if (checked_size_config != NO_ERROR):\n        return checked_size_config\n\n    return NO_ERROR"
  },
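  {
    "path": "AzureMonitorAgent/ama_tst/modules/high_cpu_mem/example_hr2bytes_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. It restates\ncheck_logrot.hr2bytes with a lookup table and a few worked cases: logrotate\nsize limits are either plain bytes or use a k/M/G suffix, and anything else\nis treated as a formatting error (None).\n\"\"\"\n\ndef hr2bytes(hr_size):\n    if hr_size.isdigit():\n        return int(hr_size)\n    digits, unit = hr_size[:-1], hr_size[-1]\n    factor = {'k': 10**3, 'M': 10**6, 'G': 10**9}.get(unit)\n    if digits.isdigit() and factor is not None:\n        return int(digits) * factor\n    # wrong formatting\n    return None\n\nif __name__ == '__main__':\n    for s in ('500', '10k', '50M', '1G', 'bad'):\n        print(s, '->', hr2bytes(s))\n"
  },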
  {
    "path": "AzureMonitorAgent/ama_tst/modules/high_cpu_mem/check_usage.py",
    "content": "import time\nimport subprocess\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import get_input, run_cmd_output\n\ndef find_mdsd_pid():\n    try:\n        status = run_cmd_output('systemctl status azuremonitoragent')\n        status_lines = status.split('\\n')\n        for line in status_lines:\n            line = line.strip()\n            if line.startswith('Main PID:'):\n                pid = line.split()[2]\n                return (pid, None)\n    except subprocess.CalledProcessError as e:\n        return (None, e)\n    \ndef check_usage(interactive):\n    if interactive:\n        print(\"Checking CPU/memory usage of AMA subcomponents...\")\n        result = get_input(\"Do you want to monitor the CPU/memory usage of AMA in 5 minutes? (YES/no)\", \\\n                        (lambda x : x.lower() in ['y','yes','n','no', '']),\\\n                        \"Please enter 'y'/'yes' to run this check, 'n'/'no' to skip this check. \\n\")\n        if result.lower() in ['n', 'no']:\n            return NO_ERROR\n        \n        mdsd_pid, e = find_mdsd_pid()\n        if e != None:\n            error_info.append((e,))\n            return ERR_CHECK_STATUS\n        cmd = \"top -b -n1 | grep {0}\".format(mdsd_pid)\n        cpu = []\n        mem = []\n        # run 5 minutes to collect min/max/avg usage\n        for i in range(0, 30):\n            output = run_cmd_output(cmd)\n            values = list(filter(None, output.strip().split(\" \")))\n            cpu.append(float(values[8]))\n            mem.append(float(values[9]))\n            time.sleep(10)\n        \n        max_cpu = max(cpu)\n        min_cpu = min(cpu)\n        avg_cpu = sum(cpu)/len(cpu)\n        max_mem = max(mem)\n        min_mem = min(mem)\n        avg_mem = sum(mem)/len(mem)\n        print(\"--------------------------------------------------------------------------------\")\n        print(\"CPU usage in the last 5 minutes (%CPU)\")\n        print(\"Max: \", max_cpu, \"Min: \", min_cpu, \"Avg: \", \"%.1f\" % avg_cpu)\n        print(\"Memory usage in the last 5 minutes (%MEM)\")\n        print(\"Max: \", max_mem, \"Min: \", min_mem, \"Avg: \", \"%.1f\" % avg_mem)\n    return NO_ERROR"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/high_cpu_mem/high_cpu_mem.py",
    "content": "from error_codes          import *\nfrom errors               import is_error, print_errors\nfrom .check_logrot        import check_log_rotation\nfrom .check_usage         import check_usage\n\ndef check_high_cpu_memory(interactive, prev_success=NO_ERROR):\n    print(\"CHECKING FOR HIGH CPU / MEMORY USAGE...\")\n\n    success = prev_success\n\n    # check log rotation\n    print(\"Checking if log rotation is working correctly...\")\n    checked_logrot = check_log_rotation()\n    if (is_error(checked_logrot)):\n        return print_errors(checked_logrot)\n    else:\n        success = print_errors(checked_logrot)\n\n    # check AMA CPU/memory usage\n    checked_usage = check_usage(interactive)\n    if (is_error(checked_usage)):\n        return print_errors(checked_usage)\n    else:\n        success = print_errors(checked_usage)\n    return success"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/__init__.py",
    "content": "# Install check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/check_ama.py",
    "content": "import re\nimport sys\nimport socket\nimport xml.dom.minidom\nif sys.version_info[0] == 3:\n    import urllib.request as urllib\n    import urllib.error as urlerror\n\nelif sys.version_info[0] == 2:\n    import urllib2 as urllib\n    import urllib2 as urlerror\n    \ntry:\n    import requests\nexcept ImportError:\n    pass\n\nfrom error_codes import *\nfrom errors      import error_info, get_input\nfrom helpers     import get_package_version\nfrom connect.check_endpts import check_internet_connect\n\nAMA_URL = 'https://docs.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-extension-versions'\n# Timeout for fetching latest AMA version (in seconds)\nAMA_VERSION_FETCH_TIMEOUT = 60\n\ndef get_latest_ama_version(curr_version):\n    # python2 and python3 compatible\n    # Set timeout to prevent hanging\n    timeout = AMA_VERSION_FETCH_TIMEOUT\n    \n    try:\n        if sys.version_info[0] == 3:\n            # Python 3 - try urllib first, then requests as fallback\n            try:\n                r = urllib.urlopen(AMA_URL, timeout=timeout).read()\n            except AttributeError:\n                # If urllib doesn't work, try requests\n                r = requests.get(AMA_URL, timeout=timeout).text\n        else:\n            # Python 2 - use urllib2 which supports timeout\n            r = urllib.urlopen(AMA_URL, timeout=timeout).read()\n            \n    except socket.timeout:\n        return None, \"Connection timed out after {0} seconds while trying to fetch latest AMA version from {1}. Please check your network connectivity and firewall settings.\".format(timeout, AMA_URL)\n    except Exception as e:\n        # More specific timeout detection\n        error_str = str(e).lower()\n        error_type = type(e).__name__\n        \n        # Check for various timeout conditions\n        if (error_type == 'timeout' or \n            'timeout' in error_str or \n            'timed out' in error_str or\n            'read timeout' in error_str or\n            'connect timeout' in error_str):\n            return None, \"Request timed out after {0} seconds while trying to fetch latest AMA version from {1}. This may be due to network connectivity issues or firewall restrictions.\".format(timeout, AMA_URL)\n        \n        # Handle HTTP and URL errors\n        if hasattr(e, 'code'):\n            return None, \"HTTP error {0} while trying to fetch latest AMA version from {1}. The documentation server may be temporarily unavailable.\".format(e.code, AMA_URL)\n        elif 'urlerror' in error_type.lower() or 'httperror' in error_type.lower():\n            return None, \"Network error while trying to fetch latest AMA version from {1}: {0}\".format(str(e), AMA_URL)\n        elif 'name or service not known' in error_str:\n            return None, \"DNS resolution failed for {1}. Please check the URL and your network settings: {0}\".format(str(e), AMA_URL)\n        elif 'connection refused' in error_str:\n            return None, \"Connection refused while trying to connect to {1}. The server may be down: {0}\".format(str(e), AMA_URL)\n        elif 'network is unreachable' in error_str:\n            return None, \"Network is unreachable while trying to connect to {1}. 
Please check your network configuration: {0}\".format(str(e), AMA_URL)\n        else:\n            return None, \"Unexpected error while trying to fetch latest AMA version from {1}: {0}\".format(str(e), AMA_URL)\n\n    try:\n        # Ensure we have a string for both Python 2 and 3 compatibility\n        if sys.version_info[0] == 3 and isinstance(r, bytes):\n            # Python 3: convert bytes to string\n            r = r.decode('utf-8')\n        # Python 2: urllib2.urlopen().read() returns str, which works fine with regex\n            \n        # Find all table rows in tbody and extract all 4th columns (Linux columns)\n        # This approach is more robust and handles missing values and multiple rows\n        tbody_pattern = r'<tbody>(.*?)</tbody>'\n        tbody_match = re.search(tbody_pattern, r, re.DOTALL)\n        \n        if not tbody_match:\n            return None, \"Could not find version table in Microsoft documentation\"\n        \n        tbody_content = tbody_match.group(1)\n        \n        # Find all table rows\n        row_pattern = r'<tr[^>]*>(.*?)</tr>'\n        rows = re.findall(row_pattern, tbody_content, re.DOTALL)\n        \n        latest_version = None\n        \n        # Process each row to find the latest version\n        # Since rows are in chronological order (newest first), we want the first non-empty row\n        for row in rows:\n            # Extract all cells from this row\n            cell_pattern = r'<td[^>]*>(.*?)</td>'\n            cells = re.findall(cell_pattern, row, re.DOTALL)\n            \n            # Check if we have at least 4 columns and the 4th column (Linux) is not empty\n            if len(cells) >= 4:\n                linux_cell = cells[3]  # 4th column (index 3)\n                \n                # Remove HTML tags and normalize whitespace\n                # First replace <br> tags with spaces to avoid concatenation\n                clean_content = re.sub(r'<br[^>]*>', ' ', linux_cell)\n                # Remove all other HTML tags (including superscript)\n                clean_content = re.sub(r'<[^>]+>', '', clean_content)\n                # Normalize whitespace\n                clean_content = re.sub(r'\\s+', ' ', clean_content).strip()\n                \n                # Skip empty cells\n                if not clean_content or clean_content.lower() in ['', 'none', 'n/a']:\n                    continue  # Go to next row\n                \n                # Handle version ranges like \"1.26.2-1.26.5\"\n                # Replace hyphens between versions with commas for easier parsing\n                clean_content = re.sub(r'(\\d+\\.\\d+\\.\\d+(?:\\.\\d+)?)\\s*-\\s*(\\d+\\.\\d+\\.\\d+(?:\\.\\d+)?)', r'\\1, \\2', clean_content)\n                \n                # Find all version numbers in this cell (handles multiple versions)\n                # More flexible regex that handles superscript and other text\n                version_matches = re.findall(r'(\\d+\\.\\d+\\.\\d+(?:\\.\\d+)?)', clean_content)\n                \n                if version_matches:\n                    # If multiple versions found, take the highest one from this cell\n                    cell_latest = None\n                    for version in version_matches:\n                        if cell_latest is None or not comp_versions_ge(cell_latest, version):\n                            cell_latest = version\n                    \n                    # Since this is the first non-empty row we found, use this as the latest\n                    latest_version = 
cell_latest\n                    break  # Stop processing rows since we found the latest version\n        \n        if not latest_version:\n            return None, \"No version numbers found in Linux columns of Microsoft documentation\"\n        \n        # Compare with current version\n        if comp_versions_ge(curr_version, latest_version):\n            return None, None  # Current version is up to date\n        else:\n            return latest_version, None  # New version available\n            \n    except Exception as e:\n        return None, \"Error parsing version information from Microsoft documentation: {0}\".format(str(e))\n    return None, None\n\n# \ndef comp_versions_ge(version1, version2):\n    \"\"\"\n    compare two versions, see if the first is newer than / the same as the second\n    \"\"\"\n    versions1 = [int(v) for v in version1.split(\".\")]\n    versions2 = [int(v) for v in version2.split(\".\")]\n    for i in range(max(len(versions1), len(versions2))):\n        v1 = versions1[i] if i < len(versions1) else 0\n        v2 = versions2[i] if i < len(versions2) else 0\n        if v1 > v2:\n            return True\n        elif v1 < v2:\n            return False\n    return True\n\ndef ask_update_old_version(ama_version, curr_ama_version):\n    print(\"--------------------------------------------------------------------------------\")\n    print(\"You are currently running AMA Version {0}. There is a newer version\\n\"\\\n          \"available which may fix your issue (version {1}).\".format(ama_version, curr_ama_version))\n    answer = get_input(\"Do you want to update? (y/n)\", (lambda x : x.lower() in ['y','yes','n','no']),\\\n                       \"Please type either 'y'/'yes' or 'n'/'no' to proceed.\")\n    # user does want to update\n    if (answer.lower() in ['y', 'yes']):\n        print(\"--------------------------------------------------------------------------------\")\n        print(\"Please follow the instructions given here:\")\n        print(\"\\n    https://docs.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-manage\\n\")\n        return USER_EXIT\n    # user doesn't want to update\n    elif (answer.lower() in ['n', 'no']):\n        print(\"Continuing on with troubleshooter...\")\n        print(\"--------------------------------------------------------------------------------\")\n        return NO_ERROR\n\ndef check_ama(interactive):\n    (ama_version, e) = get_package_version('azuremonitoragent')\n    if e is not None:\n        error_info.append((e,))\n        return ERR_AMA_INSTALL\n\n    ama_version = ama_version.split('-')[0]\n    if not comp_versions_ge(ama_version, '1.21.0'):\n        error_info.append((ama_version,))\n        return ERR_OLD_AMA_VER\n\n    print(\"Current AMA version: {0}\".format(ama_version))\n    (newer_ama_version, e) = get_latest_ama_version(ama_version)\n    \n    if newer_ama_version is None:\n        if e is None:\n            # No error and no newer version found - current version is up to date\n            print(\"AMA version is up to date (latest version)\")\n            return NO_ERROR\n        else:\n            # There was an error fetching the latest version\n            print(\"Unable to determine latest AMA version\")\n            print(\"Error: {0}\".format(e))\n            \n            # Add error details to error_info for reporting\n            error_info.append((e,))\n            \n            # Check if we have general internet connectivity\n            checked_internet = 
check_internet_connect()\n            if checked_internet != NO_ERROR:\n                # No internet connectivity - this is a broader issue\n                print(\"Internet connectivity test also failed. Skipping version check...\")\n                print(\"This may indicate broader network connectivity issues.\")\n                print(\"--------------------------------------------------------------------------------\")\n                return ERR_GETTING_AMA_VER  # Return error code for version check failure\n            else:\n                # Internet works but AMA version check failed - this might be specific to the documentation site\n                print(\"Internet connectivity is working, but unable to access AMA documentation.\")\n                print(\"This could be due to firewall restrictions or temporary server issues.\")\n                print(\"The troubleshooter will continue, but version information may be outdated.\")\n                print(\"--------------------------------------------------------------------------------\")\n                return ERR_GETTING_AMA_VER  # Return error code for version check failure\n    else:\n        # Found a newer version available\n        print(\"Update available: {0} -> {1}\".format(ama_version, newer_ama_version))\n        if interactive:\n            if ask_update_old_version(ama_version, newer_ama_version) == USER_EXIT:\n                return USER_EXIT\n\n    return NO_ERROR"
  },
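  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/example_version_compare_sketch.py",
    "content": "\"\"\"\nIllustrative sketch only -- not shipped with the troubleshooter. Worked cases\nfor the comparison that check_ama.comp_versions_ge implements (is the first\nversion newer than or equal to the second?). Reimplemented here, component by\ncomponent with missing parts treated as 0, so the file runs standalone.\n\"\"\"\n\ndef comp_versions_ge(version1, version2):\n    v1 = [int(v) for v in version1.split('.')]\n    v2 = [int(v) for v in version2.split('.')]\n    for i in range(max(len(v1), len(v2))):\n        a = v1[i] if i < len(v1) else 0\n        b = v2[i] if i < len(v2) else 0\n        if a != b:\n            # the first differing component decides the comparison\n            return a > b\n    return True\n\nif __name__ == '__main__':\n    print(comp_versions_ge('1.21.0', '1.9'))   # True: 21 > 9 as numbers, not strings\n    print(comp_versions_ge('1.9', '1.21.0'))   # False\n    print(comp_versions_ge('1.26', '1.26.0'))  # True: missing parts count as 0\n"
  },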
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/check_os.py",
    "content": "from __future__ import absolute_import\nimport platform\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import find_vm_bits, find_vm_distro\nfrom . import supported_distros\n\n    \ndef format_alternate_versions(supported_dist, versions):\n    \"\"\"\n    print out warning if running the wrong version of OS\n    \"\"\"\n    last = versions.pop()\n    if (versions == []):\n        s = \"{0}\".format(last)\n    else:\n        s = \"{0} or {1}\".format(', '.join(versions), last)\n    return s\n\n\ndef check_vm_supported(vm_dist, vm_ver):\n    if platform.machine() == 'aarch64':\n        supported_dists = supported_distros.supported_dists_aarch64\n    else:\n        supported_dists = supported_distros.supported_dists_x86_64\n\n    vm_supported = False\n\n    # find VM distribution in supported list\n    vm_supported_dist = None\n    for supported_dist in (supported_dists.keys()):\n        if (not vm_dist.lower().startswith(supported_dist)):\n            continue\n        vm_supported_dist = supported_dist\n        # check if version is supported\n        vm_ver_split = vm_ver.split('.')\n        for supported_ver in (supported_dists[supported_dist]):\n            supported_ver_split = supported_ver.split('.')\n            vm_ver_match = True\n            # try matching VM version with supported version\n            for (idx, supported_ver_num) in enumerate(supported_ver_split):\n                try:\n                    supported_ver_num = int(supported_ver_num)\n                    vm_ver_num = int(vm_ver_split[idx])\n                    if (vm_ver_num is not supported_ver_num):\n                        vm_ver_match = False\n                        break\n                except (IndexError, ValueError) as e:\n                    vm_ver_match = False\n                    break\n                \n            # check if successful in matching\n            if (vm_ver_match):\n                vm_supported = True\n                break\n\n        # check if any version successful in matching\n        if (vm_supported):\n            return NO_ERROR\n\n    # VM distribution is supported, but not current version\n    if (vm_supported_dist != None):\n        versions = supported_dists[vm_supported_dist]\n        alt_vers = format_alternate_versions(vm_supported_dist, versions)\n        error_info.append((vm_dist, vm_ver, alt_vers))\n        return ERR_OS_VER\n\n    # VM distribution isn't supported\n    else:\n        error_info.append((vm_dist,))\n        return ERR_OS\n\n\ndef check_os():\n    if platform.machine() == 'x86_64':\n        cpu_bits = find_vm_bits()\n        if (not cpu_bits == '64-bit'):\n            return ERR_BITS\n\n    # get OS version\n    (vm_dist, vm_ver, e) = find_vm_distro()\n    if (vm_dist == None or vm_ver == None):\n        error_info.append((e,))\n        return ERR_FINDING_OS\n    \n    # check if OS version is supported\n    return check_vm_supported(vm_dist, vm_ver)"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/check_pkgs.py",
    "content": "import os\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import get_package_version, find_ama_version, is_metrics_configured\n\nMETRICS_FIILE = \"/etc/opt/microsoft/azuremonitoragent/config-cache/metricCounters.json\"\n\ndef check_packages():\n    # check azuremonitoragent rpm/dpkg\n    (ama_vers, e) = find_ama_version()\n    if (ama_vers == None):\n        error_info.append((e,))\n        return ERR_AMA_INSTALL\n    if (len(ama_vers) > 1):\n        return ERR_MULTIPLE_AMA\n    \n    # find subcomponent binaries\n    subcomponents = ['mdsd', 'agentlauncher', 'amacoreagent', 'fluent-bit']\n    \n    if not os.path.isfile(METRICS_FIILE):\n        return ERR_COUNTER_FILE_MISSING\n    if is_metrics_configured():\n        subcomponents.append('MetricsExtension')\n        subcomponents.append('telegraf')\n    missed_subcomponent = []\n    for subcomponent in subcomponents:\n        bin_file = '/opt/microsoft/azuremonitoragent/bin/{0}'.format(subcomponent)\n        if (not os.path.isfile(bin_file)):\n            missed_subcomponent.append(subcomponent)\n    if len(missed_subcomponent) > 0:\n        error_info.append((', '.join(missed_subcomponent),))\n        return ERR_SUBCOMPONENT_INSTALL\n    return NO_ERROR\n\ndef check_syslog():\n    pkg_version, e = get_package_version('rsyslog')\n    if (pkg_version != None):\n        return NO_ERROR\n    pkg_version, e = get_package_version('syslog-ng')\n    if (pkg_version != None):\n        return NO_ERROR\n    pkg_version, e = get_package_version('syslog-ng-core')\n    if (pkg_version != None):\n        return NO_ERROR\n    return ERR_LOG_DAEMON"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/install.py",
    "content": "import os\n\nfrom error_codes  import *\nfrom errors       import error_info, is_error, print_errors\nfrom .check_os    import check_os\nfrom .check_pkgs  import check_packages, check_syslog\nfrom .check_ama   import check_ama\nfrom helpers      import find_package_manager\n\ndef check_space():\n    \"\"\"\n    check space in MB for each main directory\n    \"\"\"\n    dirnames = [\"/etc\", \"/opt\", \"/var\"]\n    for dirname in dirnames:\n        space = os.statvfs(dirname)\n        free_space = space.f_bavail * space.f_frsize / 1024 / 1024\n        if (free_space < 500):\n            error_info.append((dirname, free_space))\n            return ERR_FREE_SPACE\n    return NO_ERROR\n\n\ndef check_pkg_manager():\n    pkg_manager = find_package_manager()\n    if (pkg_manager == \"\"):\n        return ERR_PKG_MANAGER\n    return NO_ERROR\n\ndef check_syslog_user():\n    with open('/etc/passwd', 'r') as fp:\n        for line in fp:\n            if line.startswith('syslog:'):\n                return NO_ERROR\n    return ERR_SYSLOG_USER\n\ndef check_installation(interactive, err_codes=True, prev_success=NO_ERROR):\n    \"\"\"\n    check all packages are installed\n    \"\"\"\n    print(\"CHECKING INSTALLATION...\")\n    success = prev_success\n    \n    # check Supported OS / version\n    print(\"Checking if running a supported OS version...\")\n    checked_os = check_os()\n    if (is_error(checked_os)):\n        return print_errors(checked_os)\n    else:\n        success = print_errors(checked_os)\n    \n    # check Available disk space\n    print(\"Checking if enough disk space is available...\")\n    checked_space = check_space()\n    if (is_error(checked_space)):\n        return print_errors(checked_space)\n    else:\n        success = print_errors(checked_space)\n        \n    # check Package manager (dpkg/rpm)\n    print(\"Checking if machine has a supported package manager...\")\n    checked_pkg_manager = check_pkg_manager()\n    if (is_error(checked_pkg_manager)):\n        return print_errors(checked_pkg_manager)\n    else:\n        success = print_errors(checked_pkg_manager)\n    \n    # check package + subcomponents installation states\n    print(\"Checking if packages and subcomponents are installed correctly...\")\n    checked_packages = check_packages()\n    if (is_error(checked_packages)):\n        return print_errors(checked_packages)\n    else:\n        success = print_errors(checked_packages)\n        \n    # check AMA version installed\n    print(\"Checking if running a supported version of AMA...\")\n    checked_ama = check_ama(interactive)\n    if (is_error(checked_ama)):\n        return print_errors(checked_ama)\n    else:\n        success = print_errors(checked_ama)\n        \n    # check Existence of rsyslog or syslog-ng\n    print(\"Checking if rsyslog or syslog-ng exists...\")\n    checked_syslog = check_syslog()\n    if (is_error(checked_syslog)):\n        return print_errors(checked_syslog)\n    else:\n        success = print_errors(checked_syslog)\n\n    # check Syslog user created successfully\n    print(\"Checking if syslog user exists...\")\n    checked_syslog_user = check_syslog_user()\n    if (is_error(checked_syslog_user)):\n        return print_errors(checked_syslog_user)\n    else:\n        success = print_errors(checked_syslog_user)\n    print(\"============================================\")\n    return success"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/install/supported_distros.py",
    "content": "supported_dists_x86_64 = {'redhat' : ['7', '8', '9', '10'], # Rhel\n                       'centos' : ['7', '8'], # CentOS\n                       'oracle' : ['7', '8', '9'], # Oracle\n                       'ol' : ['7', '8', '9'], # Oracle Linux\n                       'debian' : ['9', '10', '11', '12', '13'], # Debian\n                       'ubuntu' : ['16.04', '18.04', '20.04', '22.04', '24.04'], # Ubuntu\n                       'suse' : ['12', '15', '16'], 'sles' : ['12', '15', '16'], # SLES\n                       'mariner' : ['2'], # Mariner\n                       'azurelinux' : ['3'], # Azure Linux / Mariner 3\n                       'rocky' : ['8', '9'], # Rocky\n                       'alma' : ['8', '9'], # Alma\n                       'opensuse' : ['15'], # openSUSE\n                       'amzn' : ['2', '2023'] # Amazon Linux 2\n}\n\nsupported_dists_aarch64 = {'redhat' : ['8', '9', '10'], # Rhel\n                    'ubuntu' : ['18.04', '20.04', '22.04', '24.04'], # Ubuntu\n                    'alma' : ['8'], # Alma\n                    'centos' : ['7'], # CentOS\n                    'mariner' : ['2'], # Mariner 2\n                    'azurelinux' : ['3'], # Azure Linux / Mariner 3\n                    'sles' : ['15', '16'], # SLES\n                    'debian' : ['11', '12', '13'], # Debian\n                    'rocky linux' : ['8', '9'], # Rocky\n                    'rocky' : ['8', '9'] # Rocky\n}"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/logcollector.py",
    "content": "import datetime\nimport glob\nimport os\nimport platform\nimport shutil\nimport json\n\nimport helpers\nfrom error_codes        import *\nfrom connect.check_imds import check_metadata\nfrom metrics_troubleshooter.metrics_troubleshooter import run_metrics_troubleshooter\n\n\nDPKG_CMD = \"dpkg -s azuremonitoragent\"\nRPM_CMD = \"rpm -qi azuremonitoragent\"\nPS_CMD = \"ps -ef | grep {0} | grep -v grep\"\nOPENSSL_CMD = \"echo | openssl s_client -connect {0}:443 -brief\"\nSYSTEMCTL_CMD = \"systemctl status {0} --no-pager\"\nJOURNALCTL_CMD = \"journalctl -u {0} --no-pager --since \\\"30 days ago\\\" > {1}\"\nPS_CMD_CPU = \"ps aux --sort=-pcpu | head -10\"\nPS_CMD_RSS = \"ps aux --sort -rss | head -10\"\nPS_CMD_VSZ = \"ps aux --sort -vsz | head -10\"\nDU_CMD = \"du -h -d 1 {0} /var/opt/microsoft/azuremonitoragent/events\"\nVAR_DU_CMD = \"du -h -d 1 {0} /var\"\nLS_CMD = \"ls -al {0}\"\nNAMEI_CMD = \"namei -om {0}\"\nTAIL_SYSLOG_CMD = \"tail -10000 /var/log/{0} > {1}\"\nArcSettingsFile = '/var/opt/azcmagent/localconfig.json'\nPERMISSION_CHECK_FILES = [\"/etc/opt/microsoft/azuremonitoragent/config-cache\",\n                            \"/etc/opt/microsoft/azuremonitoragent\",\n                            \"/var/opt/microsoft/azuremonitoragent\",\n                            \"/var/run/azuremonitoragent\",\n                            \"/opt/microsoft/azuremonitoragent\",\n                            \"/run/azuremonitoragent\",\n                            \"/var/lib/waagent/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-*\"]\n\n\n# File copying functions\n\ndef copy_file(src, dst):\n    if (os.path.isfile(src)):\n        print(\"Copying file {0}\".format(src))\n        try:\n            if (not os.path.isdir(dst)):\n                os.mkdir(dst)\n            shutil.copy2(src, dst)\n        except Exception as e:\n            print(\"ERROR: Could not copy {0}: {1}\".format(src, e))\n            print(\"Skipping over file {0}\".format(src))\n    else:\n        print(\"File {0} doesn't exist, skipping\".format(src))\n    return\n\n\ndef copy_dircontents(src, dst):\n    if (os.path.isdir(src)):\n        print(\"Copying contents of directory {0}\".format(src))\n        try:\n            shutil.copytree(src, dst)\n            auth_token_path = os.path.join(dst, \"metrics_configs\", \"AuthToken-MSI.json\")\n            if (os.path.isfile(auth_token_path)):\n                print(\"Found AuthToken-MSI.json\")\n                try:\n                    with open(auth_token_path, 'r') as auth_token:\n                        auth_token_json = json.load(auth_token)\n                    if (auth_token_json and \"access_token\" in auth_token_json):\n                        print(\"Removing access_token value from AuthToken-MSI.json\")\n                        auth_token_json[\"access_token\"] = \"\"\n                        with open(auth_token_path, 'w') as auth_token:\n                            json.dump(auth_token_json, auth_token, indent=4)\n                        print(\"Successfully removed access_token value from AuthToken-MSI.json\")\n                except Exception as e:\n                    print(\"ERROR: Could not decode JSON from {0}: {1}\".format(auth_token_path, e))\n        except Exception as e:\n            print(\"ERROR: Could not copy {0}: {1}\".format(src, e))\n            print(\"Skipping over contents of directory {0}\".format(src))\n    else:\n        print(\"Directory {0} doesn't exist, skipping\".format(src))\n    return\n\n\n\n\n# Log collecting functions\n\ndef 
collect_process_environ(output_dirpath, process_name, outfile_handle=None):\n    \"\"\"\n    Collect environment variables for a specific process.\n    If outfile_handle is provided, writes to that file handle (for main log).\n    If outfile_handle is None, creates a separate file in the process directory.\n    \"\"\"\n    \n    if outfile_handle is None:\n        # Create separate file mode\n        process_dir = os.path.join(output_dirpath, process_name)\n        if not os.path.isdir(process_dir):\n            os.makedirs(process_dir)\n        \n        environ_file_path = os.path.join(process_dir, \"{0}_environ.txt\".format(process_name))\n        \n        try:\n            with open(environ_file_path, 'w') as environ_file:\n                _write_process_environ_data(environ_file, process_name, separate_file=True)\n            print(\"{0} environment variables saved to {1}\".format(process_name.upper(), environ_file_path))\n        except Exception as e:\n            print(\"ERROR: Could not create {0} environment variables file: {1}\".format(process_name, e))\n    else:\n        # Write to existing file handle mode (for main log)\n        _write_process_environ_data(outfile_handle, process_name, separate_file=False)\n\n\ndef _write_process_environ_data(file_handle, process_name, separate_file=True):\n    \"\"\"Helper function to write process environment data to a file handle\"\"\"\n    if separate_file:\n        # Format for separate file\n        file_handle.write(\"{0} Environment Variables Collection\\n\".format(process_name.upper()))\n        file_handle.write(\"=====================================\\n\")\n        file_handle.write(\"Collected on: {0}\\n\\n\".format(datetime.datetime.utcnow().isoformat()))\n    else:\n        # Format for main log file\n        file_handle.write(\"{0} Environment Variables:\\n\".format(process_name.upper()))\n        file_handle.write(\"========================================\\n\")\n    \n    # Get all process PIDs\n    process_pids_output = helpers.run_cmd_output(\"pidof {0}\".format(process_name))\n    if process_pids_output.strip():\n        process_pids = process_pids_output.strip().split()\n        for pid in process_pids:\n            file_handle.write(\"PID: {0}\\n\".format(pid))\n            environ_path = \"/proc/{0}/environ\".format(pid)\n            if os.path.isfile(environ_path):\n                try:\n                    with open(environ_path, 'rb') as proc_environ_file:\n                        environ_data = proc_environ_file.read()\n                        # Convert null-separated variables to readable format\n                        # Use try/except for Python 2/3 compatibility with decode errors parameter\n                        try:\n                            environ_vars = environ_data.decode('utf-8', errors='replace').replace('\\x00', '')\n                        except TypeError:\n                            # Python 2.6 doesn't support errors parameter\n                            environ_vars = environ_data.decode('utf-8').replace('\\x00', '')\n                        file_handle.write(\"{0}\\n\".format(environ_vars))\n                except Exception as e:\n                    file_handle.write(\"Error reading environment variables for PID {0}: {1}\\n\".format(pid, e))\n            else:\n                file_handle.write(\"Environment file not found for PID {0}\\n\".format(pid))\n            file_handle.write(\"=====================================\\n\")\n    else:\n        file_handle.write(\"No {0} processes 
found\\n\".format(process_name))\n    \n    if not separate_file:\n        # Add separator for main log file\n        file_handle.write(\"--------------------------------------------------------------------------------\\n\")\n\ndef collect_logs(output_dirpath, pkg_manager):\n    # collect MDSD information\n    copy_file(\"/etc/default/azuremonitoragent\", os.path.join(output_dirpath,\"mdsd\"))\n    copy_file(\"/var/opt/microsoft/azuremonitoragent/events/taskstate.json\", os.path.join(output_dirpath,\"mdsd\"))\n    copy_dircontents(\"/var/opt/microsoft/azuremonitoragent/log\", os.path.join(output_dirpath,\"mdsd\",\"logs\"))\n    # collect MDSD environment variables\n    collect_process_environ(output_dirpath, \"mdsd\")\n    # collect AMA Core Agent environment variables\n    collect_process_environ(output_dirpath, \"amacoreagent\")\n    # collect AMA DCR\n    copy_dircontents(\"/etc/opt/microsoft/azuremonitoragent\", os.path.join(output_dirpath,\"DCR\"))\n\n    # get all AzureMonitorLinuxAgent-* directory names\n    for config_dir in filter((lambda x : x.startswith(\"Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-\")), os.listdir(\"/var/lib/waagent\")):\n        # collect AMA config and status information for all AzureMonitorLinuxAgent-* directories\n        ver = (config_dir.split('-'))[-1]\n        copy_dircontents(os.path.join(\"/var/lib/waagent\",config_dir,\"status\"), os.path.join(output_dirpath,ver+\"-status\"))\n        copy_dircontents(os.path.join(\"/var/lib/waagent\",config_dir,\"config\"), os.path.join(output_dirpath,ver+\"-config\"))\n\n    # collect system logs\n    system_logs = \"\"\n    if (pkg_manager == \"dpkg\"):\n        system_logs = \"syslog\"\n    elif (pkg_manager == \"rpm\"):\n        system_logs = \"messages\"\n    if (system_logs != \"\"):\n        for systemlog_file in filter((lambda x : x.startswith(system_logs)), os.listdir(\"/var/log\")):\n            helpers.run_cmd_output(TAIL_SYSLOG_CMD.format(systemlog_file, os.path.join(output_dirpath,\"system_logs\")))\n\n    # collect rsyslog information (if present)\n    copy_file(\"/etc/rsyslog.conf\", os.path.join(output_dirpath,\"rsyslog\"))\n    copy_dircontents(\"/etc/rsyslog.d\", os.path.join(output_dirpath,\"rsyslog\",\"rsyslog.d\"))\n    if (os.path.isfile(\"/etc/rsyslog.conf\")):\n        helpers.run_cmd_output(JOURNALCTL_CMD.format(\"rsyslog\", os.path.join(output_dirpath,\"rsyslog\",\"journalctl_output.log\")))\n    # collect syslog-ng information (if present)\n    copy_dircontents(\"/etc/syslog-ng\", os.path.join(output_dirpath,\"syslog-ng\"))\n\n    return\n\n\ndef collect_arc_logs(output_dirpath, pkg_manager):\n    # collect GC Extension logs\n    copy_dircontents(\"/var/lib/GuestConfig/ext_mgr_logs\", os.path.join(output_dirpath,\"GC_Extension\"))\n    # collect AMA Extension logs\n    for config_dir in filter((lambda x : x.startswith(\"Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-\")), os.listdir(\"/var/lib/GuestConfig/extension_logs\")):\n        # collect AMA config and status information for all AzureMonitorLinuxAgent-* directories\n        ver = (config_dir.split('-'))[-1]\n        copy_dircontents(os.path.join(\"/var/lib/GuestConfig/extension_logs\",config_dir), os.path.join(output_dirpath,ver+\"-extension_logs\"))\n\n    copy_file(ArcSettingsFile, os.path.join(output_dirpath,\"Arc\"))\n    \n    # collect logs same to both Arc + Azure VM\n    collect_logs(output_dirpath, pkg_manager)\n\n    print(\"Arc logs collected\")\n    return\n\n\ndef collect_azurevm_logs(output_dirpath, pkg_manager):\n   
\n    # collect waagent logs\n    for waagent_file in filter((lambda x : x.startswith(\"waagent.log\")), os.listdir(\"/var/log\")):\n        copy_file(os.path.join(\"/var/log\",waagent_file), os.path.join(output_dirpath,\"waagent\"))\n    # collect AMA Extension logs\n    copy_dircontents(\"/var/log/azure/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent\", os.path.join(output_dirpath,\"Microsoft.Azure.Monitor.AzureMonitorLinuxAgent\"))\n    \n    # collect logs common to both Arc and Azure VMs\n    collect_logs(output_dirpath, pkg_manager)\n\n    print(\"Azure VM logs collected\")\n    return\n\n\ndef collect_metrics_logs(output_dirpath):\n    \"\"\"\n    Run the metrics troubleshooter and collect any MdmDataCollectionOutput_*.tar.gz files.\n    \"\"\"\n    print(\"Running metrics troubleshooter...\")\n    \n    # Run the metrics troubleshooter (it produces MdmDataCollectionOutput_*.tar.gz)\n    run_metrics_troubleshooter(interactive=False)\n    \n    # Find and copy any MdmDataCollectionOutput_*.tar.gz files from common locations\n    metrics_output_patterns = [\n        \"/tmp/MdmDataCollectionOutput_*.tar.gz\",\n        \"/var/tmp/MdmDataCollectionOutput_*.tar.gz\",\n        os.path.join(os.getcwd(), \"MdmDataCollectionOutput_*.tar.gz\")\n    ]\n    \n    metrics_dir = os.path.join(output_dirpath, \"metrics\")\n    files_found = False\n    \n    for pattern in metrics_output_patterns:\n        for metrics_file in glob.glob(pattern):\n            if not files_found:\n                if not os.path.isdir(metrics_dir):\n                    os.makedirs(metrics_dir)\n                files_found = True\n            print(\"Copying metrics output file: {0}\".format(metrics_file))\n            try:\n                shutil.copy2(metrics_file, metrics_dir)\n            except Exception as e:\n                print(\"ERROR: Could not copy {0}: {1}\".format(metrics_file, e))\n    \n    if not files_found:\n        print(\"No MdmDataCollectionOutput_*.tar.gz files found.\")\n    else:\n        print(\"Metrics logs collected\")\n    \n    return\n\n\n\n# Outfile function\n\ndef create_outfile(output_dirpath, logs_date, pkg_manager):\n    with open(os.path.join(output_dirpath,\"amalinux.out\"), 'w') as outfile:\n        outfile.write(\"Log Collection Start Time: {0}\\n\".format(logs_date))\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # detected OS + version\n        vm_dist, vm_ver, _ = helpers.find_vm_distro()\n        if (vm_dist and vm_ver):\n            outfile.write(\"Linux OS detected: {0}\\n\".format(vm_dist))\n            outfile.write(\"Linux OS version detected: {0}\\n\".format(vm_ver))\n        else:\n            outfile.write(\"Indeterminate OS.\\n\")\n\n        # detected package manager\n        if (pkg_manager != \"\"):\n            outfile.write(\"Package manager detected: {0}\\n\".format(pkg_manager))\n        else:\n            outfile.write(\"Indeterminate package manager.\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # uname info\n        os_uname = os.uname()\n        outfile.write(\"Hostname: {0}\\n\".format(os_uname[1]))\n        outfile.write(\"Release Version: {0}\\n\".format(os_uname[2]))\n        outfile.write(\"Linux UName: {0}\\n\".format(os_uname[3]))\n        outfile.write(\"Machine Type: {0}\\n\".format(os_uname[4]))
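\n        # (indices above map into os.uname(): sysname, nodename, release, version, machine)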
outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # python version\n        outfile.write(\"Python Version: {0}\\n\".format(platform.python_version()))\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        \n        # /etc/os-release\n        if (os.path.isfile(\"/etc/os-release\")):\n            outfile.write(\"Contents of /etc/os-release:\\n\")\n            with open(\"/etc/os-release\", 'r') as os_info:\n                for line in os_info:\n                    outfile.write(line)\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # VM Metadata\n        attributes = ['azEnvironment', 'resourceId', 'location']\n        outfile.write(\"VM Metadata from IMDS:\\n\")\n        for attr in attributes:\n            attr_result = helpers.geninfo_lookup(attr)\n            if (not attr_result) and (check_metadata() == NO_ERROR):\n                attr_result = helpers.geninfo_lookup(attr)\n            if (attr_result != None):\n                outfile.write(\"{0}: {1}\\n\".format(attr, attr_result))\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n \n        # AMA install status\n        (ama_vers, _) = helpers.find_ama_version()\n        (ama_installed, ama_unique) = helpers.check_ama_installed(ama_vers)\n        outfile.write(\"AMA Install Status: {0}\\n\".format(\"installed\" if ama_installed else \"not installed\"))\n        if (ama_installed):\n            if (not ama_unique):\n                outfile.write(\"Multiple AMA versions detected: {0}\\n\".format(', '.join(ama_vers)))\n            else:\n                outfile.write(\"AMA Version: {0}\\n\".format(ama_vers[0]))\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # connection to endpoints\n        wkspc_id, wkspc_region, agent_settings, e = helpers.find_dcr_workspace()\n        if e == None:\n            outfile.write(\"Workspace ID: {0}\\n\".format(str(wkspc_id)))\n            outfile.write(\"Workspace region: {0}\\n\".format(str(wkspc_region)))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n            if agent_settings != {}:\n                outfile.write(\"AgentSettinsgs file found: {0}\\n\".format(str(agent_settings)))\n\n        # AMA package info (dpkg/rpm)\n        if (pkg_manager == \"dpkg\"):\n            outfile.write(\"Output of command: {0}\\n\".format(DPKG_CMD))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(DPKG_CMD))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        elif (pkg_manager == \"rpm\"):\n            outfile.write(\"Output of command: {0}\\n\".format(RPM_CMD))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(RPM_CMD))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # ps -ef 
output\n        for process in [\"azuremonitoragent\", \"mdsd\", \"telegraf\"]:\n            ps_process_cmd = PS_CMD.format(process)\n            outfile.write(\"Output of command: {0}\\n\".format(ps_process_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(ps_process_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        \n        # process environment variables output\n        collect_process_environ(output_dirpath, \"mdsd\", outfile)\n        collect_process_environ(output_dirpath, \"amacoreagent\", outfile)\n\n        # rsyslog / syslog-ng status via systemctl\n        for syslogd in [\"rsyslog\", \"syslog-ng\"]:\n            systemctl_cmd = SYSTEMCTL_CMD.format(syslogd)\n            outfile.write(\"Output of command: {0}\\n\".format(systemctl_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(systemctl_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # ps aux output\n        for cmd in [PS_CMD_CPU, PS_CMD_RSS, PS_CMD_VSZ]:\n            outfile.write(\"Output of command: {0}\\n\".format(cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # du output on events folder\n        for flag in [\"\", \"--apparent-size\"]:\n            du_full_cmd = DU_CMD.format(flag)\n            outfile.write(\"Output of command: {0}\\n\".format(du_full_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(du_full_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n        # du output on /var folder\n        for flag in [\"\", \"--apparent-size\"]:\n            du_full_cmd = VAR_DU_CMD.format(flag)\n            outfile.write(\"Output of command: {0}\\n\".format(du_full_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(du_full_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n            \n        # file permission check\n        for file in PERMISSION_CHECK_FILES:\n            file_permission_cmd = LS_CMD.format(file)\n            outfile.write(\"Output of command: {0}\\n\".format(file_permission_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(file_permission_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        \n        # parent directory permission check\n        for file in 
PERMISSION_CHECK_FILES:\n            dir_permission_cmd = NAMEI_CMD.format(file)\n            outfile.write(\"Output of command: {0}\\n\".format(dir_permission_cmd))\n            outfile.write(\"========================================\\n\")\n            outfile.write(helpers.run_cmd_output(dir_permission_cmd))\n            outfile.write(\"--------------------------------------------------------------------------------\\n\")\n        outfile.write(\"--------------------------------------------------------------------------------\\n\")\n\n\n\n\n\n\n### MAIN FUNCTION BODY BELOW ###\n\n\n\ndef run_logcollector(output_location):\n    # check if Arc is being used\n    is_arc_vm = helpers.is_arc_installed()\n\n    # create directory to hold copied logs\n    vm_type = \"azurearc\" if is_arc_vm else \"azurevm\"\n    logs_date = str(datetime.datetime.utcnow().isoformat()).replace(\":\", \".\")  # ':' causes issues with tar\n    output_dirname = \"amalogs-{0}-{1}\".format(vm_type, logs_date)\n    output_dirpath = os.path.join(output_location, output_dirname)\n    try:\n        os.mkdir(output_dirpath)\n    except OSError as e:\n        print(\"ERROR: Could not create output directory: {0}\".format(e))\n        return\n\n    # get VM information needed for log collection\n    pkg_manager = helpers.find_package_manager()\n\n    # collect the logs\n    if (is_arc_vm):\n        print(\"Azure Arc detected, collecting logs for Azure Arc.\")\n        print(\"--------------------------------------------------------------------------------\")\n        collect_arc_logs(output_dirpath, pkg_manager)\n    else:\n        print(\"Azure Arc not detected, collecting logs for Azure VM.\")\n        print(\"--------------------------------------------------------------------------------\")\n        collect_azurevm_logs(output_dirpath, pkg_manager)\n    print(\"--------------------------------------------------------------------------------\")\n\n    # create out file (for simple checks)\n    print(\"Creating 'amalinux.out' file\")\n    create_outfile(output_dirpath, logs_date, pkg_manager)\n    print(\"--------------------------------------------------------------------------------\")\n\n    # collect metrics troubleshooter logs\n    print(\"Collecting metrics troubleshooter logs...\")\n    collect_metrics_logs(output_dirpath)\n    print(\"--------------------------------------------------------------------------------\")\n\n    # zip up logs\n    print(\"Zipping up logs and removing temporary output directory\")\n    tgz_filename = \"{0}.tgz\".format(output_dirname)\n    tgz_filepath = os.path.join(output_location, tgz_filename)\n    print(\"--------------------------------------------------------------------------------\")\n    print(helpers.run_cmd_output(\"cd {0}; tar -zcf {1} {2}\".format(output_location, tgz_filename, output_dirname)))\n    # restrict the archive so only the user who created it can read it\n    print(\"Setting permissions on the archive to 600 so only the user who created it can read it\")\n    print(\"--------------------------------------------------------------------------------\")\n    os.chmod(tgz_filepath, 0o600)\n    shutil.rmtree(output_dirpath, ignore_errors=True)\n\n    print(\"--------------------------------------------------------------------------------\")\n    print(\"You can find the AMA logs at the following location: {0}\".format(tgz_filepath))\n    return\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/main.py",
    "content": "import os\nimport sys\n\nfrom helpers        import get_input\nfrom logcollector   import run_logcollector\nfrom error_codes    import *\nfrom errors         import get_input, is_error, err_summary\nfrom install.install import check_installation\nfrom connect.connect import check_connection\nfrom general_health.general_health  import check_general_health\nfrom high_cpu_mem.high_cpu_mem      import check_high_cpu_memory\nfrom syslog_tst.syslog                  import check_syslog\nfrom custom_logs.custom_logs        import check_custom_logs\nfrom metrics_troubleshooter.metrics_troubleshooter import run_metrics_troubleshooter\n\n# check to make sure the user is running as root\ndef check_sudo():\n    if (os.geteuid() != 0):\n        print(\"The troubleshooter is not currently being run as root. In order to have accurate results, we ask that you run this troubleshooter as root.\")\n        print(\"NOTE: it will not add, modify, or delete any files without express permission.\")\n        print(\"Please try running the troubleshooter again with 'sudo'. Thank you!\")\n        return False\n    else:\n        return True\n\ndef check_all(interactive):\n    \"\"\"\n    Run all troubleshooter checks, continuing even if errors occur.\n    Collects all results and reports the most severe issue at the end.\n    \"\"\"\n    checks = [\n        (\"Installation\", check_installation),\n        (\"Connection\", check_connection),\n        (\"General Health\", check_general_health),\n        (\"High CPU/Memory Usage\", check_high_cpu_memory),\n        (\"Syslog\", check_syslog),\n        (\"Custom logs\", check_custom_logs),\n        (\"Metrics\", run_metrics_troubleshooter),\n    ]\n    \n    results = []\n    overall_status = NO_ERROR\n    \n    for i, (check_name, check_func) in enumerate(checks, 1):\n        print(\"================================================================================\")\n        print(\"Running check {0}/7: {1}...\".format(i, check_name))\n        \n        try:\n            result = check_func(interactive)\n            results.append((check_name, result))\n            \n            # Track the most severe error (higher error codes are more severe)\n            if is_error(result) and result > overall_status:\n                overall_status = result\n            elif not is_error(result) and result > overall_status and overall_status == NO_ERROR:\n                overall_status = result\n                \n            # Print immediate result for this check\n            if is_error(result):\n                print(\"[ERROR] {0}: ERROR (code {1})\".format(check_name, result))\n            elif result != NO_ERROR:\n                print(\"[WARN]  {0}: WARNING (code {1})\".format(check_name, result))\n            else:\n                print(\"[OK]    {0}: OK\".format(check_name))\n                \n        except Exception as e:\n            print(\"[EXCEPTION] {0}: EXCEPTION - {1}\".format(check_name, str(e)))\n            results.append((check_name, \"EXCEPTION: {0}\".format(str(e))))\n            overall_status = ERR_FOUND  # Set a generic error code\n    \n    # Summary of all results\n    print(\"\\n================================================================================\")\n    print(\"SUMMARY OF ALL CHECKS:\")\n    print(\"================================================================================\")\n    for check_name, result in results:\n        if isinstance(result, str) and result.startswith(\"EXCEPTION\"):\n            
print(\"[EXCEPTION] {0}: {1}\".format(check_name, result))\n        elif is_error(result):\n            print(\"[ERROR] {0}: ERROR (code {1})\".format(check_name, result))\n        elif result != NO_ERROR:\n            print(\"[WARN]  {0}: WARNING (code {1})\".format(check_name, result))\n        else:\n            print(\"[OK]    {0}: OK\".format(check_name))\n    \n    return overall_status\n\ndef collect_logs():\n    # get output directory for logs\n    print(\"Please input an existing, absolute filepath to a directory where the output for the zip file will be placed upon completion.\")\n    output_location = get_input(\"Output Directory\", (lambda x : os.path.isdir(x)), \\\n                                \"Please input an existing, absolute filepath.\")    \n    \n    print(\"Collecting AMA logs...\")\n    print(\"================================================================================\")\n    run_logcollector(output_location)\n\ndef print_results(success):\n    print(\"================================================================================\")\n    print(\"================================================================================\")\n    # print out all errors/warnings\n    if (len(err_summary) > 0):\n        print(\"ALL ERRORS/WARNINGS ENCOUNTERED:\")\n        for err in err_summary:\n            print(\"  {0}\".format(err))\n            print(\"--------------------------------------------------------------------------------\")\n        \n    # no errors found\n    if (success == NO_ERROR):\n        print(\"No errors were found.\")\n    # user requested to exit\n    elif (success == USER_EXIT):\n        return\n    # error found\n    else:\n        print(\"Please review the errors found above.\")\n\n''' \ngive information to user about next steps\n'''\ndef print_next_steps():\n    print(\"================================================================================\")\n    print(\"If you still have an issue, please run the troubleshooter again and collect the logs for AMA.\\n\"\\\n        \"In addition, please include the following information:\\n\"\\\n        \"  - Azure Subscription ID where the Log Analytics Workspace is located\\n\"\\\n        \"  - Workspace ID the agent has been onboarded to\\n\"\\\n        \"  - Workspace Name\\n\"\\\n        \"  - Region Workspace is located\\n\"\\\n        \"  - Pricing Tier assigned to the Workspace\\n\"\\\n        \"  - Linux Distribution on the VM\\n\"\\\n        \"  - Azure Monitor Agent Version\")\n\n    print(\"================================================================================\")\n    print(\"Restarting AMA can solve some of the problems. 
\"Restarting AMA can resolve some problems. If you need to restart Azure Monitor Agent on this machine, \"\\\n          \"please execute the following commands as the root user:\")\n    print(\"  $ cd /var/lib/waagent/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent-<agent version number>/\")\n    print(\"  $ ./shim.sh -disable\")\n    print(\"  $ ./shim.sh -enable\")\n    \n### MAIN FUNCTION BODY BELOW ###\n\n\n\ndef run_troubleshooter():\n    # check if running as sudo\n    if (not check_sudo()):\n        return\n    \n    # run all checks from command line\n    if len(sys.argv) > 1 and sys.argv[1] == '-A':\n        success = check_all(False)\n        print_results(success)\n        print_next_steps()\n        return\n    \n    # run log collector from command line\n    if len(sys.argv) > 1 and sys.argv[1] == '-L':\n        collect_logs()\n        return\n            \n    # check if want to run again\n    run_again = True\n\n    print(\"Welcome to the Azure Monitor Linux Agent Troubleshooter! What is your issue?\\n\")\n    while (run_again):\n        print(\"================================================================================\\n\"\\\n              \"1: Installation failures. \\n\"\\\n              \"2: Agent doesn't start or cannot connect to Log Analytics service.\\n\"\\\n              \"3: Agent in unhealthy state. \\n\"\\\n              \"4: Agent consuming high CPU/memory. \\n\"\\\n              \"5: Syslog not flowing. \\n\"\\\n              \"6: Custom logs not flowing. \\n\"\\\n              \"7: Metrics not flowing.\\n\"\\\n              \"================================================================================\\n\"\\\n              \"A: Run through all scenarios.\\n\"\\\n              \"L: Collect the logs for AMA.\\n\"\\\n              \"Q: Press 'Q' to quit.\\n\"\\\n              \"================================================================================\")\n        switcher = {\n            '1': check_installation,\n            '2': check_connection,\n            '3': check_general_health,\n            '4': check_high_cpu_memory,\n            '5': check_syslog,\n            '6': check_custom_logs,\n            '7': run_metrics_troubleshooter,\n            'A': check_all\n        }\n    \n        issue = get_input(\"Please select an option\",\\\n                        (lambda x : x.lower() in ['1','2','3','4','5','6','7','q','quit','l','a']),\\\n                        \"Please enter an integer corresponding with your issue (1-7) to\\n\"\\\n                        \"continue, 'A' to run through all scenarios, 'L' to run the log collector, or 'Q' to quit.\")\n        # quit troubleshooter\n        if (issue.lower() in ['q','quit']):\n            print(\"Exiting the troubleshooter...\")\n            return\n\n        # collect logs\n        if (issue.lower() == 'l'):\n            collect_logs()\n            return\n\n        # silent vs interactive mode\n        print(\"--------------------------------------------------------------------------------\")\n        print(\"The troubleshooter can be run in two different modes.\\n\"\\\n            \"  - Silent Mode runs through with no input required\\n\"\\\n            \"  - Interactive Mode includes extra checks that require input\")\n        mode = get_input(\"Do you want to run the troubleshooter in silent (s) or interactive (i) mode?\",\\\n                        (lambda x : x.lower() in ['s','silent','i','interactive','q','quit']),\\\n                        \"Please enter 's'/'silent' to run silent 
mode, 'i'/'interactive' to run \\n\"\\\n                            \"interactive mode, or 'q'/'quit' to quit.\")\n        if (mode.lower() in ['q','quit']):\n            print(\"Exiting the troubleshooter...\")\n            return\n        elif (mode.lower() in ['s','silent']):\n            print(\"Running troubleshooter in silent mode...\")\n            interactive_mode = False\n        elif (mode.lower() in ['i','interactive']):\n            print(\"Running troubleshooter in interactive mode...\")\n            interactive_mode = True\n\n        # run troubleshooter\n        section = switcher.get(issue.upper(), lambda: \"Invalid input\")\n        print(\"================================================================================\")\n        success = section(interactive=interactive_mode)\n    \n        print_results(success)\n\n        # if user ran single scenario, ask if they want to run again\n        if (issue in ['1', '2', '3', '4', '5', '6', '7']):\n            run_again = get_input(\"Do you want to run another scenario? (y/n)\",\\\n                                  (lambda x : x.lower() in ['y','yes','n','no']),\\\n                                  \"Please type either 'y'/'yes' or 'n'/'no' to proceed.\")\n            \n            if (run_again.lower() in ['y', 'yes']):\n                print(\"Please select another scenario below:\")\n            elif (run_again.lower() in ['n', 'no']):\n                run_again = False\n        else:\n            run_again = False\n            \n        print_next_steps()\n    return\n    \n\nif __name__ == '__main__':\n    run_troubleshooter()\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/metrics_troubleshooter/__init__.py",
    "content": "# metrics troubleshooter script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/metrics_troubleshooter/metrics_troubleshooter.py",
    "content": "import os\nimport subprocess\n\nfrom error_codes import *\n\n# Resolve absolute path to the script\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nTROUBLESHOOTER_FILE = os.path.abspath(os.path.join(SCRIPT_DIR, \"..\", \"..\", \"metrics_troubleshooter.sh\"))\n\ndef run_metrics_troubleshooter(interactive):\n    \"\"\"\n    Executes the metrics troubleshooter script.\n    \"\"\"\n    if not os.path.exists(TROUBLESHOOTER_FILE):\n        print(\"Metrics Troubleshooter script not found at: {}\".format(TROUBLESHOOTER_FILE))\n        return ERR_FOUND\n\n    status = None\n    if interactive:\n        print(\"================================================================================\")\n        print(\"Metrics Troubleshooter does not support interactive mode yet.\")\n        print(\"The troubleshooter produces `MdmDataCollectionOutput_.*tar.gz`, which is required for investigating the issue.\")\n\n    try:\n        proc = subprocess.Popen(\n            [\"/bin/sh\", TROUBLESHOOTER_FILE],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE\n        )\n        stdout, stderr = proc.communicate()\n        status = proc.returncode\n\n        if status != 0:\n            print(\"Error ({}): {}\".format(status, stderr.strip()))\n            # raise Exception or return False here if needed\n        else:\n            print(\"Troubleshooter output: {}\".format(stdout.strip()))\n\n    except Exception as e:\n        print(\"Unexpected error: {}\".format(str(e)))\n\n    return NO_ERROR\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/syslog_tst/__init__.py",
    "content": "# Syslog check helper script for AMA"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/syslog_tst/check_conf.py",
    "content": "import os\n\nfrom error_codes       import *\nfrom errors            import error_info\nfrom helpers           import geninfo_lookup, run_cmd_output\n\nCONF_ACCESS_CMD = 'sudo -u syslog test -r {0}; echo \"$?\"'\nSOCKET_ACCESS_CMD = 'sudo -u syslog test -{0} {1}; echo \"$?\"'\nAMA_SOCKET = \"/run/azuremonitoragent/default_syslog.socket\"\n\n\ndef check_conf_files():\n    # update syslog destination path with correct location\n    syslog_dest = geninfo_lookup('SYSLOG_DEST')\n    if (syslog_dest == None):\n        return ERR_SYSLOG\n\n    # verify syslog destination exists / not empty / accessible by syslog user\n    if (not os.path.isfile(syslog_dest)):\n        error_info.append(('file', syslog_dest))\n        return ERR_FILE_MISSING\n    if (os.stat(syslog_dest).st_size == 0):\n        error_info.append((syslog_dest,))\n        return ERR_FILE_EMPTY\n\n    if (run_cmd_output(CONF_ACCESS_CMD.format(syslog_dest)).strip() != '0'):\n        error_info.append(('file', syslog_dest, 'read'))\n        return ERR_CONF_FILE_PERMISSION\n    \n    return NO_ERROR\n\ndef check_socket():\n    if (not os.path.exists(AMA_SOCKET)):\n        error_info.append(('socket', AMA_SOCKET))\n        return ERR_FILE_MISSING\n    if (run_cmd_output(SOCKET_ACCESS_CMD.format('r', AMA_SOCKET)).strip() != '0'):\n        error_info.append(('socket', AMA_SOCKET, 'read'))\n        return ERR_CONF_FILE_PERMISSION\n    if (run_cmd_output(SOCKET_ACCESS_CMD.format('w', AMA_SOCKET)).strip() != '0'):\n        error_info.append(('socket', AMA_SOCKET, 'write'))\n        return ERR_CONF_FILE_PERMISSION\n    return NO_ERROR"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/syslog_tst/check_rsysng.py",
    "content": "import subprocess\n\nfrom error_codes import *\nfrom errors      import error_info\nfrom helpers     import general_info\n\nRSYSLOG_CONF = \"/etc/rsyslog.d/10-azuremonitoragent-omfwd.conf\"\nSYSLOG_NG_CONF = \"/etc/syslog-ng/conf.d/azuremonitoragent-tcp.conf\"\n\n\n\n# check syslog with systemctl\ndef check_sys_systemctl(service): \n    try:\n        sys_status = subprocess.check_output(['systemctl', 'status', service], \\\n                        universal_newlines=True, stderr=subprocess.STDOUT)\n        sys_lines = sys_status.split('\\n')\n        for line in sys_lines:\n            line = line.strip()\n            if line.startswith('Active: '):\n                stripped_line = line.lstrip('Active: ')\n                # exists and running correctly\n                if stripped_line.startswith('active (running) since '):\n                    return NO_ERROR\n                # exists but not running correctly\n                else:\n                    error_info.append((service, stripped_line, 'systemctl'))\n                    return ERR_SERVICE_STATUS\n    except subprocess.CalledProcessError as e:\n        # service not on machine\n        if (e.returncode == 4):\n            return ERR_SYSLOG\n        else:\n            error_info.append((service, e.output, 'systemctl'))\n            return ERR_SERVICE_STATUS\n\ndef check_services():\n    global general_info\n    checked_rsyslog = check_sys_systemctl('rsyslog')\n    # rsyslog successful\n    if (checked_rsyslog == NO_ERROR):\n        general_info['SYSLOG_DEST'] = RSYSLOG_CONF\n        return NO_ERROR\n\n    checked_syslog_ng = check_sys_systemctl('syslog-ng')\n    # syslog-ng successful\n    if (checked_syslog_ng == NO_ERROR):\n        general_info['SYSLOG_DEST'] = SYSLOG_NG_CONF\n        return NO_ERROR\n\n    # ran into error trying to get syslog\n    if ((checked_rsyslog==ERR_SERVICE_STATUS) or (checked_syslog_ng==ERR_SERVICE_STATUS)):\n        return ERR_SERVICE_STATUS\n\n    return ERR_SYSLOG\n"
  },
  {
    "path": "AzureMonitorAgent/ama_tst/modules/syslog_tst/syslog.py",
    "content": "from error_codes          import *\nfrom errors               import is_error, print_errors\nfrom .check_conf          import check_conf_files, check_socket\nfrom .check_rsysng        import check_services\n\ndef check_syslog(interactive, prev_success=NO_ERROR):\n    print(\"CHECKING FOR SYSLOG ISSUES...\")\n\n    success = prev_success\n\n    # check rsyslog / syslogng running\n    print(\"Checking if machine has rsyslog or syslog-ng running...\")\n    checked_services = check_services()\n    if (is_error(checked_services)):\n        return print_errors(checked_services)\n    else:\n        success = print_errors(checked_services)\n\n    # check for rsyslog / syslog-ng configuration files\n    print(\"Checking for syslog configuration files...\")\n    checked_conf_files = check_conf_files()\n    if (is_error(checked_conf_files)):\n        return print_errors(checked_conf_files)\n    else:\n        success = print_errors(checked_conf_files)\n\n    # check for syslog socket existence and permissions\n    print(\"Checking for syslog socket...\")\n    checked_socket = check_socket()\n    if (is_error(checked_socket)):\n        return print_errors(checked_socket)\n    else:\n        success = print_errors(checked_socket)\n    return success"
  },
  {
    "path": "AzureMonitorAgent/apply_version.sh",
    "content": "#! /bin/bash\n\nsource ./agent.version\n\necho \"AGENT_VERSION=$AGENT_VERSION\"\necho \"MDSD_DEB_PACKAGE_NAME=$MDSD_DEB_PACKAGE_NAME\"\necho \"MDSD_RPM_PACKAGE_NAME=$MDSD_RPM_PACKAGE_NAME\"\n\n\n# updating HandlerManifest.json\n# check for \"version\": \"x.x.x\",\nsed -i \"s/\\\"version\\\".*$/\\\"version\\\": \\\"$AGENT_VERSION\\\",/g\" HandlerManifest.json\n\n# updating agent.py\nsed -i \"s/^BundleFileNameDeb = .*$/BundleFileNameDeb = '$MDSD_DEB_PACKAGE_NAME'/\" agent.py\nsed -i \"s/^BundleFileNameRpm = .*$/BundleFileNameRpm = '$MDSD_RPM_PACKAGE_NAME'/\" agent.py\n\nsed -i \"s/AMA_VERSION/$AGENT_VERSION/\" services/metrics-extension-otlp.service\nsed -i \"s/AMA_VERSION/$AGENT_VERSION/\" services/metrics-extension-cmv2.service\n\n# updating manifest.xml\n# check <Version>...</Version>\nsed -i -e \"s|<Version>[0-9a-z.]\\{1,\\}</Version>|<Version>$AGENT_VERSION</Version>|g\" manifest.xml\n"
  },
  {
    "path": "AzureMonitorAgent/azuremonitoragentextension.logrotate",
    "content": "/var/log/azure/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent/extension.log\n{\n    copytruncate\n    rotate 7\n    daily\n    missingok\n    notifempty\n    delaycompress\n    compress\n    size 10M\n}\n/var/log/azure/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent/CommandExecution.log\n{\n    copytruncate\n    rotate 7\n    daily\n    missingok\n    notifempty\n    delaycompress\n    compress\n    size 10M\n}\n/var/log/azure/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent/telegraf.log\n{\n    copytruncate\n    rotate 7\n    daily\n    missingok\n    notifempty\n    delaycompress\n    compress\n    size 10M\n}\n\n"
  },
  {
    "path": "AzureMonitorAgent/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.Azure.Monitor</ProviderNameSpace>\n  <Type>AzureMonitorLinuxAgent</Type>\n  <Version>1.5.124</Version>\n  <Label>Microsoft Azure Monitoring Agent for Linux</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Azure Monitoring Agent for Linux</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://docs.microsoft.com/en-us/azure/azure-monitor/learn/quick-collect-linux-computer</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://msazure.visualstudio.com/DefaultCollection/One/_git/Compute-Runtime-Tux</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "AzureMonitorAgent/packaging.sh",
    "content": "#! /bin/bash\nset -e\nsource agent.version\n\nusage()\n{\n    local basename=`basename $0`\n    echo \"usage: ./$basename <path to mdsd-<version>.{.deb, .rpm}> [path for zip output]\"\n}\n\ninput_path=$1\noutput_path=$2\nPACKAGE_NAME=\"azuremonitor$AGENT_VERSION.zip\"\nif [[ \"$1\" == \"--help\" ]]; then\n    usage\n    exit 0\nelif [[ ! -d $input_path ]]; then\n    echo \"DEB/RPM files path '$input_path' not found\"\n    usage\n    exit 1\nfi\n\nif [[ \"$output_path\" == \"\" ]]; then\n    output_path=\"../\"\nfi\n\n# Packaging starts here\ncp -r ../Utils .\ncp ../Common/WALinuxAgent-2.0.16/waagent .\n\ncp -r  ../LAD-AMA-Common/metrics_ext_utils .\ncp -r  ../LAD-AMA-Common/telegraf_utils .\ncp -f  ../Diagnostic/services/metrics-sourcer.service services/metrics-sourcer.service\n\n# cleanup packages, ext\nrm -rf packages MetricsExtensionBin azureotelcollector amaCoreAgentBin AstExtensionBin agentLauncherBin mdsdBin fluentBitBin tmp\nmkdir -p packages MetricsExtensionBin azureotelcollector amaCoreAgentBin AstExtensionBin agentLauncherBin mdsdBin fluentBitBin\n\n# copy shell bundle to packages/\ncp $input_path/azuremonitoragent_$AGENT_VERSION* packages/\ncp $input_path/azuremonitoragent-$AGENT_VERSION* packages/\n\n# remove dynamic ssl packages\nrm -f packages/*dynamicssl*\n\n# validate HandlerManifest.json syntax\njq empty < HandlerManifest.json\n\nmkdir -p tmp\ncp $input_path/azuremonitoragent_$AGENT_VERSION*dynamicssl_x86_64.deb tmp/\nAMA_DEB_PACKAGE_NAME=$(find tmp/ -type f -name \"azuremonitoragent_*x86_64.deb\" -printf \"%f\\\\n\" | head -n 1)\nar vx tmp/$AMA_DEB_PACKAGE_NAME --output=tmp\ntar xvf tmp/data.tar.gz -C tmp\ncp tmp/opt/microsoft/azuremonitoragent/bin/mdsd mdsdBin/mdsd_x86_64\ncp tmp/opt/microsoft/azuremonitoragent/bin/mdsdmgr mdsdBin/mdsdmgr_x86_64\ncp tmp/opt/microsoft/azuremonitoragent/bin/fluent-bit fluentBitBin/fluent-bit_x86_64\nrm -rf tmp/\n\nmkdir -p tmp\ncp $input_path/azuremonitoragent_$AGENT_VERSION*dynamicssl_aarch64.deb tmp/\nAMA_DEB_PACKAGE_NAME=$(find tmp/ -type f -name \"azuremonitoragent_*aarch64.deb\" -printf \"%f\\\\n\" | head -n 1)\nar vx tmp/$AMA_DEB_PACKAGE_NAME --output=tmp\ntar xvf tmp/data.tar.gz -C tmp\ncp tmp/opt/microsoft/azuremonitoragent/bin/mdsd mdsdBin/mdsd_aarch64\ncp tmp/opt/microsoft/azuremonitoragent/bin/mdsdmgr mdsdBin/mdsdmgr_aarch64\ncp tmp/opt/microsoft/azuremonitoragent/bin/fluent-bit fluentBitBin/fluent-bit_aarch64\nrm -rf tmp/\n\ncp $input_path/x86_64/metricsextension MetricsExtensionBin/metricsextension_x86_64\ncp $input_path/aarch64/metricsextension MetricsExtensionBin/metricsextension_aarch64\n\ncp $input_path/azureotelcollector/* azureotelcollector/\ncp -r $input_path/AstExtension/* AstExtensionBin/\n\ncp $input_path/x86_64/amacoreagent amaCoreAgentBin/amacoreagent_x86_64\ncp $input_path/x86_64/liblz4x64.so amaCoreAgentBin/\n#cp $input_path/x86_64/libgrpc_csharp_ext.x64.so amaCoreAgentBin/\ncp $input_path/x86_64/agentlauncher agentLauncherBin/agentlauncher_x86_64\n\ncp $input_path/metrics_troubleshooter.sh ama_tst/\n\ncp $input_path/aarch64/amacoreagent amaCoreAgentBin/amacoreagent_aarch64\n#cp $input_path/aarch64/libgrpc_csharp_ext.arm64.so amaCoreAgentBin/\ncp $input_path/aarch64/agentlauncher agentLauncherBin/agentlauncher_aarch64\n\n# make the shim.sh file executable\nchmod +x shim.sh\n\n# sync the file copy\nsync\n\nif [[ -f $output_path/$PACKAGE_NAME ]]; then\n    echo \"Removing existing $PACKAGE_NAME ...\"\n    rm -f $output_path/$PACKAGE_NAME\nfi\n\necho \"Packaging extension $PACKAGE_NAME to 
$output_path\"\nexcluded_files=\"agent.version packaging.sh apply_version.sh update_version.sh\"\nzip -r $output_path/$PACKAGE_NAME * -x $excluded_files \"./test/*\" \"./extension-test/*\" \"./references\" \"./tmp\"\n\n# validate package size is within limits; these limits come from arc, ideally they are removed in the future\nmax_uncompressed_size=$((1000 * 1024 * 1024))\nmax_compressed_size=$((500 * 1024 * 1024))\n\n# easiest to validate by immediately unzipping versus trying to `du` with various exclusions \nunzip -d $output_path/unzipped $output_path/$PACKAGE_NAME\nuncompressed_size=$(du -sb $output_path/unzipped | cut -f1)\ncompressed_size=$(du -sb $output_path/$PACKAGE_NAME | cut -f1)\nrm -rf $output_path/unzipped\n\nif [[ $uncompressed_size -gt $max_uncompressed_size ]]; then\n    echo \"Uncompressed size of $PACKAGE_NAME is $uncompressed_size bytes, which exceeds the limit of $max_uncompressed_size bytes\"\n    exit 1\nfi\n\nif [[ $compressed_size -gt $max_compressed_size ]]; then\n    echo \"Compressed size of $PACKAGE_NAME is $compressed_size bytes, which exceeds the limit of $max_compressed_size bytes\"\n    exit 1\nfi\n\n# cleanup newly added dir or files\nrm -rf Utils/ waagent\n"
  },
  {
    "path": "AzureMonitorAgent/references",
    "content": "Utils/\n"
  },
  {
    "path": "AzureMonitorAgent/services/metrics-extension-cmv1.service",
    "content": "[Unit]\nDescription=Metrics Extension service for Linux Agent metrics sourcing\nAfter=network.target\n\n[Service]\nExecStart=%ME_BIN% -TokenSource MSI -Input influxdb_local -InfluxDbSocketPath %ME_INFLUX_SOCKET_FILE_PATH% -DataDirectory %ME_DATA_DIRECTORY% -LocalControlChannel -MonitoringAccount %ME_MONITORING_ACCOUNT% -LogLevel Error\nExecReload=/bin/kill -HUP $MAINPID\nKillMode=control-group\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "AzureMonitorAgent/services/metrics-extension-cmv2.service",
    "content": "[Unit]\nDescription=Metrics Extension service for Linux Agent metrics sourcing\nAfter=network.target\n\n[Service]\nEnvironment=\"OTLP_GRPC_HOST=127.0.0.1\"\nEnvironment=\"OTLP_GRPC_PORT=4317\"\nEnvironment=\"OTLP_GRPC_PROM_HOST=127.0.0.1\"\nEnvironment=\"OTLP_GRPC_PROM_PORT=4316\"\nEnvironmentFile=-/etc/metrics-extension.d/options.conf\nExecStart=%ME_BIN% -TokenSource AMCS -ManagedIdentity %ME_MANAGED_IDENTITY% -DataDirectory %ME_DATA_DIRECTORY% -Input influxdb_local,otlp_grpc,otlp_grpc_prom -InfluxDbSocketPath %ME_INFLUX_SOCKET_FILE_PATH% -LogLevel Info -Logger Console -OperationEnvironment AMA-Linux/AMA_VERSION -ConfigOverrides \"{\\\"otlp\\\":{\\\"endpoints\\\":[\\\"${OTLP_GRPC_PROM_HOST}:${OTLP_GRPC_PROM_PORT}\\\"]}}\"\nExecReload=/bin/kill -HUP $MAINPID\nKillMode=control-group\nUser=azuremetricsext\nGroup=azuremonitoragent\nRuntimeDirectory=azureotelcollector azuremetricsext\nRuntimeDirectoryMode=0755\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "AzureMonitorAgent/services/metrics-extension-otlp.service",
    "content": "[Unit]\nDescription=Metrics Extension service for Linux Agent metrics sourcing\nAfter=network.target\n\n[Service]\nEnvironment=\"OTLP_GRPC_HOST=127.0.0.1\"\nEnvironment=\"OTLP_GRPC_PORT=4317\"\nEnvironmentFile=-/etc/metrics-extension.d/options.conf\nExecStart=%ME_BIN% -TokenSource AMCS -ManagedIdentity %ME_MANAGED_IDENTITY% -Input influxdb_local,otlp_grpc -InfluxDbSocketPath %ME_INFLUX_SOCKET_FILE_PATH% -LogLevel Info -Logger Console -OperationEnvironment AMA-Linux/AMA_VERSION\nExecReload=/bin/kill -HUP $MAINPID\nKillMode=control-group\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "AzureMonitorAgent/shim.sh",
    "content": "#!/usr/bin/env bash\n\n# This is the main driver file for AMA extension. This file first checks if Python 3 or 2 is available on the VM \n# and if yes then uses that Python (if both are available then, default is set to python3) to run extension operations in agent.py\n# Control arguments passed to the shim are redirected to agent.py without validation.\n\nCOMMAND=\"./agent.py\"\nPYTHON=\"\"\nARG=\"$@\"\n\nfunction find_python() {\n    local python_exec_command=$1\n\n    if command -v python3 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python3\"\n    elif command -v python2 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python2\"\n    elif command -v /usr/libexec/platform-python >/dev/null 2>&1 ; then\n        # If a user-installed python isn't available, check for a platform-python. This is typically only used in RHEL 8.0.\n        echo \"User-installed python not found. Using /usr/libexec/platform-python as the python interpreter.\"\n        eval ${python_exec_command}=\"/usr/libexec/platform-python\"\n    fi\n}\n\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ] # If python is not installed, we will fail the install with the following error, requiring cx to have python pre-installed\nthen\n    echo \"No Python interpreter found, which is an AMA extension dependency. Please install Python 3, or Python 2 if the former is unavailable.\" >&2\n    exit 52 # Missing Dependency\nelse\n    ${PYTHON} --version 2>&1\nfi\n\nexport NO_PROXY=\"169.254.169.254\"\nPYTHONPATH=${PYTHONPATH} ${PYTHON} ${COMMAND} ${ARG}\nexit $?\n"
  },
  {
    "path": "AzureMonitorAgent/update_version.sh",
    "content": "#! /bin/bash\nset -x\n\nif [[ \"$1\" == \"--help\" ]]; then\n    echo \"update_version.sh <AGENT_VERSION> <MDSD_DEB_PACKAGE_NAME> <MDSD_RPM_PACKAGE_NAME>\"\n    exit 0\nfi\n\nUPDATE_DATE=`date +%Y%m%d`\nAGENT_VERSION=$1\nMDSD_DEB_PACKAGE_NAME=$2\nMDSD_RPM_PACKAGE_NAME=$3\n\nif [[ \"$AGENT_VERSION\" == \"\" ]]; then\n    echo \"AGENT_VERSION version is empty\"\n    exit 1\nfi\n\nif [[ \"$MDSD_DEB_PACKAGE_NAME\" == \"\" ]]; then\n    echo \"MDSD_DEB_PACKAGE_NAME is empty\"\n    exit 1\nfi\n\nif [[ \"$MDSD_RPM_PACKAGE_NAME\" == \"\" ]]; then\n    echo \"MDSD_RPM_PACKAGE_NAME is empty\"\n    exit 1\nfi\n\n\nsed -i \"s/^AGENT_VERSION=.*$/AGENT_VERSION=$AGENT_VERSION/\" agent.version\nsed -i \"s/^MDSD_DEB_PACKAGE_NAME=.*$/MDSD_DEB_PACKAGE_NAME=$MDSD_DEB_PACKAGE_NAME/\" agent.version\nsed -i \"s/^MDSD_RPM_PACKAGE_NAME=.*$/MDSD_RPM_PACKAGE_NAME=$MDSD_RPM_PACKAGE_NAME/\" agent.version\nsed -i \"s/^AGENT_VERSION_DATE=.*$/AGENT_VERSION_DATE=$UPDATE_DATE/\" agent.version\n"
  },
  {
    "path": "CODEOWNERS",
    "content": "# See https://help.github.com/articles/about-codeowners/\n# for more info about CODEOWNERS file\n\n# It uses the same pattern rule for gitignore file\n# https://git-scm.com/docs/gitignore#_pattern_format\n\n# These owners will be the default owners for everything in\n# the repo. Unless a later match takes precedence,\n# the following owners will be requested for\n# review when someone opens a pull request.\n* @nkuchta @Azure/azure-agent-extensions\n\n# Azure Monitor Agent Extension\n/AzureMonitorAgent/ @Azure/geneva-linux-agents\n\n# CustomScript Extension\n/CustomScript/ @D1v38om83r @nkuchta @Azure/azure-agent-extensions\n\n# Diagnostics (LAD) Extension\n/Diagnostic/ @Azure/geneva-linux-agents\n\n# Utils for LAD/AMA metrics\n/LAD-AMA-Common/ @Azure/geneva-linux-agents\n\n# DSCForLinux Extension\n/DSC/ @Bhargava-Chary-Chollaty\n\n# OMS Agent Extension\n/OmsAgent/ @Azure/geneva-linux-agents\n\n# OpenCensus Translator Extension\n/opencensus-service/ @Azure/geneva-linux-agents\n\n# VMAccess Extension\n/VMAccess/ @D1v38om83r @nkuchta @Azure/azure-agent-extensions\n\n# VMBackup Extension\n/VMBackup/ @vityagi @mearvind @arisettisanjana @deveshjagwani\n\n# VMEncryption Extension\n/VMEncryption/ @vimish @ejarvi\n\n# WALinuxAgent\n/Common/ @D1v38om83r @nkuchta @Azure/walinuxagent @Azure/azure-agent-extensions\n/Utils/ @D1v38om83r @nkuchta @Azure/walinuxagent @Azure/azure-agent-extensions\n\n# Abandoned?\n# /AzureEnhancedMonitor/ \n# /OSPatching/ \n# /RDMAUpdate/\n# /SampleExtension/\n# /TestHandlerLinux/\n# /docs/\n# /registration-scripts/\n# /script/\n# /ui-extension-packages/\n"
  },
  {
    "path": "Common/WALinuxAgent-2.0.14/waagent",
    "content": "#!/usr/bin/env python\n#\n# Windows Azure Linux Agent\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Implements parts of RFC 2131, 1541, 1497 and\n# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx\n\nimport array\nimport base64\nimport httplib\nimport os\nimport os.path\nimport platform\nimport pwd\nimport re\nimport shutil\nimport socket\nimport SocketServer\nimport struct\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nimport time\nimport traceback\nimport xml.dom.minidom\nimport fcntl\nimport inspect\nimport zipfile\nimport json\nimport datetime\nimport xml.sax.saxutils\n\nif not hasattr(subprocess,'check_output'):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode = returncode\n            self.cmd = cmd\n            self.output = output\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n    subprocess.check_output=check_output\n    subprocess.CalledProcessError=CalledProcessError\n    \nGuestAgentName = \"WALinuxAgent\"\nGuestAgentLongName = \"Windows Azure Linux Agent\"\nGuestAgentVersion = \"WALinuxAgent-2.0.14\"\nProtocolVersion = \"2012-11-30\" #WARNING this value is used to confirm the correct fabric protocol.\n\nConfig = None\nWaAgent = None\nDiskActivated = False\nOpenssl = \"openssl\"\nChildren = []\nExtensionChildren = []\nVMM_STARTUP_SCRIPT_NAME='install'\nVMM_CONFIG_FILE_NAME='linuxosconfiguration.xml'\nglobal RulesFiles\nRulesFiles = [ \"/lib/udev/rules.d/75-persistent-net-generator.rules\",\n               \"/etc/udev/rules.d/70-persistent-net.rules\" ]\nVarLibDhcpDirectories = [\"/var/lib/dhclient\", \"/var/lib/dhcpcd\", \"/var/lib/dhcp\"]\nEtcDhcpClientConfFiles = [\"/etc/dhcp/dhclient.conf\", \"/etc/dhcp3/dhclient.conf\"]\nglobal LibDir\nLibDir = \"/var/lib/waagent\"\nglobal provisioned\nprovisioned=False\nglobal provisionError\nprovisionError=None\nHandlerStatusToAggStatus = {\"installed\":\"Installing\", \"enabled\":\"Ready\", \"unintalled\":\"NotReady\", \"disabled\":\"NotReady\"}\n\nWaagentConf = \"\"\"\\\n#\n# Windows Azure Linux Agent 
Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y       # Password authentication for root account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ext4            # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Windows Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\nREADME_FILENAME=\"DATALOSS_WARNING_README.txt\"\nREADME_FILECONTENT=\"\"\"\\\nWARNING: THIS IS A TEMPORARY DISK. \n\nAny data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT.\n\nPlease do not use this disk for storing any personal or application data.\n\nFor additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx\n\"\"\"\n\n############################################################\n# BEGIN DISTRO CLASS DEFS\n############################################################\n############################################################    \n#\tAbstractDistro\n############################################################    \nclass AbstractDistro(object):\n    \"\"\"\n    AbstractDistro defines a skeleton neccesary for a concrete Distro class.\n\n    Generic methods and attributes are kept here, distribution specific attributes\n    and behavior are to be placed in the concrete child named distroDistro, where\n    distro is the string returned by calling python platform.linux_distribution()[0].\n    So for CentOS the derived class is called 'centosDistro'.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  
These are based on 'majority rules'.\n        This __init__() may be called or overriden by the child.\n        \"\"\"\n        self.agent_service_name = os.path.basename(sys.argv[0]) \n        self.selinux=None\n        self.service_cmd='/usr/sbin/service'\n        self.ssh_service_restart_option='restart'\n        self.ssh_service_name='ssh'\n        self.ssh_config_file='/etc/ssh/sshd_config'\n        self.hostname_file_path='/etc/hostname'\n        self.dhcp_client_name='dhclient'\n        self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'useradd', \n                              'openssl', 'sfdisk', 'fdisk', 'mkfs', 'chpasswd', \n                              'sed', 'grep', 'sudo', 'parted' ]\n        self.init_script_file='/etc/init.d/waagent'\n        self.agent_package_name='WALinuxAgent'\n        self.fileBlackList = [ \"/root/.bash_history\", \"/var/log/waagent.log\",'/etc/resolv.conf' ]        \n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\", \"/etc/logrotate.d/waagent\"]\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT='\n        self.getpidcmd = 'pidof'\n        self.mount_dvd_cmd = 'mount'\n        self.sudoers_dir_base = '/etc'\n        self.waagent_conf_file = WaagentConf\n        self.shadow_file_mode=0600\n        self.dhcp_enabled = False\n        \n    def isSelinuxSystem(self):\n        \"\"\"\n        Checks and sets self.selinux = True if SELinux is available on system.\n        \"\"\"\n        if self.selinux == None:\n            if Run(\"which getenforce\",chk_err=False):\n                self.selinux = False\n            else:\n                self.selinux = True\n        return self.selinux\n    \n    def isSelinuxRunning(self):\n        \"\"\"\n        Calls shell command 'getenforce' and returns True if 'Enforcing'.\n        \"\"\"\n        if self.isSelinuxSystem():\n            return RunGetOutput(\"getenforce\")[1].startswith(\"Enforcing\")\n        else:\n            return False\n        \n    def setSelinuxEnforce(self,state):\n        \"\"\"\n        Calls shell command 'setenforce' with 'state' and returns resulting exit code.\n        \"\"\"\n        if self.isSelinuxSystem():\n            if state: s = '1'\n            else: s='0'\n            return Run(\"setenforce \"+s)\n\n    def setSelinuxContext(self,path,cn):\n        \"\"\"\n        Calls shell 'chcon' with 'path' and 'cn' context.\n        Returns exit result.\n        \"\"\"\n        if self.isSelinuxSystem():\n            return Run('chcon ' + cn + ' ' + path)\n        \n    def setHostname(self,name):\n        \"\"\"\n        Shell call to hostname.\n        Returns resulting exit code.\n        \"\"\"\n        return Run('hostname ' + name)\n        \n    def publishHostname(self,name):\n        \"\"\"\n        Set the contents of the hostname file to 'name'.\n        Return 1 on failure.\n        \"\"\"\n        try:\n            r=SetFileContents(self.hostname_file_path, name)\n            for f in EtcDhcpClientConfFiles:\n                if os.path.exists(f) and FindStringInFile(f,r'^[^#]*?send\\s*host-name.*?(<hostname>|gethostname[(,)])') == None :\n                    r=ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', \"send host-name \\\"\" + name + \"\\\";\\n\"\n                                                + \"\\n\".join(filter(lambda a: not a.startswith(\"send host-name\"), GetFileContents('/etc/dhcp/dhclient.conf').split('\\n'))))\n        except:\n            
return 1\n        return r\n        \n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        Create the waagent support files for service installation.\n        Called by registerAgentService()\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def registerAgentService(self):\n        \"\"\"\n        Calls installAgentService to create service files.\n        Shell exec service registration commands. (e.g. chkconfig --add waagent)\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n    \n    def uninstallAgentService(self):\n        \"\"\"\n        Call service subsystem to remove waagent script.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Calls self.stopAgentService and call self.uninstallAgentService()\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n    \n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' ' + self.agent_service_name + ' start')\n        \n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' '  + self.agent_service_name + ' stop',False)\n    \n    def restartSshService(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" + self.ssh_service_name + \" \" + self.ssh_service_restart_option\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n\n    def sshDeployPublicKey(self,fprint,path):\n        \"\"\"\n        Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences in openssl packages deployed\n        \"\"\"\n        error=0\n        SshPubKey = OvfEnv().OpensslToSsh(fprint)\n        if SshPubKey != None:\n            AppendFileContents(path, SshPubKey)\n        else:\n            Error(\"Failed: \" + fprint + \".crt -> \" + path)\n            error = 1\n        return error\n    \n    def checkPackageInstalled(self,p):\n        \"\"\"\n        Query package database for prescence of an installed package.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def checkPackageUpdateable(self,p):\n        \"\"\"\n        Online check if updated package of walinuxagent is available.\n        Abstract Virtual Function.  
Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def deleteRootPassword(self):\n        \"\"\"\n        Generic root password removal.\n        \"\"\"\n        filepath=\"/etc/shadow\"\n        ReplaceFileContentsAtomic(filepath,\"root:*LOCK*:14600::::::\\n\"\n                                  + \"\\n\".join(filter(lambda a: not a.startswith(\"root:\"),GetFileContents(filepath).split('\\n'))))\n        os.chmod(filepath,self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0')\n        Log(\"Root password deleted.\")\n        return 0\n    \n    def changePass(self,user,password):\n        return RunSendStdin(\"chpasswd\",(user + \":\" + password + \"\\n\"),use_shell=False)\n    \n    def load_ata_piix(self):\n        return WaAgent.TryLoadAtapiix()\n\n    def unload_ata_piix(self):\n        \"\"\"\n        Generic function to remove ata_piix.ko.\n        \"\"\"\n        return WaAgent.TryUnloadAtapiix()\n        \n    def deprovisionWarnUser(self):\n        \"\"\"\n        Generic user warnings used at deprovision.\n        \"\"\"\n        print(\"WARNING! Nameserver configuration in /etc/resolv.conf will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Files to delete when VM is deprovisioned\n        \"\"\"\n        for a in VarLibDhcpDirectories:\n            Run(\"rm -f \" + a + \"/*\")\n\n        # Clear LibDir, remove nameserver and root bash history\n        \n        for f in os.listdir(LibDir) + self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n    \n    def uninstallDeleteFiles(self):\n        \"\"\"\n        Files to delete when agent is uninstalled.\n        \"\"\"\n        for f in self.agent_files_to_uninstall:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n    \n    def checkDependencies(self):\n        \"\"\"\n        Generic dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        if self.checkPackageInstalled('NetworkManager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        try:\n            m= __import__('pyasn1')\n        except ImportError:\n            Error(GuestAgentLongName + \" requires python-pyasn1 for your Linux distribution.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\",chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self,buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies generated files waagent.conf, under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot+'/etc'):\n            os.mkdir(buildroot+'/etc')\n        SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file)\n        \n        if not os.path.exists(buildroot+'/etc/logrotate.d'):\n            os.mkdir(buildroot+'/etc/logrotate.d')\n        SetFileContents(buildroot+'/etc/logrotate.d/waagent', WaagentLogrotate)\n    \n        self.init_script_file=buildroot+self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            
os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def GetIpv4Address(self):\n        \"\"\"\n        Return the ip of the \n        first active non-loopback interface.\n        \"\"\"\n        addr=''\n        iface,addr=GetFirstActiveNetworkInterfaceNonLoopback()\n        return addr\n\n    def GetMacAddress(self):\n        return GetMacAddress()\n\n    def GetInterfaceName(self):\n        return GetFirstActiveNetworkInterfaceNonLoopback()[0]\n\n    def RestartInterface(self, iface):\n        Run(\"ifdown \" + iface + \" && ifup \" + iface)\n\n    def CreateAccount(self,user, password, expiration, thumbprint):\n        return CreateAccount(user, password, expiration, thumbprint)\n    \n    def DeleteAccount(self,user):\n        return DeleteAccount(user)\n\n    def ActivateResourceDisk(self):\n        \"\"\"\n        Format, mount, and if specified in the configuration\n        set resource disk as swap.\n        \"\"\"\n        global DiskActivated\n        format = Config.get(\"ResourceDisk.Format\")\n        if format == None or format.lower().startswith(\"n\"):\n            DiskActivated = True\n            return\n        device = DeviceForIdePort(1)\n        if device == None:\n            Error(\"ActivateResourceDisk: Unable to detect disk topology.\")\n            return\n        device = \"/dev/\" + device\n\n        mountlist = RunGetOutput(\"mount\")[1]\n        mountpoint = GetMountPoint(mountlist, device)\n\n        if(mountpoint):\n            Log(\"ActivateResourceDisk: \" + device + \"1 is already mounted.\")\n        else:\n            mountpoint = Config.get(\"ResourceDisk.MountPoint\")\n            if mountpoint == None:\n                mountpoint = \"/mnt/resource\"\n            CreateDir(mountpoint, \"root\", 0755)\n            fs = Config.get(\"ResourceDisk.Filesystem\")\n            if fs == None:\n                fs = \"ext3\"\n\n            partition = device + \"1\"\n\n            #Check partition type\n            Log(\"Detect GPT...\")\n            ret = RunGetOutput(\"parted {0} print\".format(device))\n            if ret[0] == 0 and \"gpt\" in ret[1]:\n                Log(\"GPT detected.\")\n                #GPT(Guid Partition Table) is used.\n                #Get partitions.\n                parts = filter(lambda x : re.match(\"^\\s*[0-9]+\", x), ret[1].split(\"\\n\"))\n                #If there are more than 1 partitions, remove all partitions \n                #and create a new one using the entire disk space.\n                if len(parts) > 1:\n                    for i in range(1, len(parts) + 1):\n                        Run(\"parted {0} rm {1}\".format(device, i))\n                    Run(\"parted {0} mkpart primary 0% 100%\".format(device))\n                    Run(\"mkfs.\" + fs + \" \" + partition + \" -F\")\n            else:\n                existingFS = RunGetOutput(\"sfdisk -q -c \" + device + \" 1\", chk_err=False)[1].rstrip()\n                if existingFS == \"7\" and fs != \"ntfs\":\n                    Run(\"sfdisk -c \" + device + \" 1 83\")\n                    Run(\"mkfs.\" + fs + \" \" + partition)\n            if Run(\"mount \" + partition + \" \" + mountpoint, chk_err=False):\n                #If mount failed, try to format the partition and mount again\n                Warn(\"Failed to mount resource disk. 
Retry mounting.\")            \n                Run(\"mkfs.\" + fs + \" \" + partition + \" -F\")\n                if Run(\"mount \" + partition + \" \" + mountpoint):\n                    Error(\"ActivateResourceDisk: Failed to mount resource disk (\" + partition + \").\")\n                    return\n            Log(\"Resource disk (\" + partition + \") is mounted at \" + mountpoint + \" with fstype \" + fs)\n\n        #Create README file under the root of resource disk\n        SetFileContents(os.path.join(mountpoint,README_FILENAME), README_FILECONTENT)\n        DiskActivated = True\n\n        #Create swap space\n        swap = Config.get(\"ResourceDisk.EnableSwap\")\n        if swap == None or swap.lower().startswith(\"n\"):\n            return\n        sizeKB = int(Config.get(\"ResourceDisk.SwapSizeMB\")) * 1024\n        if os.path.isfile(mountpoint + \"/swapfile\") and os.path.getsize(mountpoint + \"/swapfile\") != (sizeKB * 1024):\n            os.remove(mountpoint + \"/swapfile\")\n        if not os.path.isfile(mountpoint + \"/swapfile\"):\n            Run(\"dd if=/dev/zero of=\" + mountpoint + \"/swapfile bs=1024 count=\" + str(sizeKB))\n            Run(\"mkswap \" + mountpoint + \"/swapfile\")\n        if not Run(\"swapon \" + mountpoint + \"/swapfile\"):\n            Log(\"Enabled \" + str(sizeKB) + \" KB of swap at \" + mountpoint + \"/swapfile\")\n        else:\n            Error(\"ActivateResourceDisk: Failed to activate swap at \" + mountpoint + \"/swapfile\")\n\n    def Install(self):\n        return Install()\n\n    def mediaHasFilesystem(self,dsk):\n        if len(dsk) == 0 :\n            return False\n        if Run(\"LC_ALL=C fdisk -l \" + dsk + \" | grep Disk\"):\n            return False\n        return True\n    \n    def mountDVD(self,dvd,location):\n        return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location)\n\n    def GetHome(self):\n        return GetHome()\n\n    def getDhcpClientName(self):\n        return self.dhcp_client_name\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout when the agent starts running\n        \"\"\"\n        self.setScsiDiskTimeout()\n\n    def setScsiDiskTimeout(self):\n        \"\"\"\n        Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout\n        \"\"\"\n        try:\n            scsiTimeout = Config.get(\"OS.RootDeviceScsiTimeout\")\n            for diskName in [disk for disk in os.listdir(\"/sys/block\") if disk.startswith(\"sd\")]:\n                self.setBlockDeviceTimeout(diskName, scsiTimeout)\n        except:\n            pass\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        \"\"\"\n        Set SCSI disk timeout by set /sys/block/sd*/device/timeout\n        \"\"\"\n        if timeout != None and device:\n            filePath = \"/sys/block/\" + device + \"/device/timeout\"\n            if(GetFileContents(filePath).splitlines()[0].rstrip() != timeout):\n                SetFileContents(filePath,timeout)\n                Log(\"SetBlockDeviceTimeout: Update the device \" + device + \" with timeout \" + timeout)\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Provide a dummy waiting, since by default, ssh host key is created by waagent and the key\n        should already been created.\n        \"\"\"\n        if(os.path.isfile(path)):\n            return True\n        else:\n            Error(\"Can't find host key: {0}\".format(path))\n            return False\n\n   
 def isDHCPEnabled(self):\n        return self.dhcp_enabled\n\n    def stopDHCP(self):\n        \"\"\"\n        Stop the system DHCP client so that the agent can bind on its port. If\n        the distro has set dhcp_enabled to True, it will need to provide an\n        implementation of this method.\n        \"\"\"\n        raise NotImplementedError('stopDHCP method missing')\n\n    def startDHCP(self):\n        \"\"\"\n        Start the system DHCP client. If the distro has set dhcp_enabled to\n        True, it will need to provide an implementation of this method.\n        \"\"\"\n        raise NotImplementedError('startDHCP method missing')\n\n    def translateCustomData(self, data):\n        \"\"\"\n        Translate the custom data from a Base64 encoding. Default to no-op.\n        \"\"\"\n        decodeCustomData = Config.get(\"Provisioning.DecodeCustomData\")\n        if decodeCustomData != None and decodeCustomData.lower().startswith(\"y\"):\n            return base64.b64decode(data)\n        return data\n\n    def getConfigurationPath(self):\n        return \"/etc/waagent.conf\"\n    \n    def getProcessorCores(self):\n        return int(RunGetOutput(\"grep 'processor.*:' /proc/cpuinfo |wc -l\")[1])\n    \n    def getTotalMemory(self):\n        return int(RunGetOutput(\"grep MemTotal /proc/meminfo |awk '{print $2}'\")[1])/1024\n    \n    def getInterfaceNameByMac(self, mac):\n        ret, output = RunGetOutput(\"ifconfig -a\")\n        if ret != 0:\n            raise Exception(\"Failed to get network interface info\")\n        output = output.replace('\\n', '')\n        match = re.search(r\"(eth\\d).*(HWaddr|ether) {0}\".format(mac), \n                          output, re.IGNORECASE)\n        if match is None:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        output = match.group(0)\n        eths = re.findall(r\"eth\\d\", output)\n        if eths is None or len(eths) == 0:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        return eths[-1]\n\n    def configIpV4(self, ifName, addr, netmask=24):\n        ret, output = RunGetOutput(\"ifconfig {0} up\".format(ifName))\n        if ret != 0:\n            raise Exception(\"Failed to bring up {0}: {1}\".format(ifName, \n                                                                 output))\n        ret, output = RunGetOutput(\"ifconfig {0} {1}/{2}\".format(ifName, addr,\n                                                                 netmask))\n        if ret != 0:\n            raise Exception(\"Failed to config ipv4 for {0}: {1}\".format(ifName, \n                                                                        output))\n\n############################################################\n#\tGentooDistro\n############################################################\ngentoo_init_file = \"\"\"\\\n#!/sbin/runscript\n\ncommand=/usr/sbin/waagent\npidfile=/var/run/waagent.pid\ncommand_args=-daemon\ncommand_background=true\nname=\"Windows Azure Linux Agent\"\n\ndepend()\n{\n\tneed localmount\n\tuse logger network\n\tafter bootmisc modules\n}\n\n\"\"\"\nclass gentooDistro(AbstractDistro):\n    \"\"\"\n    Gentoo distro concrete class\n    \"\"\"\n\n    def __init__(self): #\n        super(gentooDistro,self).__init__()\n        self.service_cmd='/sbin/service'\n        self.ssh_service_name='sshd'\n        self.hostname_file_path='/etc/conf.d/hostname'\n        self.dhcp_client_name='dhcpcd'\n        self.shadow_file_mode=0640\n        
self.init_file=gentoo_init_file\n        \n    def publishHostname(self,name):\n        try:\n            if (os.path.isfile(self.hostname_file_path)):\n                r=ReplaceFileContentsAtomic(self.hostname_file_path, \"hostname=\\\"\" + name + \"\\\"\\n\"\n                    + \"\\n\".join(filter(lambda a: not a.startswith(\"hostname=\"), GetFileContents(self.hostname_file_path).split(\"\\n\"))))\n        except:\n            return 1\n        return r\n        \n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0755)\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('rc-update add ' + self.agent_service_name + ' default')\n    \n    def uninstallAgentService(self):\n        return Run('rc-update del ' + self.agent_service_name + ' default')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def checkPackageInstalled(self,p):\n        if Run('eix -I ^' + p + '$',chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self,p):\n        if Run('eix -u ^' + p + '$',chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def RestartInterface(self, iface):\n        Run(\"/etc/init.d/net.\" + iface + \" restart\")\n\n############################################################    \n#\tSuSEDistro\n############################################################    \nsuse_init_file = \"\"\"\\\n#! /bin/sh\n#\n# Windows Azure Linux Agent sysV init script\n#\n# Copyright 2013 Microsoft Corporation\n# Copyright SUSE LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# /etc/init.d/waagent\n#\n#  and symbolic link\n#\n# /usr/sbin/rcwaagent\n#\n# System startup script for the waagent\n#\n### BEGIN INIT INFO\n# Provides: WindowsAzureLinuxAgent\n# Required-Start: $network sshd\n# Required-Stop: $network sshd\n# Default-Start: 3 5\n# Default-Stop: 0 1 2 6\n# Description: Start the WindowsAzureLinuxAgent\n### END INIT INFO\n\nPYTHON=/usr/bin/python\nWAZD_BIN=/usr/sbin/waagent\nWAZD_CONF=/etc/waagent.conf\nWAZD_PIDFILE=/var/run/waagent.pid\n\ntest -x \"$WAZD_BIN\" || { echo \"$WAZD_BIN not installed\"; exit 5; }\ntest -e \"$WAZD_CONF\" || { echo \"$WAZD_CONF not found\"; exit 6; }\n\n. /etc/rc.status\n\n# First reset status of this service\nrc_reset\n\n# Return values acc. to LSB for all commands but status:\n# 0 - success\n# 1 - misc error\n# 2 - invalid or excess args\n# 3 - unimplemented feature (e.g. 
reload)\n# 4 - insufficient privilege\n# 5 - program not installed\n# 6 - program not configured\n#\n# Note that starting an already running service, stopping\n# or restarting a not-running service as well as the restart\n# with force-reload (in case signalling is not supported) are\n# considered a success.\n\n\ncase \"$1\" in\n    start)\n        echo -n \"Starting WindowsAzureLinuxAgent\"\n        ## Start daemon with startproc(8). If this fails\n        ## the echo return value is set appropriate.\n        startproc -f ${PYTHON} ${WAZD_BIN} -daemon\n        rc_status -v\n        ;;\n    stop)\n        echo -n \"Shutting down WindowsAzureLinuxAgent\"\n        ## Stop daemon with killproc(8) and if this fails\n        ## set echo the echo return value.\n        killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    try-restart)\n        ## Stop the service and if this succeeds (i.e. the\n        ## service was running before), start it again.\n        $0 status >/dev/null && $0 restart\n        rc_status\n        ;;\n    restart)\n        ## Stop the service and regardless of whether it was\n        ## running or not, start it again.\n        $0 stop\n        sleep 1\n        $0 start\n        rc_status\n        ;;\n    force-reload|reload)\n        rc_status\n        ;;\n    status)\n        echo -n \"Checking for service WindowsAzureLinuxAgent \"\n        ## Check status with checkproc(8), if process is running\n        ## checkproc will return with exit status 0.\n\n        checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    probe)\n        ;;\n    *)\n        echo \"Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}\"\n        exit 1\n        ;;\nesac\nrc_exit\n\"\"\"\nclass SuSEDistro(AbstractDistro):\n    \"\"\"\n    SuSE Distro concrete class\n    Put SuSE specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(SuSEDistro,self).__init__()\n        self.service_cmd='/sbin/service'\n        self.ssh_service_name='sshd'\n        self.kernel_boot_options_file='/boot/grub/menu.lst'\n        self.hostname_file_path='/etc/HOSTNAME'\n        self.requiredDeps += [ \"/sbin/insserv\" ]\n        self.init_file=suse_init_file\n        self.dhcp_client_name='dhcpcd'\n        if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \\\n            (DistInfo(fullname=1)[0] == 'openSUSE'                     and DistInfo()[1] >= '13.2')):\n            self.dhcp_client_name='wickedd-dhcp4'\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n        self.getpidcmd='pidof '\n        self.dhcp_enabled=True\n        \n    def checkPackageInstalled(self,p):\n        if Run(\"rpm -q \" + p,chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self,p):\n        if Run(\"zypper list-updates | grep \" + p,chk_err=False):\n            return 1\n        else:\n            return 0\n        \n\n    def installAgentServiceScriptFiles(self):\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0744)\n        except:\n            pass\n        \n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('insserv ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n        return Run('insserv -r ' + 
self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def startDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" start\", chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" stop\", chk_err=False)\n    \n############################################################    \n#\tredhatDistro\n############################################################    \n\nredhat_init_file= \"\"\"\\\n#!/bin/bash\n#\n# Init file for WindowsAzureLinuxAgent.\n#\n# chkconfig: 2345 60 80\n# description: WindowsAzureLinuxAgent\n#\n\n# source function library\n. /etc/rc.d/init.d/functions\n\nRETVAL=0\nFriendlyName=\"WindowsAzureLinuxAgent\"\nWAZD_BIN=/usr/sbin/waagent\n\nstart()\n{\n    echo -n $\"Starting $FriendlyName: \"\n    $WAZD_BIN -daemon &\n}\n\nstop()\n{\n    echo -n $\"Stopping $FriendlyName: \"\n    killproc -p /var/run/waagent.pid $WAZD_BIN\n    RETVAL=$?\n    echo\n    return $RETVAL\n}\n\ncase \"$1\" in\n    start)\n        start\n        ;;\n    stop)\n        stop\n        ;;\n    restart)\n        stop\n        start\n        ;;\n    reload)\n        ;;\n    report)\n        ;;\n    status)\n        status $WAZD_BIN\n        RETVAL=$?\n        ;;\n    *)\n        echo $\"Usage: $0 {start|stop|restart|status}\"\n        RETVAL=1\nesac\nexit $RETVAL\n\"\"\"\n\nclass redhatDistro(AbstractDistro):\n    \"\"\"\n    Redhat Distro concrete class\n    Put Redhat specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(redhatDistro,self).__init__()\n        self.service_cmd='/sbin/service'\n        self.ssh_service_restart_option='condrestart'\n        self.ssh_service_name='sshd'\n        self.hostname_file_path= None if DistInfo()[1] < '7.0' else '/etc/hostname'\n        self.init_file=redhat_init_file\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n\n    def publishHostname(self,name):\n        super(redhatDistro,self).publishHostname(name)\n        if DistInfo()[1] < '7.0' :\n            filepath = \"/etc/sysconfig/network\"\n            if os.path.isfile(filepath):\n                ReplaceFileContentsAtomic(filepath, \"HOSTNAME=\" + name + \"\\n\"\n                    + \"\\n\".join(filter(lambda a: not a.startswith(\"HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                    + \"\\n\".join(filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0744)\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('chkconfig --add waagent')\n   \n    def uninstallAgentService(self):\n        return Run('chkconfig --del ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n    \n    def checkPackageInstalled(self,p):\n        if Run(\"yum list installed \" + p,chk_err=False):\n            return 0\n        else:\n            
return 1\n\n    def checkPackageUpdateable(self,p):\n        if Run(\"yum check-update | grep \"+ p,chk_err=False):\n            return 1\n        else:\n            return 0\n\n\n\n############################################################    \n#\tcentosDistro\n############################################################    \n\nclass centosDistro(redhatDistro):\n    \"\"\"\n    CentOS Distro concrete class\n    Put CentOS specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(centosDistro,self).__init__()\n\n\n############################################################\n#   CoreOSDistro\n############################################################\n\nclass CoreOSDistro(AbstractDistro):\n    \"\"\"\n    CoreOS Distro concrete class\n    Put CoreOS specific behavior here...\n    \"\"\"\n    CORE_UID = 500\n\n    def __init__(self):\n        super(CoreOSDistro,self).__init__()\n        self.requiredDeps += [ \"/usr/bin/systemctl\" ]\n        self.agent_service_name = 'waagent'\n        self.init_script_file='/etc/systemd/system/waagent.service'\n        self.fileBlackList.append(\"/etc/machine-id\")\n        self.dhcp_client_name='systemd-networkd'\n        self.getpidcmd='pidof '\n        self.shadow_file_mode=0640\n        self.waagent_path='/usr/share/oem/bin'\n        self.python_path='/usr/share/oem/python/bin'\n        self.dhcp_enabled=True\n        if 'PATH' in os.environ:\n            os.environ['PATH'] = \"{0}:{1}\".format(os.environ['PATH'], self.python_path)\n        else:\n            os.environ['PATH'] = self.python_path\n\n        if 'PYTHONPATH' in os.environ:\n            os.environ['PYTHONPATH'] = \"{0}:{1}\".format(os.environ['PYTHONPATH'], self.waagent_path)\n        else:\n            os.environ['PYTHONPATH'] = self.waagent_path\n\n    def checkPackageInstalled(self,p):\n        \"\"\"\n        There is no package manager in CoreOS.  Return 1 since it must be preinstalled.\n        \"\"\"\n        return 1\n\n    def checkDependencies(self):\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\",chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n\n    def checkPackageUpdateable(self,p):\n        \"\"\"\n        There is no package manager in CoreOS.  Return 0 since it can't be updated via package.\n        \"\"\"\n        return 0\n\n    def startAgentService(self):\n        return Run('systemctl start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        return Run('systemctl stop ' + self.agent_service_name)\n\n    def restartSshService(self):\n        \"\"\"\n        SSH is socket activated on CoreOS. 
No need to restart it.\n        \"\"\"\n        return 0\n\n    def sshDeployPublicKey(self,fprint,path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else :\n            return 0\n\n    def RestartInterface(self, iface):\n        Run(\"systemctl restart systemd-networkd\")\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        uidmin = None\n        try:\n            uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:\n            Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"useradd --create-home --password '*' \" + user\n            if expiration != None:\n                command += \" --expiredate \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            Log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n        if password != None:\n            RunSendStdin(\"chpasswd\", user + \":\" + password + \"\\n\")\n        try:\n            if password == None:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(\"/etc/sudoers.d/waagent\", 0440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n\n    def startDHCP(self):\n        Run(\"systemctl start \" + self.dhcp_client_name, chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"systemctl stop \" + self.dhcp_client_name, chk_err=False)\n\n    def translateCustomData(self, data):\n        return base64.b64decode(data)\n\n    def getConfigurationPath(self):\n        return \"/usr/share/oem/waagent.conf\"\n\n############################################################    
\n#\tdebianDistro\n############################################################    \ndebian_init_file = \"\"\"\\\n#!/bin/sh\n### BEGIN INIT INFO\n# Provides:          WindowsAzureLinuxAgent\n# Required-Start:    $network $syslog\n# Required-Stop:     $network $syslog\n# Should-Start:      $network $syslog\n# Should-Stop:       $network $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: WindowsAzureLinuxAgent\n# Description:       WindowsAzureLinuxAgent\n### END INIT INFO\n\n. /lib/lsb/init-functions\n\nOPTIONS=\"-daemon\"\nWAZD_BIN=/usr/sbin/waagent\nWAZD_PID=/var/run/waagent.pid\n\ncase \"$1\" in\n    start)\n        log_begin_msg \"Starting WindowsAzureLinuxAgent...\"\n        pid=$( pidofproc $WAZD_BIN )\n        if [ -n \"$pid\" ] ; then\n              log_begin_msg \"Already running.\"\n              log_end_msg 0\n              exit 0\n        fi\n        start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS\n        log_end_msg $?\n        ;;\n\n    stop)\n        log_begin_msg \"Stopping WindowsAzureLinuxAgent...\"\n        start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID\n        ret=$?\n        rm -f $WAZD_PID\n        log_end_msg $ret\n        ;;\n    force-reload)\n        $0 restart\n        ;;\n    restart)\n        $0 stop\n        $0 start\n        ;;\n    status)\n        status_of_proc $WAZD_BIN && exit 0 || exit $?\n        ;;\n    *)\n        log_success_msg \"Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}\"\n        exit 1\n        ;;\nesac\n\nexit 0\n\"\"\"\n\nclass debianDistro(AbstractDistro):\n    \"\"\"\n    debian Distro concrete class\n    Put debian specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(debianDistro,self).__init__()\n        self.requiredDeps += [ \"/usr/sbin/update-rc.d\" ]\n        self.init_file=debian_init_file\n        self.agent_package_name='walinuxagent'\n        self.dhcp_client_name='dhclient'\n        self.getpidcmd='pidof '\n        self.shadow_file_mode=0640\n\n    def checkPackageInstalled(self,p):\n        \"\"\"\n        Check that the package is installed.\n        Return 1 if installed, 0 if not installed.\n        This method of using dpkg-query\n        allows wildcards to be present in the\n        package name.\n        \"\"\"\n        if not Run(\"dpkg-query -W -f='${Status}\\n' '\" + p + \"' | grep ' installed' 2>&1\",chk_err=False):\n            return 1\n        else:\n            return 0\n        \n    def checkDependencies(self):\n        \"\"\"\n        Debian dependency check.  
python-pyasn1 is NOT needed.\n        Return 1 unless all dependencies are satisfied.\n        NOTE: using network*manager will catch either package name in Ubuntu or debian.\n        \"\"\"\n        if self.checkPackageInstalled('network*manager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\",chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def checkPackageUpdateable(self,p):\n        if Run(\"apt-get update ; apt-get upgrade -us | grep \" + p,chk_err=False):\n            return 1\n        else:\n            return 0\n            \n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0744)\n        except OSError, e:\n            ErrorWithPrefix('installAgentServiceScriptFiles','Exception: '+str(e)+' occured creating ' + self.init_script_file)\n            return 1\n        return 0\n    \n    def registerAgentService(self):\n        if self.installAgentServiceScriptFiles() == 0:\n            return Run('update-rc.d waagent defaults')\n        else :\n            return 1\n    \n    def uninstallAgentService(self):\n        return Run('update-rc.d -f ' + self.agent_service_name + ' remove')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n    \n    def sshDeployPublicKey(self,fprint,path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else :\n            return 0\n        \n############################################################    \n#\tKaliDistro - WIP\n#       Functioning on Kali 1.1.0a so far\n############################################################ \nclass KaliDistro(debianDistro):\n    \"\"\"\n    Kali Distro concrete class\n    Put Kali specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(KaliDistro,self).__init__()\n\n############################################################    \n#\tUbuntuDistro\n############################################################    \nubuntu_upstart_file = \"\"\"\\\n#walinuxagent - start Windows Azure agent\n\ndescription \"walinuxagent\"\nauthor \"Ben Howard <ben.howard@canonical.com>\"\n\nstart on (filesystem and started rsyslog)\n\npre-start script\n\n\tWALINUXAGENT_ENABLED=1\n    [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent\n\n    if [ \"$WALINUXAGENT_ENABLED\" != \"1\" ]; then\n        exit 1\n    fi\n\n    if [ ! 
-x /usr/sbin/waagent ]; then\n        exit 1\n    fi\n\n    #Load the udf module\n    modprobe -b udf\nend script\n\nexec /usr/sbin/waagent -daemon\n\"\"\"\n\nclass UbuntuDistro(debianDistro):\n    \"\"\"\n    Ubuntu Distro concrete class\n    Put Ubuntu specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(UbuntuDistro,self).__init__()\n        self.init_script_file='/etc/init/waagent.conf'\n        self.init_file=ubuntu_upstart_file\n        self.fileBlackList = [ \"/root/.bash_history\", \"/var/log/waagent.log\"]\n        self.dhcp_client_name=None\n        self.getpidcmd='pidof '\n\n    def registerAgentService(self):\n        return self.installAgentServiceScriptFiles()\n    \n    def uninstallAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        os.remove('/etc/init/' + self.agent_service_name + '.conf')\n\n    def unregisterAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return\n        self.stopAgentService()\n        return self.uninstallAgentService()\n    \n    def deprovisionWarnUser(self):\n        \"\"\"\n        Ubuntu specific warning string from Deprovision.\n        \"\"\"\n        print(\"WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will\n        break resolvconf. Therefore, we check to see if resolvconf is in use,\n        and if so, we remove the resolvconf artifacts.\n        \"\"\"\n        if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':\n            Log(\"resolvconf is not configured. 
Removing /etc/resolv.conf\")\n            self.fileBlackList.append('/etc/resolv.conf')\n        else:\n            Log(\"resolvconf is enabled; leaving /etc/resolv.conf intact\")\n            resolvConfD = '/etc/resolvconf/resolv.conf.d/'\n            self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original'])\n        for f in os.listdir(LibDir)+self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def getDhcpClientName(self):\n        if self.dhcp_client_name != None :\n            return self.dhcp_client_name\n        if DistInfo()[1] == '12.04' :\n            self.dhcp_client_name='dhclient3'\n        else :\n            self.dhcp_client_name='dhclient'\n        return self.dhcp_client_name\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Wait until the ssh host key is generated by cloud init.\n        \"\"\"\n        for retry in range(0, 10):\n            if(os.path.isfile(path)):\n                return True\n            time.sleep(1)\n        Error(\"Can't find host key: {0}\".format(path))\n        return False\n\n\n############################################################    \n#\tLinuxMintDistro\n############################################################    \n\nclass LinuxMintDistro(UbuntuDistro):\n    \"\"\"\n    LinuxMint Distro concrete class\n    Put LinuxMint specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(LinuxMintDistro,self).__init__()\n\n############################################################    \n#\tfedoraDistro\n############################################################    \nfedora_systemd_service = \"\"\"\\\n[Unit]\nDescription=Windows Azure Linux Agent\nAfter=network.target\nAfter=sshd.service\nConditionFileIsExecutable=/usr/sbin/waagent\nConditionPathExists=/etc/waagent.conf\n\n[Service]\nType=simple\nExecStart=/usr/sbin/waagent -daemon\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n\nclass fedoraDistro(redhatDistro):\n    \"\"\"\n    FedoraDistro concrete class\n    Put Fedora specific behavior here...\n    \"\"\"\n    def __init__(self):\n        super(fedoraDistro,self).__init__()\n        self.service_cmd = '/usr/bin/systemctl'\n        self.hostname_file_path = '/etc/hostname'\n        self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service'\n        self.init_file = fedora_systemd_service\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX='\n\n    def publishHostname(self, name):\n        SetFileContents(self.hostname_file_path, name + '\\n')\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                    + \"\\n\".join(filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0644)\n        return Run(self.service_cmd + ' daemon-reload')\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(self.service_cmd + ' enable ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n        \"\"\"\n        Call service 
subsystem to remove the waagent script.\n        \"\"\"\n        return Run(self.service_cmd + ' disable ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Calls self.stopAgentService() and then self.uninstallAgentService().\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n\n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' stop '  + self.agent_service_name, False)\n\n    def restartSshService(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" +  self.ssh_service_restart_option + \" \" + self.ssh_service_name\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Query package database for presence of an installed package.\n        \"\"\"\n        import rpm\n        ts = rpm.TransactionSet()\n        rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, p)\n        return bool(len(rpms) > 0)\n\n    def deleteRootPassword(self):\n        return Run(\"/sbin/usermod root -p '!!'\")\n\n    def packagedInstall(self,buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies the generated waagent.conf under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot+'/etc'):\n            os.mkdir(buildroot+'/etc')\n        SetFileContents(buildroot+'/etc/waagent.conf', MyDistro.waagent_conf_file)\n\n        if not os.path.exists(buildroot+'/etc/logrotate.d'):\n            os.mkdir(buildroot+'/etc/logrotate.d')\n        SetFileContents(buildroot+'/etc/logrotate.d/WALinuxAgent', WaagentLogrotate)\n\n        self.init_script_file=buildroot+self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint)\n        Run('/sbin/usermod ' + user + ' -G wheel')\n\n    def DeleteAccount(self, user):\n        Run('/sbin/usermod ' + user + ' -G \"\"')\n        super(fedoraDistro, self).DeleteAccount(user)\n\n############################################################    \n#\tFreeBSD\n############################################################    \nFreeBSDWaagentConf = \"\"\"\\\n#\n# Windows Azure Linux Agent Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y       # Password authentication for root 
account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ufs2            #\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Windows Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\n\nbsd_init_file=\"\"\"\\\n#! /bin/sh\n\n# PROVIDE: waagent\n# REQUIRE: DAEMON cleanvar sshd\n# BEFORE: LOGIN\n# KEYWORD: nojail\n\n. /etc/rc.subr\nexport PATH=$PATH:/usr/local/bin\nname=\"waagent\"\nrcvar=\"waagent_enable\"\ncommand=\"/usr/sbin/${name}\"\ncommand_interpreter=\"/usr/local/bin/python\"\nwaagent_flags=\" daemon &\"\n\npidfile=\"/var/run/waagent.pid\"\n\nload_rc_config $name\nrun_rc_command \"$1\"\n\n\"\"\"\nbsd_activate_resource_disk_txt=\"\"\"\\\n#!/usr/bin/env python\n\nimport os\nimport sys\nimport imp\n\n# waagent has no '.py' therefore create waagent module import manually.\n__name__='setupmain' #prevent waagent.__main__ from executing\nwaagent=imp.load_source('waagent','/tmp/waagent') \nwaagent.LoggerInit('/var/log/waagent.log','/dev/console')\nfrom waagent import RunGetOutput,Run\nConfig=waagent.ConfigurationProvider()\nformat = Config.get(\"ResourceDisk.Format\")\nif format == None or format.lower().startswith(\"n\"):\n    sys.exit(0)\ndevice_base = 'da1'\ndevice = \"/dev/\" + device_base\nfor entry in RunGetOutput(\"mount\")[1].split():\n    if entry.startswith(device + \"s1\"):\n        waagent.Log(\"ActivateResourceDisk: \" + device + \"s1 is already mounted.\")\n        sys.exit(0)\nmountpoint = Config.get(\"ResourceDisk.MountPoint\")\nif mountpoint == None:\n    mountpoint = \"/mnt/resource\"\nwaagent.CreateDir(mountpoint, \"root\", 0755)\nfs = Config.get(\"ResourceDisk.Filesystem\")\nif waagent.FreeBSDDistro().mediaHasFilesystem(device) == False :\n    Run(\"newfs \" + device + \"s1\")\nif Run(\"mount \" + device + \"s1 \" + mountpoint):\n    waagent.Error(\"ActivateResourceDisk: Failed to mount resource disk (\" + device + \"s1).\")\n    sys.exit(0)\nwaagent.Log(\"Resource disk (\" + device + \"s1) is mounted at \" + mountpoint + \" with fstype \" + fs)\nwaagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT)\nswap = Config.get(\"ResourceDisk.EnableSwap\")\nif swap == None or swap.lower().startswith(\"n\"):\n    sys.exit(0)\nsizeKB = int(Config.get(\"ResourceDisk.SwapSizeMB\")) * 1024\nif os.path.isfile(mountpoint + \"/swapfile\") and os.path.getsize(mountpoint + \"/swapfile\") != (sizeKB * 1024):\n    os.remove(mountpoint + \"/swapfile\")\nif not os.path.isfile(mountpoint + \"/swapfile\"):\n    Run(\"dd if=/dev/zero of=\" + mountpoint + \"/swapfile bs=1024 count=\" + str(sizeKB))\nif Run(\"mdconfig -a -t vnode -f \" + mountpoint + \"/swapfile -u 0\"):\n    waagent.Error(\"ActivateResourceDisk: Configuring swap - Failed to create 
md0\")\nif not Run(\"swapon /dev/md0\"):\n    waagent.Log(\"Enabled \" + str(sizeKB) + \" KB of swap at \" + mountpoint + \"/swapfile\")\nelse:\n    waagent.Error(\"ActivateResourceDisk: Failed to activate swap at \" + mountpoint + \"/swapfile\")\n\"\"\"\n\nclass FreeBSDDistro(AbstractDistro):\n    \"\"\"\n    FreeBSD Distro concrete class\n    Put FreeBSD specific behavior here...\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        super(FreeBSDDistro,self).__init__()\n        self.agent_service_name = os.path.basename(sys.argv[0]) \n        self.selinux=False\n        self.ssh_service_name='sshd'\n        self.ssh_config_file='/etc/ssh/sshd_config'\n        self.hostname_file_path='/etc/hostname'\n        self.dhcp_client_name='dhclient'\n        self.requiredDeps = [ 'route', 'shutdown', 'ssh-keygen', 'pw'\n                              , 'openssl', 'fdisk', 'sed', 'grep' , 'sudo']\n        self.init_script_file='/etc/rc.d/waagent'\n        self.init_file=bsd_init_file\n        self.agent_package_name='WALinuxAgent'\n        self.fileBlackList = [ \"/root/.bash_history\", \"/var/log/waagent.log\",'/etc/resolv.conf' ]        \n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\"]\n        self.grubKernelBootOptionsFile = '/boot/loader.conf'\n        self.grubKernelBootOptionsLine = ''\n        self.getpidcmd = 'pgrep -n'\n        self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if=' # custom data max len is 64k \n        self.sudoers_dir_base = '/usr/local/etc'\n        self.waagent_conf_file = FreeBSDWaagentConf\n        \n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0777)\n        AppendFileContents(\"/etc/rc.conf\",\"waagent_enable='YES'\\n\")\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(\"services_mkdb \" + self.init_script_file)\n\n        \n    def sshDeployPublicKey(self,fprint,path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else :\n            return 0\n\n    def deleteRootPassword(self):\n        \"\"\"\n        BSD root password removal.\n        \"\"\"\n        filepath=\"/etc/master.passwd\"\n        ReplaceStringInFile(filepath,r'root:.*?:','root::')\n        #ReplaceFileContentsAtomic(filepath,\"root:*LOCK*:14600::::::\\n\"\n        #                          + \"\\n\".join(filter(lambda a: not a.startswith(\"root:\"),GetFileContents(filepath).split('\\n'))))\n        os.chmod(filepath,self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath,'system_u:object_r:shadow_t:s0')\n        RunGetOutput(\"pwd_mkdb -u root /etc/master.passwd\")\n        Log(\"Root password deleted.\")\n        return 0\n\n    def changePass(self,user,password):\n        return RunSendStdin(\"pw usermod \" + user + \" -h 0 \",password)\n    \n    def load_ata_piix(self):\n        return 0\n\n    def unload_ata_piix(self):\n        return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        FreeBSD dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\",chk_err=False):\n                Error(\"Missing required 
dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self,buildroot):\n        pass\n\n    def GetInterfaceName(self):\n        \"\"\"\n        Return the name of the\n        active ethernet interface.\n        \"\"\"\n        iface,inet,mac=self.GetFreeBSDEthernetInfo()\n        return iface\n\n    def RestartInterface(self, iface):\n        Run(\"service netif restart\")\n\n    def GetIpv4Address(self):\n        \"\"\"\n        Return the ip of the\n        active ethernet interface.\n        \"\"\"\n        iface,inet,mac=self.GetFreeBSDEthernetInfo()\n        return inet\n\n    def GetMacAddress(self):\n        \"\"\"\n        Return the MAC address of the\n        active ethernet interface.\n        \"\"\"\n        iface,inet,mac=self.GetFreeBSDEthernetInfo()\n        l=mac.split(':')\n        r=[]\n        for i in l:\n            r.append(string.atoi(i,16))\n        return r\n\n    def GetFreeBSDEthernetInfo(self):\n        \"\"\"\n        There is no SIOCGIFCONF\n        on FreeBSD - just parse ifconfig.\n        Returns strings: iface, inet4_addr, and mac\n        or 'None,None,None' if unable to parse.\n        We will sleep and retry as the network must be up.\n        \"\"\"\n        code,output=RunGetOutput(\"ifconfig\",chk_err=False)\n        Log(output)\n        retries=10\n        cmd='ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP '\n        code=1\n\n        while code > 0 :\n            if code > 0 and retries == 0:\n                Error(\"GetFreeBSDEthernetInfo - Failed to detect ethernet interface\")\n                return None, None, None\n            code,output=RunGetOutput(cmd,chk_err=False)\n            retries-=1\n            if code > 0 and retries > 0 :\n                Log(\"GetFreeBSDEthernetInfo - Error: retry ethernet detection \" + str(retries))\n                if retries == 9 :\n                    c,o=RunGetOutput(\"ifconfig | grep -A1 -B2 ether\",chk_err=False)\n                    if c == 0:\n                        t=o.replace('\\n',' ')\n                        t=t.split()\n                        i=t[0][:-1]\n                        Log(RunGetOutput('id')[1])\n                        Run('dhclient '+i)\n                time.sleep(10)\n\n        j=output.replace('\\n',' ')\n        j=j.split()\n        iface=j[0][:-1]\n\n        for i in range(len(j)):\n            if j[i] == 'inet' :\n                inet=j[i+1]\n            elif j[i] == 'ether' :\n                mac=j[i+1]\n\n        return iface, inet, mac\n
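\n    # Illustrative GetMacAddress result (MAC value hypothetical): for an\n    # ifconfig line reporting ether 00:0d:3a:1b:2c:3d, mac.split(':') yields\n    # ['00', '0d', '3a', '1b', '2c', '3d'] and the method returns the integer\n    # list [0, 13, 58, 27, 44, 61].\n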
\n    def CreateAccount(self,user, password, expiration, thumbprint):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/login.defs\"):\n                uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry != None and userentry[2] < uidmin:\n            Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"pw useradd \" + user + \" -m\"\n            if expiration != None:\n                command += \" -e \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            Log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n        \n        if password != None:\n            self.changePass(user,password)\n        try:\n            # for older distros create sudoers.d\n            if not os.path.isdir(MyDistro.sudoers_dir_base+'/sudoers.d/'):\n                # create the /etc/sudoers.d/ directory\n                os.mkdir(MyDistro.sudoers_dir_base+'/sudoers.d')\n                # add the include of sudoers.d to the /etc/sudoers\n                SetFileContents(MyDistro.sudoers_dir_base+'/sudoers',GetFileContents(MyDistro.sudoers_dir_base+'/sudoers')+'\\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\\n')\n            if password == None:\n                SetFileContents(MyDistro.sudoers_dir_base+\"/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(MyDistro.sudoers_dir_base+\"/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(MyDistro.sudoers_dir_base+\"/sudoers.d/waagent\", 0440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n
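\n    # Illustrative sudoers entry written by CreateAccount above (user name\n    # hypothetical): with no password set, /usr/local/etc/sudoers.d/waagent\n    # contains\n    #   azureuser ALL = (ALL) NOPASSWD: ALL\n    # otherwise it contains\n    #   azureuser ALL = (ALL) ALL\n    # and the file is chmod'ed to 0440 so sudo will accept it.\n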
\n    def DeleteAccount(self,user):\n        \"\"\"\n        Delete the 'user'.\n        Clear utmp first, to avoid error.\n        Removes the /etc/sudoers.d/waagent file.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        if userentry == None:\n            Error(\"DeleteAccount: \" + user + \" not found.\")\n            return\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/login.defs\"):\n                uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry[2] < uidmin:\n            Error(\"DeleteAccount: \" + user + \" is a system user. Will not delete account.\")\n            return\n        Run(\"> /var/run/utmp\") #Delete utmp to prevent error if we are the 'user' deleted\n        pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).pid\n        try:\n            os.remove(MyDistro.sudoers_dir_base+\"/sudoers.d/waagent\")\n        except:\n            pass\n        return\n\n    def ActivateResourceDiskNoThread(self):\n        \"\"\"\n        Format, mount, and if specified in the configuration\n        set resource disk as swap.\n        \"\"\"\n        global DiskActivated\n        Run('cp /usr/sbin/waagent /tmp/')\n        SetFileContents('/tmp/bsd_activate_resource_disk.py',bsd_activate_resource_disk_txt)\n        Run('chmod +x /tmp/bsd_activate_resource_disk.py')\n        pid = subprocess.Popen([\"/tmp/bsd_activate_resource_disk.py\", \"\"]).pid\n        Log(\"Spawning bsd_activate_resource_disk.py\")\n        DiskActivated = True\n        return\n\n    def Install(self):\n        \"\"\"\n        Install the agent service.\n        Check dependencies.\n        Create /etc/waagent.conf and move old version to\n        /etc/waagent.conf.old\n        Copy RulesFiles to /var/lib/waagent\n        Create /etc/logrotate.d/waagent\n        Set /etc/ssh/sshd_config ClientAliveInterval to 180\n        Call ApplyVNUMAWorkaround()\n        \"\"\"\n        if MyDistro.checkDependencies():\n            return 1\n        os.chmod(sys.argv[0], 0755)\n        SwitchCwd()\n        for a in RulesFiles:\n            if os.path.isfile(a):\n                if os.path.isfile(GetLastPathElement(a)):\n                    os.remove(GetLastPathElement(a))\n                shutil.move(a, \".\")\n                Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a) )\n        MyDistro.registerAgentService()\n        if os.path.isfile(\"/etc/waagent.conf\"):\n            try:\n                os.remove(\"/etc/waagent.conf.old\")\n            except:\n                pass\n            try:\n                os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n                Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n            except:\n                pass\n        SetFileContents(\"/etc/waagent.conf\", self.waagent_conf_file)\n        if os.path.exists('/usr/local/etc/logrotate.d/'):\n            SetFileContents(\"/usr/local/etc/logrotate.d/waagent\", WaagentLogrotate)\n        filepath = \"/etc/ssh/sshd_config\"\n        ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n            a.startswith(\"ClientAliveInterval\"),\n            GetFileContents(filepath).split('\\n'))) + \"\\nClientAliveInterval 180\\n\")\n        Log(\"Configured SSH client probing to keep connections alive.\")\n        #ApplyVNUMAWorkaround()\n        return 0\n    \n    def mediaHasFilesystem(self,dsk):\n        if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep \"invalid fdisk partition table found\" ',False):\n            return False\n        return True\n    \n    def mountDVD(self,dvd,location):\n        #At this point we cannot read a Joliet-option UDF DVD on FreeBSD 10 - so we 'dd' it into our location\n        retcode,out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml')\n        if retcode != 0:\n            return retcode,out\n\n        ovfxml = (GetFileContents(location+\"/ovf-env.xml\",asbin=False))\n        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 :\n
            ovfxml = ovfxml[3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them.\n        ovfxml = ovfxml.strip(chr(0x00))\n        ovfxml = \"\".join(filter(lambda x: ord(x)<128, ovfxml))\n        ovfxml = re.sub(r'</Environment>.*\\Z','',ovfxml,0,re.DOTALL)\n        ovfxml += '</Environment>'\n        SetFileContents(location+\"/ovf-env.xml\", ovfxml)\n        return retcode,out\n\n    def GetHome(self):\n        return '/home'\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout by updating the kernel config\n        \"\"\"\n        timeout = Config.get(\"OS.RootDeviceScsiTimeout\") \n        if timeout:\n            Run(\"sysctl kern.cam.da.default_timeout=\" + timeout)\n\n    def setScsiDiskTimeout(self):\n        return\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        return\n\n    def getProcessorCores(self):\n        return int(RunGetOutput(\"sysctl hw.ncpu | awk '{print $2}'\")[1])\n    \n    def getTotalMemory(self):\n        return int(RunGetOutput(\"sysctl hw.realmem | awk '{print $2}'\")[1])/1024\n        \n############################################################\n# END DISTRO CLASS DEFS\n############################################################  \n\n# This lets us index into a string or an array of integers transparently.\ndef Ord(a):\n    \"\"\"\n    Allows indexing into a string or an array of integers transparently.\n    Generic utility function.\n    \"\"\"\n    if type(a) == type(\"a\"):\n        a = ord(a)\n    return a\n\ndef IsLinux():\n    \"\"\"\n    Returns True if platform is Linux.\n    Generic utility function.\n    \"\"\"\n    return (platform.uname()[0] == \"Linux\")\n\ndef GetLastPathElement(path):\n    \"\"\"\n    Similar to basename.\n    Generic utility function.\n    \"\"\"\n    return path.rsplit('/', 1)[1]\n\ndef GetFileContents(filepath,asbin=False):\n    \"\"\"\n    Read and return contents of 'filepath'.\n    \"\"\"\n    mode='r'\n    if asbin:\n        mode+='b'\n    c=None\n    try:\n        with open(filepath, mode) as F :\n            c=F.read()\n    except IOError, e:\n        ErrorWithPrefix('GetFileContents','Reading from file ' + filepath + ' Exception is ' + str(e))\n        return None        \n    return c\n\ndef SetFileContents(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str :\n        contents=contents.encode('latin-1', 'ignore')\n    try:\n        with open(filepath, \"wb+\") as F :\n            F.write(contents)\n    except IOError, e:\n        ErrorWithPrefix('SetFileContents','Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n\ndef AppendFileContents(filepath, contents):\n    \"\"\"\n    Append 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str :\n        contents=contents.encode('latin-1')\n    try: \n        with open(filepath, \"a+\") as F :\n            F.write(contents)\n    except IOError, e:\n        ErrorWithPrefix('AppendFileContents','Appending to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n
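\n# Note on the atomic replace below: ReplaceFileContentsAtomic writes the new\n# contents to a temporary file created in the same directory as the target and\n# then os.rename()s it over the original, so readers never observe a partially\n# written file. Creating the temp file with dir=os.path.dirname(filepath)\n# matters because os.rename is only atomic within a single filesystem.\n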
\ndef ReplaceFileContentsAtomic(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath' by creating a temp file, and replacing original.\n    \"\"\"\n    handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath))\n    if type(contents) == str :\n        contents=contents.encode('latin-1')\n    try:\n        os.write(handle, contents)\n    except IOError, e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    finally:\n        os.close(handle)\n    try:\n        os.rename(temp, filepath)\n        return None\n    except IOError, e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.remove(filepath)\n    except IOError, e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' + str(e))\n    try:\n        os.rename(temp,filepath)\n    except IOError, e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic','Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))\n        return 1\n    return 0\n\ndef GetLineStartingWith(prefix, filepath):\n    \"\"\"\n    Return line from 'filepath' if the line startswith 'prefix'\n    \"\"\"\n    for line in GetFileContents(filepath).split('\\n'):\n        if line.startswith(prefix):\n            return line\n    return None\n\ndef Run(cmd,chk_err=True):\n    \"\"\"\n    Calls RunGetOutput on 'cmd', returning only the return code.\n    If chk_err=True then errors will be reported in the log.\n    If chk_err=False then errors will be suppressed from the log.\n    \"\"\"\n    retcode,out=RunGetOutput(cmd,chk_err)\n    return retcode\n\ndef RunGetOutput(cmd,chk_err=True):\n    \"\"\"\n    Wrapper for subprocess.check_output.\n    Execute 'cmd'.  Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    LogIfVerbose(cmd)\n    try:                                     \n        output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)\n    except subprocess.CalledProcessError,e :\n        if chk_err :\n            Error('CalledProcessError.  Error Code is ' + str(e.returncode)  )\n            Error('CalledProcessError.  Command string was ' + e.cmd  )\n            Error('CalledProcessError.  Command result was ' + (e.output[:-1]).decode('latin-1'))\n        return e.returncode,e.output.decode('latin-1')\n    return 0,output.decode('latin-1')\n\ndef RunSendStdin(cmd,input,chk_err=True,use_shell=True):\n    \"\"\"\n    Wrapper for subprocess.Popen.\n    Execute 'cmd', sending 'input' to STDIN of 'cmd'.\n    Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    LogIfVerbose(cmd+input)\n    try:                                     \n        me=subprocess.Popen([cmd], shell=use_shell, stdin=subprocess.PIPE,stderr=subprocess.STDOUT,stdout=subprocess.PIPE)\n        output=me.communicate(input)\n    except OSError , e :\n        if chk_err :\n            Error('CalledProcessError.  Error Code is ' + str(me.returncode)  )\n            Error('CalledProcessError.  Command string was ' + cmd  )\n            Error('CalledProcessError.  Command result was ' + output[0].decode('latin-1'))\n            return 1,output[0].decode('latin-1')\n    if me.returncode != 0 and chk_err == True:\n            Error('CalledProcessError.  Error Code is ' + str(me.returncode)  )\n            Error('CalledProcessError.  Command string was ' + cmd  )\n            Error('CalledProcessError.  
Command result was ' + output[0].decode('latin-1'))\n    return me.returncode,output[0].decode('latin-1')\n\ndef GetNodeTextData(a):\n    \"\"\"\n    Filter non-text nodes from DOM tree\n    \"\"\"\n    for b in a.childNodes:\n        if b.nodeType == b.TEXT_NODE:\n            return b.data\n\ndef GetHome():\n    \"\"\"\n    Attempt to guess the $HOME location.\n    Return the path string.\n    \"\"\"\n    home = None\n    try:\n        home = GetLineStartingWith(\"HOME\", \"/etc/default/useradd\").split('=')[1].strip()\n    except:\n        pass\n    if (home == None) or (home.startswith(\"/\") == False):\n        home = \"/home\"\n    return home\n\ndef ChangeOwner(filepath, user):\n    \"\"\"\n    Lookup user.  Attempt chown 'filepath' to 'user'.\n    \"\"\"\n    p = None\n    try:\n        p = pwd.getpwnam(user)\n    except:\n        pass\n    if p != None:\n        os.chown(filepath, p[2], p[3])\n\ndef CreateDir(dirpath, user, mode):\n    \"\"\"\n    Attempt os.makedirs, catch all exceptions.\n    Call ChangeOwner afterwards.\n    \"\"\"\n    try:\n        os.makedirs(dirpath, mode)\n    except:\n        pass\n    ChangeOwner(dirpath, user)\n\ndef CreateAccount(user, password, expiration, thumbprint):\n    \"\"\"\n    Create a user account, with 'user', 'password', 'expiration', ssh keys\n    and sudo permissions.\n    Returns None if successful, error string on failure.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry != None and userentry[2] < uidmin:\n        Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n        return \"Failed to set password for system user: \" + user + \" (0x06).\"\n    if userentry == None:\n        command = \"useradd -m \" + user\n        if expiration != None:\n            command += \" -e \" + expiration.split('.')[0]\n        if Run(command):\n            Error(\"Failed to create user account: \" + user)\n            return \"Failed to create user account: \" + user + \" (0x07).\"\n    else:\n        Log(\"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n    if password != None:\n        RunSendStdin(\"chpasswd\",(user + \":\" + password + \"\\n\"))\n    try:\n        # for older distros create sudoers.d\n        if not os.path.isdir('/etc/sudoers.d/'):\n            # create the /etc/sudoers.d/ directory\n            os.mkdir('/etc/sudoers.d/')\n            # add the include of sudoers.d to the /etc/sudoers\n            SetFileContents('/etc/sudoers',GetFileContents('/etc/sudoers')+'\\n#includedir /etc/sudoers.d\\n')\n        if password == None:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n        else:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n        os.chmod(\"/etc/sudoers.d/waagent\", 0440)\n    except:\n        Error(\"CreateAccount: Failed to configure sudo access for user.\")\n        return \"Failed to configure sudo privileges (0x08).\"\n    home = MyDistro.GetHome()\n    if thumbprint != None:\n        dir = home + \"/\" + user + \"/.ssh\"\n        CreateDir(dir, user, 0700)\n        pub = dir + \"/id_rsa.pub\"\n        prv = dir + \"/id_rsa\"\n        Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n        SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n        for f in [pub, prv]:\n            os.chmod(f, 0600)\n            ChangeOwner(f, user)\n        SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n        ChangeOwner(dir + \"/authorized_keys\", user)\n    Log(\"Created user account: \" + user)\n    return None\n\ndef DeleteAccount(user):\n    \"\"\"\n    Delete the 'user'.\n    Clear utmp first, to avoid error.\n    Removes the /etc/sudoers.d/waagent file.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    if userentry == None:\n        Error(\"DeleteAccount: \" + user + \" not found.\")\n        return\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry[2] < uidmin:\n        Error(\"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n        return\n    Run(\"> /var/run/utmp\") #Delete utmp to prevent error if we are the 'user' deleted\n    Run(\"userdel -f -r \" + user)\n    try:\n        os.remove(\"/etc/sudoers.d/waagent\")\n    except:\n        pass\n    return\n\ndef IsInRangeInclusive(a, low, high):\n    \"\"\"\n    Return True if 'a' is in the range 'low' <= a <= 'high' (inclusive).\n    \"\"\"\n    return (a >= low and a <= high)\n\ndef IsPrintable(ch):\n    \"\"\"\n    Return True if character is displayable.\n    \"\"\"\n    return IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or IsInRangeInclusive(ch, Ord('a'), Ord('z')) or IsInRangeInclusive(ch, Ord('0'), Ord('9'))\n\ndef HexDump(buffer, size):\n    \"\"\"\n    Return Hex formatted dump of a 'buffer' of 'size'.\n    \"\"\"\n    if size < 0:\n        size = len(buffer)\n    result = \"\"\n    for i in range(0, size):\n        if (i % 16) == 0:\n            result += \"%06X: \" % i\n        byte = buffer[i]\n        if type(byte) == str:\n            byte = ord(byte.decode('latin1'))\n        result += \"%02X \" % byte\n        if (i & 15) == 7:\n            result += \" \"\n        if ((i + 1) % 16) == 0 or (i + 1) == size:\n            j = i\n            while ((j + 1) % 16) != 0:\n                result += \"   \"\n                if (j & 7) == 7:\n                    result += \" \"\n                j += 1\n            result += \" \"\n            for j in range(i - (i % 16), i + 1):\n                byte=buffer[j]\n                if type(byte) == str:\n                    byte = ord(byte.decode('latin1'))\n                k = '.'\n                if IsPrintable(byte):\n                    k = chr(byte)\n                result += k\n            if (i + 1) != size:\n                result += \"\\n\"\n    return result\n\ndef SimpleLog(file_path,message):\n    if not file_path or len(message) < 1:\n        return\n    t = time.localtime()\n    t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n    lines=re.sub(re.compile(r'^(.)',re.MULTILINE),t+r'\\1',message)\n    with open(file_path, \"a\") as F :\n        lines = filter(lambda x : x in string.printable, lines)\n        F.write(lines.encode('ascii','ignore') + \"\\n\")\n
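\n# Illustrative HexDump output (input hypothetical): each 16-byte row begins\n# with a six-digit hex offset such as 000000:, lists the bytes as two-digit\n# hex values with an extra space after the eighth column, and ends with the\n# alphanumeric characters of the row (all other bytes shown as '.').\n\nclass Logger(object):\n    \"\"\"\n    The Agent's logging assumptions are:\n    For Log, and LogWithPrefix all messages are logged to the\n    self.file_path and to the self.con_path.  Setting either path\n    parameter to None skips that log.  If Verbose is enabled, messages\n    calling the LogIfVerbose method will be logged to file_path yet\n    not to con_path.  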
Error and Warn messages are normal log messages\n    with the 'ERROR:' or 'WARNING:' prefix added.\n    \"\"\"\n\n    def __init__(self,filepath,conpath,verbose=False):\n        \"\"\"\n        Construct an instance of Logger.\n        \"\"\"\n        self.file_path=filepath\n        self.con_path=conpath\n        self.verbose=verbose\n        \n    def ThrottleLog(self,counter):\n        \"\"\"\n        Log everything up to 10, every 10 up to 100, then every 100.\n        \"\"\"\n        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)\n    \n    def LogToFile(self,message):\n        \"\"\"\n        Write 'message' to logfile.\n        \"\"\"\n        if self.file_path:\n            try:\n                with open(self.file_path, \"a\") as F :\n                    message = filter(lambda x : x in string.printable, message)\n                    F.write(message.encode('ascii','ignore') + \"\\n\")\n            except IOError, e:\n                print e\n                pass\n            \n    def LogToCon(self,message):\n        \"\"\"\n        Write 'message' to /dev/console.\n        This supports serial port logging if the /dev/console\n        is redirected to ttys0 in kernel boot options.\n        \"\"\"\n        if self.con_path:\n            try:\n                with open(self.con_path, \"w\") as C :\n                    message = filter(lambda x : x in string.printable, message)\n                    C.write(message.encode('ascii','ignore') + \"\\n\")\n            except IOError, e:\n                print e\n                pass\n                \n    def Log(self,message):\n        \"\"\"\n        Standard Log function.\n        Logs to self.file_path, and con_path\n        \"\"\"\n        self.LogWithPrefix(\"\", message)\n    \n    def LogWithPrefix(self,prefix, message):\n        \"\"\"\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        t = time.localtime()\n        t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n        t += prefix\n        for line in message.split('\\n'):\n            line = t + line\n            self.LogToFile(line)\n            self.LogToCon(line)\n            \n    def NoLog(self,message):\n        \"\"\"\n        Don't Log.\n        \"\"\"\n        pass\n    \n    def LogIfVerbose(self,message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        \"\"\"\n        self.LogWithPrefixIfVerbose('',message)\n    \n    def LogWithPrefixIfVerbose(self,prefix, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        if self.verbose == True:\n            t = time.localtime()\n            t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n            t += prefix\n            for line in message.split('\\n'):\n                line = t + line\n                self.LogToFile(line)\n                self.LogToCon(line)\n                \n    def Warn(self,message):\n        \"\"\"\n        Prepend the text \"WARNING:\" to the prefix for each line in 'message'.\n        \"\"\"\n        self.LogWithPrefix(\"WARNING:\", message)\n    \n    def Error(self,message):\n        \"\"\"\n        Call ErrorWithPrefix(message).\n        \"\"\"\n        ErrorWithPrefix(\"\", message)\n    \n    def ErrorWithPrefix(self,prefix, message):\n        
\"\"\"\n        Prepend the text \"ERROR:\" to the prefix for each line in 'message'.\n        Errors written to logfile, and /dev/console\n        \"\"\"\n        self.LogWithPrefix(\"ERROR:\", message)\n    \ndef LoggerInit(log_file_path,log_con_path,verbose=False):\n    \"\"\"\n    Create log object and export its methods to global scope.\n    \"\"\"\n    global Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger\n    l=Logger(log_file_path,log_con_path,verbose)\n    Log,LogWithPrefix,LogIfVerbose,LogWithPrefixIfVerbose,Error,ErrorWithPrefix,Warn,NoLog,ThrottleLog,myLogger = l.Log,l.LogWithPrefix,l.LogIfVerbose,l.LogWithPrefixIfVerbose,l.Error,l.ErrorWithPrefix,l.Warn,l.NoLog,l.ThrottleLog,l\n\ndef Linux_ioctl_GetInterfaceMac(ifname):\n    \"\"\"\n    Return the MAC address bound to 'ifname'.\n    \"\"\"\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    info = fcntl.ioctl(s.fileno(), 0x8927,  struct.pack('256s', (ifname[:15]+('\\0'*241)).encode('latin-1')))\n    return ''.join(['%02X' % Ord(char) for char in info[18:24]])\n\ndef GetFirstActiveNetworkInterfaceNonLoopback():\n    \"\"\"\n    Return the interface name, and ip addr of the\n    first active non-loopback interface.\n    \"\"\"\n    iface=''\n    expected=16 # how many devices should I expect...\n    is_64bits = sys.maxsize > 2**32\n    struct_size=40 if is_64bits else 32 # for 64bit the size is 40 bytes, for 32bits it is 32 bytes.\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    buff=array.array('B', b'\\0' * (expected*struct_size))\n    retsize=(struct.unpack('iL', fcntl.ioctl(s.fileno(), 0x8912, struct.pack('iL',expected*struct_size,buff.buffer_info()[0]))))[0]\n    if retsize == (expected*struct_size) :\n        Warn('SIOCGIFCONF returned more than ' + str(expected) + ' up network interfaces.')\n    s=buff.tostring()\n    preferred_nic = Config.get(\"Network.Interface\")\n    for i in range(0,struct_size*expected,struct_size):\n        iface=s[i:i+16].split(b'\\0', 1)[0]\n        if iface == b'lo':\n            continue\n        elif preferred_nic is None:\n            break\n        elif iface == preferred_nic:\n            break\n    return iface.decode('latin-1'), socket.inet_ntoa(s[i+20:i+24])\n\ndef GetIpv4Address():\n    \"\"\"\n    Return the ip of the\n    first active non-loopback interface.\n    \"\"\"\n    iface,addr=GetFirstActiveNetworkInterfaceNonLoopback()\n    return addr\n\ndef HexStringToByteArray(a):\n    \"\"\"\n    Return hex string packed into a binary struct.\n    \"\"\"\n    b = b\"\"\n    for c in range(0, len(a) // 2):\n        b += struct.pack(\"B\", int(a[c * 2:c * 2 + 2], 16))\n    return b\n\ndef GetMacAddress():\n    \"\"\"\n    Convenience function, returns mac addr bound to\n    first non-loopback interface.\n    \"\"\"\n    ifname=''\n    while len(ifname) < 2 :\n        ifname=GetFirstActiveNetworkInterfaceNonLoopback()[0]\n    a = Linux_ioctl_GetInterfaceMac(ifname)        \n    return HexStringToByteArray(a)\n
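\n# Illustrative conversion chain used by GetMacAddress above (interface name\n# and MAC hypothetical): Linux_ioctl_GetInterfaceMac('eth0') returns the\n# twelve hex digits '000D3A1B2C3D', which HexStringToByteArray packs pairwise\n# with struct.pack('B', ...) into the six raw bytes 00 0D 3A 1B 2C 3D.\n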
\ndef DeviceForIdePort(n):\n    \"\"\"\n    Return device name attached to ide port 'n'.\n    \"\"\"\n    if n > 3:\n        return None\n    g0 = \"00000000\"\n    if n > 1:\n        g0 = \"00000001\"\n        n = n - 2\n    device = None\n    path = \"/sys/bus/vmbus/devices/\"\n    for vmbus in os.listdir(path):\n        guid = GetFileContents(path + vmbus + \"/device_id\").lstrip('{').split('-')\n        if guid[0] == g0 and guid[1] == \"000\" + str(n):\n            for root, dirs, files in os.walk(path + vmbus):\n                if root.endswith(\"/block\"):\n                    device = dirs[0]\n                    break\n                else : #older distros\n                    for d in dirs:\n                        if ':' in d and \"block\" == d.split(':')[0]:\n                            device = d.split(':')[1]\n                            break\n            break\n    return device\n\nclass HttpResourceGoneError(Exception):\n    pass\n\nclass Util(object):\n    \"\"\"\n    Http communication class.\n    Base of GoalState, and Agent classes.\n    \"\"\"\n    RetryWaitingInterval=10\n\n    def __init__(self):\n        self.Endpoint = None\n\n    def _ParseUrl(self, url):\n        secure = False\n        host = self.Endpoint\n        path = url\n        port = None\n        \n        #\"http[s]://hostname[:port][/]\"\n        if url.startswith(\"http://\"):\n            url = url[7:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n        elif url.startswith(\"https://\"):\n            secure = True\n            url = url[8:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n\n        if host is None:\n            raise ValueError(\"Host is invalid:{0}\".format(url))\n        \n        if(\":\" in host):\n            pos = host.rfind(\":\")\n            port = int(host[pos + 1:])\n            host = host[0:pos]\n        \n        return host, port, secure, path\n\n    def GetHttpProxy(self, secure):\n        \"\"\"\n        Get the proxy host and port from the HttpProxy.Host and\n        HttpProxy.Port configuration values.\n        Username and password are not supported now.\n        \"\"\"\n        host = Config.get(\"HttpProxy.Host\")\n        port = Config.get(\"HttpProxy.Port\")\n        return (host, port)\n
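\n    # Illustrative _ParseUrl results (see the method above; URLs hypothetical):\n    #   _ParseUrl('http://168.63.129.16/machine/?comp=goalstate')\n    #     -> ('168.63.129.16', None, False, '/machine/?comp=goalstate')\n    #   _ParseUrl('https://example.blob.core.windows.net:443/vhds/disk.vhd')\n    #     -> ('example.blob.core.windows.net', 443, True, '/vhds/disk.vhd')\n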
\n    def _HttpRequest(self, method, host, path, port=None, data=None, secure=False, \n                     headers=None, proxyHost=None, proxyPort=None):\n        resp = None\n        conn = None\n        try:\n            if secure:\n                port = 443 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httplib.HTTPSConnection(proxyHost, proxyPort)\n                    conn.set_tunnel(host, port)\n                    #If proxy is used, full url is needed.\n                    path = \"https://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httplib.HTTPSConnection(host, port)\n            else:\n                port = 80 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httplib.HTTPConnection(proxyHost, proxyPort)\n                    #If proxy is used, full url is needed.\n                    path = \"http://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httplib.HTTPConnection(host, port)\n            if headers == None:\n                conn.request(method, path, data)\n            else:\n                conn.request(method, path, data, headers)\n            resp = conn.getresponse()\n        except httplib.HTTPException, e:\n            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))\n        except IOError, e:\n            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))\n        return resp\n\n    def HttpRequest(self, method, url, data=None, \n                    headers=None, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Send an http request to the server.\n        On error, sleep 10 seconds between attempts, up to maxRetry times.\n        Return the response object or None.\n        \"\"\"\n        LogIfVerbose(\"HTTP Req: {0} {1}\".format(method, url))\n        LogIfVerbose(\"HTTP Req: Data={0}\".format(data))\n        LogIfVerbose(\"HTTP Req: Header={0}\".format(headers))\n        try:\n            host, port, secure, path = self._ParseUrl(url)\n        except ValueError, e:\n            Error(\"Failed to parse url:{0}\".format(url))\n            return None\n\n        #Check proxy\n        proxyHost, proxyPort = (None, None)\n        if chkProxy:\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        #If httplib module is not built with ssl support, fall back to http\n        if secure and not hasattr(httplib, \"HTTPSConnection\"):\n            Warn(\"httplib is not built with ssl support\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n        \n        #If httplib module doesn't support https tunnelling, fall back to http\n        if secure and \\\n                proxyHost is not None and \\\n                proxyPort is not None and \\\n                not hasattr(httplib.HTTPSConnection, \"set_tunnel\"):\n            Warn(\"httplib doesn't support https tunnelling (new in python 2.7)\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        resp = self._HttpRequest(method, host, path, port=port, data=data, \n                                 secure=secure, headers=headers,\n                                 proxyHost=proxyHost, proxyPort=proxyPort)\n        for retry in range(0, maxRetry):\n            if resp is not None and \\\n                   (resp.status == httplib.OK or \\\n                    resp.status == httplib.CREATED or \\\n                    resp.status == httplib.ACCEPTED):\n                return resp\n\n            if resp is not None and resp.status == httplib.GONE:\n                raise HttpResourceGoneError(\"Http resource gone.\")\n\n            Error(\"Retry={0}\".format(retry))\n            Error(\"HTTP Req: {0} {1}\".format(method, url))\n            Error(\"HTTP Req: Data={0}\".format(data))\n            Error(\"HTTP Req: Header={0}\".format(headers))\n            if resp is None:\n                Error(\"HTTP Err: response is empty.\")\n            else:\n                Error(\"HTTP Err: Status={0}\".format(resp.status))\n                Error(\"HTTP Err: Reason={0}\".format(resp.reason))\n                Error(\"HTTP Err: Header={0}\".format(resp.getheaders()))\n                Error(\"HTTP Err: Body={0}\".format(resp.read()))\n\n            time.sleep(self.__class__.RetryWaitingInterval)\n            resp = self._HttpRequest(method, host, path, port=port, data=data, \n                                     secure=secure, headers=headers,\n                                     proxyHost=proxyHost, proxyPort=proxyPort)\n\n        return None\n\n    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"GET\", url, headers=headers, \n                                maxRetry=maxRetry, chkProxy=chkProxy)\n        \n    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"HEAD\", url, headers=headers, 
\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n        \n    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"POST\", url, data=data, headers=headers, \n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"PUT\", url, data=data, headers=headers, \n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"DELETE\", url, headers=headers, \n                                maxRetry=maxRetry, chkProxy=chkProxy)\n    \n    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url'.\n        \"\"\"\n        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry, \n                            chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url' with\n        x-ms-agent-name and x-ms-version\n        headers.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName, \n            \"x-ms-version\": ProtocolVersion\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3, \n                                 chkProxy=False):\n        \"\"\"\n        Return output of get using ssl cert.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName,\n            \"x-ms-version\": ProtocolVersion,\n            \"x-ms-cipher-name\": \"DES_EDE3_CBC\",\n            \"x-ms-guest-agent-public-x509-cert\": transportCert\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):\n        headers = {\n            \"x-ms-agent-name\": GuestAgentName,\n            \"Content-Type\": \"text/xml; charset=utf-8\",\n            \"x-ms-version\": ProtocolVersion\n        }\n        return self.HttpPost(url, data=data, headers=headers, \n                             maxRetry=maxRetry, chkProxy=chkProxy)\n\n__StorageVersion=\"2014-02-14\"\n\ndef GetBlobType(url):\n    restutil = Util()\n    #Check blob type\n    LogIfVerbose(\"Check blob type.\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    blobPropResp = restutil.HttpHead(url, {\n        \"x-ms-date\" :  timestamp,\n        'x-ms-version' : __StorageVersion\n    }, chkProxy=True)\n    blobType = None\n    if blobPropResp is None:\n        Error(\"Can't get status blob type.\")\n        return None\n    blobType = blobPropResp.getheader(\"x-ms-blob-type\")\n    LogIfVerbose(\"Blob type={0}\".format(blobType))\n    return blobType\n
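\n# Illustrative sizing for the page-blob path below (payload length\n# hypothetical): a 700000-byte status payload is first reserved as\n# pageBlobSize = ((700000 + 511) / 512) * 512 = 700416 bytes, and the data is\n# then uploaded in x-ms-range chunks of at most 4MB, each chunk zero-padded so\n# it ends on a 512-byte boundary.\n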
\ndef PutBlockBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Upload block blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    ret = restutil.HttpPut(url, data, {\n        \"x-ms-date\" :  timestamp,\n        \"x-ms-blob-type\" : \"BlockBlob\",\n        \"Content-Length\": str(len(data)),\n        \"x-ms-version\" : __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to upload block blob for status.\")\n\ndef PutPageBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Replace old page blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    #Align to 512 bytes\n    pageBlobSize = ((len(data) + 511) / 512) * 512\n    ret = restutil.HttpPut(url, \"\", {\n        \"x-ms-date\" :  timestamp,\n        \"x-ms-blob-type\" : \"PageBlob\",\n        \"Content-Length\": \"0\",\n        \"x-ms-blob-content-length\" : str(pageBlobSize),\n        \"x-ms-version\" : __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to clean up page blob for status\")\n        return\n        \n    if url.find('?') < 0:\n        url = \"{0}?comp=page\".format(url)\n    else:\n        url = \"{0}&comp=page\".format(url)\n   \n    LogIfVerbose(\"Upload page blob\")\n    pageMax = 4 * 1024 * 1024 #Max page size: 4MB\n    start = 0\n    end = 0\n    while end < len(data):\n        end = min(len(data), start + pageMax)\n        contentSize = end - start\n        #Align to 512 bytes\n        pageEnd = ((end + 511) / 512) * 512\n        bufSize = pageEnd - start\n        buf = bytearray(bufSize)\n        buf[0 : contentSize] = data[start : end]\n        ret = restutil.HttpPut(url, buffer(buf), {\n            \"x-ms-date\" :  timestamp,\n            \"x-ms-range\" : \"bytes={0}-{1}\".format(start, pageEnd - 1),\n            \"x-ms-page-write\" : \"update\",\n            \"x-ms-version\" : __StorageVersion,\n            \"Content-Length\": str(pageEnd - start)\n        }, chkProxy=True)\n        if ret is None:\n            Error(\"Failed to upload page blob for status\")\n            return\n        start = end\n\ndef UploadStatusBlob(url, data):\n    LogIfVerbose(\"Upload status blob\")\n    LogIfVerbose(\"Status={0}\".format(data))\n    blobType = GetBlobType(url) \n\n    if blobType == \"BlockBlob\":\n        PutBlockBlob(url, data)    \n    elif blobType == \"PageBlob\":\n        PutPageBlob(url, data)    \n    else:\n        Error(\"Unknown blob type: {0}\".format(blobType))\n        return None\n
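\n# Illustrative probe-log throttling used in TCPHandler.handle below:\n# ThrottleLog returns True for counters 1-9, then only for multiples of 10\n# below 100, then only for multiples of 100, so probes 5, 40 and 1300 are\n# logged while probes 41 and 1301 are not.\n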
node may be shut-down.\n    \"\"\"\n    def __init__(self, port):\n        self.ProbeCounter = 0\n        self.server = SocketServer.TCPServer((self.get_ip(), port), TCPHandler)\n        self.server_thread = threading.Thread(target = self.server.serve_forever)\n        self.server_thread.setDaemon(True)\n        self.server_thread.start()\n        \n    def shutdown(self):\n        self.server.shutdown()\n\n    def get_ip(self):\n        for retry in range(1,6):\n            ip = MyDistro.GetIpv4Address()\n            if ip == None :\n                Log(\"LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry \" + str(retry+1) )\n                time.sleep(10)\n            else:\n                return ip\n\nclass ConfigurationProvider(object):\n    \"\"\"\n    Parse and store key:values in waagent.conf\n    \"\"\"\n    def __init__(self, walaConfigFile):\n        self.values = dict()\n        if 'MyDistro' not in globals():\n            global MyDistro\n            MyDistro = GetMyDistro()\n        if walaConfigFile is None:\n            walaConfigFile = MyDistro.getConfigurationPath()\n        if os.path.isfile(walaConfigFile) == False:\n            raise Exception(\"Missing configuration in {0}\".format(walaConfigFile))\n        try:\n            for line in GetFileContents(walaConfigFile).split('\\n'):\n                if not line.startswith(\"#\") and \"=\" in line:\n                    parts = line.split()[0].split('=')\n                    value = parts[1].strip(\"\\\" \")\n                    if value != \"None\":\n                        self.values[parts[0]] = value\n                    else:\n                        self.values[parts[0]] = None\n        except:\n            Error(\"Unable to parse {0}\".format(walaConfigFile))\n            raise\n        return\n\n    def get(self, key):\n        return self.values.get(key)\n\nclass EnvMonitor(object):\n    \"\"\"\n    Monitor changes to dhcp and hostname.\n    If dhcp client process re-start has occurred, reset routes, dhcp with fabric.\n    \"\"\"\n    def __init__(self):\n        self.shutdown = False\n        self.HostName = socket.gethostname()\n        self.server_thread = threading.Thread(target = self.monitor)\n        self.server_thread.setDaemon(True)\n        self.server_thread.start()\n        self.published = False\n\n    def monitor(self):\n        \"\"\"\n        Monitor dhcp client pid and hostname.\n        If dhcp client process re-start has occurred, reset routes, dhcp with fabric.\n        \"\"\"\n        publish = Config.get(\"Provisioning.MonitorHostName\")\n        dhcpcmd = MyDistro.getpidcmd+ ' ' + MyDistro.getDhcpClientName()\n        dhcppid = RunGetOutput(dhcpcmd)[1]\n        while not self.shutdown:\n            for a in RulesFiles:\n                if os.path.isfile(a):\n                    if os.path.isfile(GetLastPathElement(a)):\n                        os.remove(GetLastPathElement(a))\n                    shutil.move(a, \".\")\n                    Log(\"EnvMonitor: Moved \" + a + \" -> \" + LibDir)\n            MyDistro.setScsiDiskTimeout()\n            if publish != None and publish.lower().startswith(\"y\"):\n                try:\n                    if socket.gethostname() != self.HostName:\n                        Log(\"EnvMonitor: Detected host name change: \" + self.HostName + \" -> \" + socket.gethostname())\n                        self.HostName = socket.gethostname()\n                        WaAgent.UpdateAndPublishHostName(self.HostName)\n                        dhcppid = RunGetOutput(dhcpcmd)[1]\n                        self.published = True\n                except:\n                    pass\n            else:\n                self.published = True\n            pid = \"\"\n            if not os.path.isdir(\"/proc/\" + dhcppid.strip()):\n                pid = RunGetOutput(dhcpcmd)[1]\n            if pid != \"\" and pid != dhcppid:\n                Log(\"EnvMonitor: Detected dhcp client restart. Restoring routing table.\")\n                WaAgent.RestoreRoutes()\n                dhcppid = pid\n            for child in Children:\n                if child.poll() != None:\n                    Children.remove(child)\n            time.sleep(5)\n\n    def SetHostName(self, name):\n        \"\"\"\n        Generic call to MyDistro.setHostname(name).\n        Complain to Log on error.\n        \"\"\"\n        if socket.gethostname() == name:\n            self.published = True\n        elif MyDistro.setHostname(name):\n            Error(\"Error: SetHostName: Cannot set hostname to \" + name)\n            return (\"Error: SetHostName: Cannot set hostname to \" + name)\n\n    def IsHostnamePublished(self):\n        \"\"\"\n        Return self.published  \n        \"\"\"\n        return self.published\n\n    def ShutdownService(self):\n        \"\"\"\n        Stop server communication and join the thread to main thread.\n        \"\"\"\n        self.shutdown = True\n        self.server_thread.join()\n\nclass Certificates(object):\n    \"\"\"\n    Object containing certificates of host and provisioned user.\n    Parses and splits certificates into files.\n    \"\"\"\n    #     <CertificateFile xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"certificates10.xsd\">\n    #     <Version>2010-12-15</Version>\n    #     <Incarnation>2</Incarnation>\n    #     <Format>Pkcs7BlobWithPfxContents</Format>\n    #     <Data>MIILTAY...\n    #     </Data>\n    #     </CertificateFile>\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset the Role, Incarnation\n        \"\"\"\n        self.Incarnation = None\n        self.Role = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse multiple certificates into separate files.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"Certificates.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [ \"CertificateFile\", \"Version\", \"Incarnation\",\n                   \"Format\", \"Data\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"Certificates.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"CertificateFile\":\n            Error(\"Certificates.Parse: root not CertificateFile\")\n            return None\n        SetFileContents(\"Certificates.p7m\",\n            \"MIME-Version: 1.0\\n\"\n            + \"Content-Disposition: attachment; filename=\\\"Certificates.p7m\\\"\\n\"\n            + \"Content-Type: application/x-pkcs7-mime; name=\\\"Certificates.p7m\\\"\\n\"\n            + \"Content-Transfer-Encoding: base64\\n\\n\"\n            + GetNodeTextData(dom.getElementsByTagName(\"Data\")[0]))\n        if Run(Openssl + \" cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | \" + Openssl + \" pkcs12 -nodes -password pass: -out Certificates.pem\"):\n            Error(\"Certificates.Parse: Failed to extract certificates from CMS 
message.\")\n            return self\n        # There may be multiple certificates in this package. Split them.\n        file = open(\"Certificates.pem\")\n        pindex = 1\n        cindex = 1\n        output = open(\"temp.pem\", \"w\")\n        for line in file.readlines():\n            output.write(line)\n            if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$',line):\n                output.close()\n                if re.match(r'[-]+END .*?KEY[-]+$',line):\n                    os.rename(\"temp.pem\", str(pindex) + \".prv\")\n                    pindex += 1\n                else:\n                    os.rename(\"temp.pem\", str(cindex) + \".crt\")\n                    cindex += 1\n                output = open(\"temp.pem\", \"w\")\n        output.close()\n        os.remove(\"temp.pem\")\n        keys = dict()\n        index = 1\n        filename = str(index) + \".crt\"\n        while os.path.isfile(filename):\n            thumbprint = (RunGetOutput(Openssl + \" x509 -in \" + filename + \" -fingerprint -noout\")[1]).rstrip().split('=')[1].replace(':', '').upper()\n            pubkey=RunGetOutput(Openssl + \" x509 -in \" + filename + \" -pubkey -noout\")[1]\n            keys[pubkey] = thumbprint\n            os.rename(filename, thumbprint + \".crt\")\n            os.chmod(thumbprint + \".crt\", 0600)\n            MyDistro.setSelinuxContext(thumbprint + '.crt','unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".crt\"\n        index = 1\n        filename = str(index) + \".prv\"\n        while os.path.isfile(filename):\n            pubkey = RunGetOutput(Openssl + \" rsa -in \" + filename + \" -pubout 2> /dev/null \")[1]\n            os.rename(filename, keys[pubkey] + \".prv\")\n            os.chmod(keys[pubkey] + \".prv\", 0600)\n            MyDistro.setSelinuxContext( keys[pubkey] + '.prv','unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".prv\"\n        return self\n\nclass SharedConfig(object):\n    \"\"\"\n    Parse role endpoint server and goal state config.\n    \"\"\"\n    #\n    # <SharedConfig version=\"1.0.0.0\" goalStateIncarnation=\"1\">\n    #   <Deployment name=\"db00a7755a5e4e8a8fe4b19bc3b330c3\" guid=\"{ce5a036f-5c93-40e7-8adf-2613631008ab}\" incarnation=\"2\">\n    #     <Service name=\"MyVMRoleService\" guid=\"{00000000-0000-0000-0000-000000000000}\" />\n    #     <ServiceInstance name=\"db00a7755a5e4e8a8fe4b19bc3b330c3.1\" guid=\"{d113f4d7-9ead-4e73-b715-b724b5b7842c}\" />\n    #   </Deployment>\n    #   <Incarnation number=\"1\" instance=\"MachineRole_IN_0\" guid=\"{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}\" />\n    #   <Role guid=\"{73d95f1c-6472-e58e-7a1a-523554e11d46}\" name=\"MachineRole\" settleTimeSeconds=\"10\" />\n    #   <LoadBalancerSettings timeoutSeconds=\"0\" waitLoadBalancerProbeCount=\"8\">\n    #     <Probes>\n    #       <Probe name=\"MachineRole\" />\n    #       <Probe name=\"55B17C5E41A1E1E8FA991CF80FAC8E55\" />\n    #       <Probe name=\"3EA4DBC19418F0A766A4C19D431FA45F\" />\n    #     </Probes>\n    #   </LoadBalancerSettings>\n    #   <OutputEndpoints>\n    #     <Endpoint name=\"MachineRole:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" type=\"SFS\">\n    #       <Target instance=\"MachineRole_IN_0\" endpoint=\"Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" />\n    #     </Endpoint>\n    #   </OutputEndpoints>\n    #   <Instances>\n    #     <Instance id=\"MachineRole_IN_0\" address=\"10.115.153.75\">\n    #       <FaultDomains 
randomId=\"0\" updateId=\"0\" updateCount=\"0\" />\n    #       <InputEndpoints>\n    #         <Endpoint name=\"a\" address=\"10.115.153.75:80\" protocol=\"http\" isPublic=\"true\" loadBalancedPublicAddress=\"70.37.106.197:80\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"80\" to=\"80\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #         <Endpoint name=\"Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" address=\"10.115.153.75:3389\" protocol=\"tcp\" isPublic=\"false\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"3389\" to=\"3389\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #         <Endpoint name=\"Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput\" address=\"10.115.153.75:20000\" protocol=\"tcp\" isPublic=\"true\" loadBalancedPublicAddress=\"70.37.106.197:3389\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"20000\" to=\"20000\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #       </InputEndpoints>\n    #     </Instance>\n    #   </Instances>\n    # </SharedConfig>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.RdmaMacAddress = None\n        self.RdmaIPv4Address = None\n        self.xmlText = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse and write configuration to file SharedConfig.xml.\n        \"\"\"\n        LogIfVerbose(xmlText)\n        self.reinitialize()\n        self.xmlText = xmlText\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [ \"SharedConfig\", \"Deployment\", \"Service\",\n                   \"ServiceInstance\", \"Incarnation\", \"Role\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"SharedConfig.Parse: Missing \" + a)\n\n        node = dom.childNodes[0]\n        if node.localName != \"SharedConfig\":\n            Error(\"SharedConfig.Parse: root not SharedConfig\")\n\n        nodes = dom.getElementsByTagName(\"Instance\")\n        if nodes is not None and len(nodes) != 0:\n            node = nodes[0]\n            if node.hasAttribute(\"rdmaMacAddress\"):\n                addr = node.getAttribute(\"rdmaMacAddress\")\n                self.RdmaMacAddress = addr[0:2]\n                for i in range(1, 6):\n                    self.RdmaMacAddress += \":\" + addr[2 * i : 2 *i + 2]\n            if node.hasAttribute(\"rdmaIPv4Address\"):\n                self.RdmaIPv4Address = node.getAttribute(\"rdmaIPv4Address\")\n        return self\n    \n    def Save(self):\n        LogIfVerbose(\"Save SharedConfig.xml\")\n        SetFileContents(\"SharedConfig.xml\", self.xmlText)\n\n    def InvokeTopologyConsumer(self):\n        program = Config.get(\"Role.TopologyConsumer\")\n        if program != None:\n            try:\n                Children.append(subprocess.Popen([program, LibDir + \"/SharedConfig.xml\"]))\n            except OSError, e :\n                ErrorWithPrefix('Agent.Run','Exception: '+ str(e) +' occured launching ' + program )\n\n    def Process(self):\n        global rdma_configured\n        if not rdma_configured and self.RdmaMacAddress is not None and 
self.RdmaIPv4Address is not None:\n            handler = RdmaHandler(self.RdmaMacAddress, self.RdmaIPv4Address)\n            handler.start()\n            rdma_configured = True\n        self.InvokeTopologyConsumer()\n\nrdma_configured = False\n\nclass RdmaError(Exception):\n    pass\n\nclass RdmaHandler(object):\n    \"\"\"\n    Handle rdma configuration.\n    \"\"\"\n\n    def __init__(self, mac, ip_addr, dev=\"/dev/hvnd_rdma\",\n                 dat_conf_files=['/etc/dat.conf', '/etc/rdma/dat.conf',\n                                 '/usr/local/etc/dat.conf']):\n        self.mac = mac\n        self.ip_addr = ip_addr\n        self.dev = dev\n        self.dat_conf_files = dat_conf_files\n        self.data = ('rdmaMacAddress=\"{0}\" rdmaIPv4Address=\"{1}\"'\n                     '').format(self.mac, self.ip_addr)\n\n    def start(self):\n        \"\"\"\n        Start a new thread to process rdma\n        \"\"\"\n        threading.Thread(target=self.process).start()\n\n    def process(self):\n        try:\n            self.set_dat_conf()\n            self.set_rdma_dev()\n            self.set_rdma_ip()\n        except RdmaError as e:\n            Error(\"Failed to config rdma device: {0}\".format(e))\n\n    def set_dat_conf(self):\n        \"\"\"\n        Agent needs to search all possible locations for dat.conf\n        \"\"\"\n        Log(\"Set dat.conf\")\n        for dat_conf_file in self.dat_conf_files:\n            if not os.path.isfile(dat_conf_file):\n                continue\n            try:\n                self.write_dat_conf(dat_conf_file)\n            except IOError as e:\n                raise RdmaError(\"Failed to write to dat.conf: {0}\".format(e))\n\n    def write_dat_conf(self, dat_conf_file):\n        Log(\"Write config to {0}\".format(dat_conf_file))\n        old = (\"ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 \"\n               \"dapl.2.0 \\\"\\S+ 0\\\"\")\n        new = (\"ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 \"\n               \"dapl.2.0 \\\"{0} 0\\\"\").format(self.ip_addr)\n        lines = GetFileContents(dat_conf_file)\n        lines = re.sub(old, new, lines)\n        SetFileContents(dat_conf_file, lines)\n\n    def set_rdma_dev(self):\n        \"\"\"\n        Write config string to /dev/hvnd_rdma\n        \"\"\"\n        Log(\"Set /dev/hvnd_rdma\")\n        self.wait_rdma_dev()\n        self.write_rdma_dev_conf()\n\n    def write_rdma_dev_conf(self):\n        Log(\"Write rdma config to {0}: {1}\".format(self.dev, self.data))\n        try:\n            with open(self.dev, \"w\") as c:\n                c.write(self.data)\n        except IOError, e:\n            raise RdmaError(\"Error writing {0}, {1}\".format(self.dev, e))\n\n    def wait_rdma_dev(self):\n        Log(\"Wait for /dev/hvnd_rdma\")\n        retry = 0\n        while retry < 120:\n            if os.path.exists(self.dev):\n                return\n            time.sleep(1)\n            retry += 1\n        raise RdmaError(\"The device doesn't show up in 120 seconds\")\n\n    def set_rdma_ip(self):\n        Log(\"Set ip addr for rdma\")\n        try:\n            if_name = MyDistro.getInterfaceNameByMac(self.mac)\n            #Azure is using 12 bits network mask for infiniband.\n            MyDistro.configIpV4(if_name, self.ip_addr, 12)\n        except Exception as e:\n            raise RdmaError(\"Failed to config rdma device: {0}\".format(e))\n\nclass ExtensionsConfig(object):\n    \"\"\"\n    Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent.\n    
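Each handler's state is persisted in its own config/HandlerState file, and\n    the install/update/enable/disable commands run here are read from the\n    handler's HandlerManifest.json.\n    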
Install if <enabled>true</enabled>, remove if it is set to false.\n    \"\"\"\n    #<?xml version=\"1.0\" encoding=\"utf-8\"?>\n    #<Extensions version=\"1.0.0.0\" goalStateIncarnation=\"6\"><Plugins>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\"\n    #location=\"http://previewusnorthcache.blob.core.test-cint.azure-test.net/d84b216d00bf4d96982be531539e1513/OSTCExtensions_ExampleHandlerLinux_usnorth_manifest.xml\"\n    #config=\"\" state=\"enabled\" autoUpgrade=\"false\" runAsStartupTask=\"false\" isJson=\"true\" />\n    #</Plugins>\n    #<PluginSettings>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\">\n    #    <RuntimeSettings seqNo=\"2\">{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\n    #\"protectedSettings\":\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR\n    #Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6\n    #tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X\n    #v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh\n    #kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}</RuntimeSettings>\n    #  </Plugin>\n    #</PluginSettings>\n    #<StatusUploadBlob>https://ostcextensions.blob.core.test-cint.azure-test.net/vhds/eg-plugin7-vm.eg-plugin7-vm.eg-plugin7-vm.status?sr=b&amp;sp=rw&amp;\n    #se=9999-01-01&amp;sk=key1&amp;sv=2012-02-12&amp;sig=wRUIDN1x2GC06FWaetBP9sjjifOWvRzS2y2XBB4qoBU%3D</StatusUploadBlob></Extensions>\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.Extensions = None\n        self.Plugins = None\n        self.Util = None\n        \n    def Parse(self, xmlText):\n        \"\"\"\n        Write configuration to file ExtensionsConfig.xml.\n        Log plugin specific activity to /var/log/azure/<Publisher>.<PluginName>/<Version>/CommandExecution.log.\n        If state is enabled:\n            if the plugin is installed:\n                if the new plugin's version is higher\n                if DisallowMajorVersionUpgrade is false or if true, the version is a minor version do upgrade:\n                    download the new archive\n                    do the updateCommand.\n                    disable the old plugin and remove\n                    enable the new plugin\n                if the new plugin's version is the same or lower:\n                    create the new .settings file from the configuration received\n                    do the enableCommand\n            if the plugin is not installed:\n                download/unpack archive and call the installCommand/Enable\n        if state is disabled:\n            call disableCommand\n        if state is uninstall:\n            call uninstallCommand\n            remove old plugin directory.\n        \"\"\"\n        self.reinitialize()\n        self.Util=Util()\n        dom = xml.dom.minidom.parseString(xmlText)\n        LogIfVerbose(xmlText)\n        self.plugin_log_dir='/var/log/azure'\n        if not os.path.exists(self.plugin_log_dir):\n            
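# ensure the base log directory exists; each handler logs to\n            # <plugin_log_dir>/<name>/<version>/CommandExecution.log below\n            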
os.mkdir(self.plugin_log_dir)\n        try:\n            self.Extensions=dom.getElementsByTagName(\"Extensions\")\n            pg = dom.getElementsByTagName(\"Plugins\")\n            if len(pg) > 0:\n                self.Plugins = pg[0].getElementsByTagName(\"Plugin\")\n            else:\n                self.Plugins = []\n            incarnation=self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n            SetFileContents('ExtensionsConfig.'+incarnation+'.xml', xmlText)\n        except Exception, e:\n            Error('ERROR:  Error parsing ExtensionsConfig: {0}.'.format(e))\n            return None\n        for p in self.Plugins:\n            if len(p.getAttribute(\"location\"))<1:  # this plugin is inside the PluginSettings\n                continue\n            p.setAttribute('restricted','false')\n            previous_version = None\n            version=p.getAttribute(\"version\")\n            name=p.getAttribute(\"name\")\n            plog_dir=self.plugin_log_dir+'/'+name +'/'+ version\n            if not os.path.exists(plog_dir):\n                os.makedirs(plog_dir)\n            p.plugin_log=plog_dir+'/CommandExecution.log'\n            handler=name + '-' + version\n            if p.getAttribute(\"isJson\") != 'true':\n                Error(\"Plugin \" + name+\" version: \" +version+\" is not a JSON Extension.  Skipping.\")\n                continue\n            Log(\"Found Plugin: \" + name + ' version: ' + version)\n            if p.getAttribute(\"state\") == 'disabled' or p.getAttribute(\"state\") == 'uninstall': \n                #disable \n                zip_dir=LibDir+\"/\" + name + '-' + version\n                mfile=None\n                for root, dirs, files in os.walk(zip_dir):\n                    for f in files:\n                        if f == 'HandlerManifest.json':\n                            mfile=os.path.join(root,f)\n                    if mfile != None:\n                        break\n                if mfile == None :\n                    Error('HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata',manifest)\n                if self.launchCommand(p.plugin_log,name,version,'disableCommand') == None :\n                    self.SetHandlerState(handler, 'Enabled')\n                    Error('Unable to disable '+name)\n                    SimpleLog(p.plugin_log,'ERROR: Unable to disable '+name)\n                else :\n                    self.SetHandlerState(handler, 'Disabled')\n                    Log(name+' is disabled')\n                    SimpleLog(p.plugin_log,name+' is disabled')\n\n                # uninstall if needed\n                if p.getAttribute(\"state\") == 'uninstall':\n                    if self.launchCommand(p.plugin_log,name,version,'uninstallCommand') == None :\n                        self.SetHandlerState(handler, 'Installed')\n                        Error('Unable to uninstall '+name)\n                        SimpleLog(p.plugin_log,'Unable to uninstall '+name)\n                    else :\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Log(name+' uninstallCommand completed.')\n                    # remove the plugin\n                    Run('rm -rf ' + LibDir + '/' + name +'-'+ version + '*')\n                    Log(name +'-'+ version + ' extension files deleted.')\n                    SimpleLog(p.plugin_log,name +'-'+ version + ' extension files deleted.')\n\n                continue    \n            # state is enabled\n            # if the same plugin exists and the version is newer or\n            # does not exist then download and unzip the new plugin\n            plg_dir=None\n            for root, dirs, files in os.walk(LibDir):\n                for d in dirs:\n                    if name in d:\n                        plg_dir=os.path.join(root,d)\n                    if plg_dir != None:\n                        break\n            if plg_dir != None :\n                previous_version=plg_dir.rsplit('-')[-1]\n            if plg_dir == None or version > previous_version :\n                location=p.getAttribute(\"location\")\n                Log(\"Downloading plugin manifest: \" + name + \" from \" + location)\n                SimpleLog(p.plugin_log,\"Downloading plugin manifest: \" + name + \" from \" + location)\n\n                self.Util.Endpoint=location.split('/')[2]\n                Log(\"Plugin server is: \" +  self.Util.Endpoint)\n                SimpleLog(p.plugin_log,\"Plugin server is: \" +  self.Util.Endpoint)\n\n                manifest=self.Util.HttpGetWithoutHeaders(location, chkProxy=True)\n                if manifest == None:\n                    Error(\"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    SimpleLog(p.plugin_log,\"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    failoverlocation=p.getAttribute(\"failoverlocation\")\n                    self.Util.Endpoint=failoverlocation.split('/')[2]\n                    Log(\"Plugin failover server is: \" +  self.Util.Endpoint)\n                    SimpleLog(p.plugin_log,\"Plugin failover server is: \" +  self.Util.Endpoint)\n\n                    manifest=self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True)\n                #if the failover location also fails, skip this plugin entirely\n                if manifest == None:\n                    AddExtensionEvent(name,WALAEventOperation.Download,False,0,version,\"Download manifest fail \"+failoverlocation)\n                    Log(\"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    SimpleLog(p.plugin_log,\"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    continue\n\n                filepath=LibDir+\"/\" + name + '.' 
+ incarnation + '.manifest'\n                if os.path.splitext(location)[-1] == '.xml' : #if this is an xml file we may have a BOM\n                    if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128:\n                        manifest=manifest[3:]\n                SetFileContents(filepath,manifest)\n                #Get the bundle url from the manifest\n                p.setAttribute('manifestdata',manifest)\n                man_dom = xml.dom.minidom.parseString(manifest)\n                bundle_uri = \"\"\n                for mp in man_dom.getElementsByTagName(\"Plugin\"):\n                    if GetNodeTextData(mp.getElementsByTagName(\"Version\")[0]) == version:\n                        bundle_uri = GetNodeTextData(mp.getElementsByTagName(\"Uri\")[0])\n                        break\n                if len(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")):\n                    if GetNodeTextData(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")[0]) == 'true' and previous_version !=None and previous_version.split('.')[0] != version.split('.')[0] :\n                        Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        SimpleLog(p.plugin_log,'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        p.setAttribute('restricted','true')\n                        continue\n                if len(bundle_uri) < 1 :\n                    Error(\"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    SimpleLog(p.plugin_log,\"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    continue\n                Log(\"Bundle URI = \" + bundle_uri)\n                SimpleLog(p.plugin_log,\"Bundle URI = \" + bundle_uri)\n\n                # Download the zipfile archive and save as '.zip'\n                bundle=self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True)\n                if bundle == None:\n                    AddExtensionEvent(name,WALAEventOperation.Download,False,0,version,\"Download zip fail \"+bundle_uri)\n                    Error(\"Unable to download plugin bundle \" + bundle_uri )\n                    SimpleLog(p.plugin_log,\"Unable to download plugin bundle \" + bundle_uri )\n                    continue\n                AddExtensionEvent(name,WALAEventOperation.Download,True,0,version,\"Download Success\")\n                b=bytearray(bundle)\n                filepath=LibDir+\"/\" + os.path.basename(bundle_uri) + '.zip'\n                SetFileContents(filepath,b)\n                Log(\"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n                SimpleLog(p.plugin_log,\"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n\n                # unpack the archive\n                z=zipfile.ZipFile(filepath)\n                zip_dir=LibDir+\"/\" + name + '-' + version\n                z.extractall(zip_dir)\n                Log('Extracted ' + bundle_uri + ' to ' + zip_dir) \n                SimpleLog(p.plugin_log,'Extracted ' + bundle_uri + ' to ' + zip_dir) \n\n                # .zip archives carry no file permissions, so mark all extracted files u+x\n                Run( \"find \" + zip_dir +\" -type f | xargs chmod  u+x \")\n                #write out the base64 config data so the plugin can process it.\n                mfile=None\n                for root, dirs, files in 
os.walk(zip_dir):\n                    for f in files:\n                        if f in ('HandlerManifest.json'):\n                            mfile=os.path.join(root,f)\n                    if mfile != None:\n                        break\n                if mfile == None :\n                    Error('HandlerManifest.json not found.')\n                    SimpleLog(p.plugin_log,'HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata',manifest)\n                # create the status and config dirs\n                Run('mkdir -p ' + root + '/status')\n                Run('mkdir -p ' + root + '/config')\n                # write out the configuration data to goalStateIncarnation.settings file in the config path.\n                config=''\n                seqNo='0'\n                if len(dom.getElementsByTagName(\"PluginSettings\")) != 0 :\n                    pslist=dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                    for ps in pslist:\n                        if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                            Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                            SimpleLog(p.plugin_log,\"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                            config=GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                            seqNo=ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\") \n                            break\n                if config == '':\n                    Log(\"No RuntimeSettings for \" + name + \" V \" + version)\n                    SimpleLog(p.plugin_log,\"No RuntimeSettings for \" + name + \" V \" + version)\n\n                SetFileContents(root +\"/config/\" + seqNo +\".settings\",  config )\n                #create HandlerEnvironment.json\n                handler_env='[{  \"name\": \"'+name+'\", \"seqNo\": \"'+seqNo+'\", \"version\": 1.0,  \"handlerEnvironment\": {    \"logFolder\": \"'+os.path.dirname(p.plugin_log)+'\",    \"configFolder\": \"' + root + '/config\",    \"statusFolder\": \"' + root + '/status\",    \"heartbeatFile\": \"'+ root + '/heartbeat.log\"}}]'\n                SetFileContents(root+'/HandlerEnvironment.json',handler_env)\n                self.SetHandlerState(handler, 'NotInstalled')\n\n                cmd = ''\n                getcmd='installCommand'\n                if plg_dir != None and previous_version != None and version > previous_version :\n                    previous_handler=name+'-'+previous_version\n                    if self.GetHandlerState(previous_handler) != 'NotInstalled':\n                        getcmd='updateCommand'\n                        # disable the old plugin if it exists\n                        if self.launchCommand(p.plugin_log,name,previous_version,'disableCommand') == None :\n                            self.SetHandlerState(previous_handler, 'Enabled')\n                            Error('Unable to disable old plugin '+name+' version ' + previous_version)\n                            SimpleLog(p.plugin_log,'Unable to disable old plugin '+name+' version ' + previous_version)\n                        else :\n                            self.SetHandlerState(previous_handler, 'Disabled')\n                            Log(name+' version ' + previous_version + ' is disabled')\n                            
SimpleLog(p.plugin_log,name+' version ' + previous_version + ' is disabled')\n\n                isupgradeSuccess = True\n                if getcmd=='updateCommand':\n                    if self.launchCommand(p.plugin_log,name,version,getcmd,previous_version) == None :\n                        Error('Update failed for '+name+'-'+version)\n                        SimpleLog(p.plugin_log,'Update failed for '+name+'-'+version)\n                        isupgradeSuccess=False\n                    else :\n                        Log('Update complete'+name+'-'+version)\n                        SimpleLog(p.plugin_log,'Update complete'+name+'-'+version)\n\n                    # if we updated - call unistall for the old plugin\n                    if self.launchCommand(p.plugin_log,name,previous_version,'uninstallCommand') == None :\n                        self.SetHandlerState(previous_handler, 'Installed')\n                        Error('Uninstall failed for '+name+'-'+previous_version)\n                        SimpleLog(p.plugin_log,'Uninstall failed for '+name+'-'+previous_version)\n                        isupgradeSuccess=False\n                    else :\n                        self.SetHandlerState(previous_handler, 'NotInstalled')\n                        Log('Uninstall complete'+ previous_handler )\n                        SimpleLog(p.plugin_log,'Uninstall complete'+ name +'-' + previous_version)\n                    AddExtensionEvent(name,WALAEventOperation.Upgrade,isupgradeSuccess,0,previous_version)\n                else :  # run install\n                    if self.launchCommand(p.plugin_log,name,version,getcmd) == None :\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Error('Installation failed for '+name+'-'+version)\n                        SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version)\n                    else :\n                        self.SetHandlerState(handler, 'Installed')\n                        Log('Installation completed for '+name+'-'+version)\n                        SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version)\n\n            #end if plg_dir == none or version > = prev\n            # change incarnation of settings file so it knows how to name status...\n            zip_dir=LibDir+\"/\" + name + '-' + version\n            mfile=None\n            for root, dirs, files in os.walk(zip_dir):\n                for f in files:\n                    if f in ('HandlerManifest.json'):\n                        mfile=os.path.join(root,f)\n                if mfile != None:\n                    break\n            if mfile == None :\n                Error('HandlerManifest.json not found.')\n                SimpleLog(p.plugin_log,'HandlerManifest.json not found.')\n\n                continue\n            manifest = GetFileContents(mfile)\n            p.setAttribute('manifestdata',manifest)\n            config=''\n            seqNo='0'\n            if len(dom.getElementsByTagName(\"PluginSettings\")) != 0 :\n                try:\n                    pslist=dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                except:\n                    Error('Error parsing ExtensionsConfig.')\n                    SimpleLog(p.plugin_log,'Error parsing ExtensionsConfig.')\n\n                    continue\n                for ps in pslist:\n                    if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                        
Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                        SimpleLog(p.plugin_log,\"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                        config=GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                        seqNo=ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\") \n                        break\n            if config == '':\n                Error(\"No RuntimeSettings for \" + name + \" V \" + version)\n                SimpleLog(p.plugin_log,\"No RuntimeSettings for \" + name + \" V \" + version)\n\n            SetFileContents(root +\"/config/\" + seqNo +\".settings\",  config )\n\n            # state is still enable\n            if (self.GetHandlerState(handler) == 'NotInstalled'):  # run install first if true\n                if self.launchCommand(p.plugin_log,name,version,'installCommand') == None :\n                    self.SetHandlerState(handler, 'NotInstalled')\n                    Error('Installation failed for '+name+'-'+version)\n                    SimpleLog(p.plugin_log,'Installation failed for '+name+'-'+version)\n\n                else :\n                    self.SetHandlerState(handler, 'Installed')\n                    Log('Installation completed for '+name+'-'+version)\n                    SimpleLog(p.plugin_log,'Installation completed for '+name+'-'+version)\n\n\n            if (self.GetHandlerState(handler) != 'NotInstalled'):\n                if self.launchCommand(p.plugin_log,name,version,'enableCommand') == None :\n                    self.SetHandlerState(handler, 'Installed')\n                    Error('Enable failed for '+name+'-'+version)\n                    SimpleLog(p.plugin_log,'Enable failed for '+name+'-'+version)\n\n                else :\n                    self.SetHandlerState(handler, 'Enabled')\n                    Log('Enable completed for '+name+'-'+version)\n                    SimpleLog(p.plugin_log,'Enable completed for '+name+'-'+version)\n\n            # this plugin processing is complete\n            Log('Processing completed for '+name+'-'+version)\n            SimpleLog(p.plugin_log,'Processing completed for '+name+'-'+version)\n\n        #end plugin processing loop\n        Log('Finished processing ExtensionsConfig.xml')\n        try:\n            SimpleLog(p.plugin_log,'Finished processing ExtensionsConfig.xml')\n        except:\n            pass\n        \n        return self\n    def launchCommand(self,plugin_log,name,version,command,prev_version=None):\n        commandToEventOperation={\n        \"installCommand\":WALAEventOperation.Install,\n        \"uninstallCommand\":WALAEventOperation.UnIsntall,\n        \"updateCommand\": WALAEventOperation.Upgrade,\n        \"enableCommand\": WALAEventOperation.Enable,\n        \"disableCommand\": WALAEventOperation.Disable,\n        }\n        isSuccess=True\n        start = datetime.datetime.now()\n        r=self.__launchCommandWithoutEventLog(plugin_log,name,version,command,prev_version)\n        if r==None:\n            isSuccess=False\n        Duration = int((datetime.datetime.now() - start).seconds)\n        if commandToEventOperation.get(command):\n            AddExtensionEvent(name,commandToEventOperation[command],isSuccess,Duration,version)\n        return r\n\n    def __launchCommandWithoutEventLog(self,plugin_log,name,version,command,prev_version=None):\n        # get the manifest and read the command\n        mfile=None\n        zip_dir=LibDir+\"/\" + name + '-' + version\n        
for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile=os.path.join(root,f)\n            if mfile != None:\n                break\n        if mfile == None :\n            Error('HandlerManifest.json not found.')\n            SimpleLog(plugin_log,'HandlerManifest.json not found.')\n            \n            return None\n        manifest = GetFileContents(mfile)\n        try:\n            jsn = json.loads(manifest)\n        except:\n            Error('Error parsing HandlerManifest.json.')\n            SimpleLog(plugin_log,'Error parsing HandlerManifest.json.')\n\n            return None\n        if type(jsn)==list:\n            jsn=jsn[0]\n        if jsn.has_key('handlerManifest') :\n            cmd = jsn['handlerManifest'][command]\n        else :\n            Error('Key handlerManifest not found.  Handler cannot be installed.')\n            SimpleLog(plugin_log,'Key handlerManifest not found.  Handler cannot be installed.')\n            # without the manifest there is no command to run\n            return None\n\n        if len(cmd) == 0 :\n            Error('Unable to read ' + command )\n            SimpleLog(plugin_log,'Unable to read ' + command )\n\n            return None\n\n        # for update we send the path of the old installation\n        arg=''\n        if prev_version != None :\n            arg=' ' + LibDir+'/' + name + '-' + prev_version\n        dirpath=os.path.dirname(mfile)\n        LogIfVerbose('Command is '+ dirpath+'/'+ cmd)\n        # launch\n        pid=None\n        try:\n            child = subprocess.Popen(dirpath+'/'+cmd+arg,shell=True,cwd=dirpath,stdout=subprocess.PIPE)\n        except Exception as e:\n            Error('Exception launching ' + cmd + ' ' + str(e))\n            SimpleLog(plugin_log,'Exception launching ' + cmd + ' ' + str(e))\n            # child does not exist; bail out instead of raising NameError below\n            return None\n\n        pid = child.pid\n        if pid == None or pid < 1 :\n            ExtensionChildren.append((-1,root))\n            Error('Error launching ' + cmd + '.')\n            SimpleLog(plugin_log,'Error launching ' + cmd + '.')\n\n        else :\n            ExtensionChildren.append((pid,root))\n            Log(\"Spawned \"+ cmd + \" PID \" + str(pid))\n            SimpleLog(plugin_log,\"Spawned \"+ cmd + \" PID \" + str(pid))\n\n\n        # wait until install/upgrade is finished\n        timeout = 300 # 5 minutes\n        retry = timeout/5\n        while retry > 0 and child.poll() == None:\n            LogIfVerbose(cmd + ' still running with PID ' + str(pid))\n            time.sleep(5)\n            retry-=1\n        if retry==0:\n            Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))\n            SimpleLog(plugin_log,'Process exceeded timeout of ' + str(timeout) + ' seconds. 
Terminating process ' + str(pid))\n\n            os.kill(pid,9)\n            return None\n        code = child.wait()\n        if code == None or code != 0:\n            Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n            SimpleLog(plugin_log,'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n\n            return None\n        Log(command + ' completed.')\n        SimpleLog(plugin_log,command + ' completed.')\n\n        return 0\n\n    def ReportHandlerStatus(self):\n        \"\"\"\n        Collect all status reports.\n        \"\"\"\n        # { \"version\": \"1.0\", \"timestampUTC\": \"2014-03-31T21:28:58Z\", \n        # \"aggregateStatus\": { \n        # \"guestAgentStatus\": { \"version\": \"2.0.4PRE\", \"status\": \"Ready\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"GuestAgent is running and accepting new configurations.\" } }, \n        # \"handlerAggregateStatus\": [{ \n        # \"handlerName\": \"ExampleHandlerLinux\", \"handlerVersion\": \"1.0\", \"status\": \"Ready\", \"runtimeSettingsStatus\": { \n        # \"sequenceNumber\": \"2\", \"settingsStatus\": { \"timestampUTC\": \"2014-03-31T23:46:00Z\", \"status\": { \"name\": \"ExampleHandlerLinux\", \"operation\": \"Command Execution Finished\", \"configurationAppliedTime\": \"2014-03-31T23:46:00Z\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Finished executing command\" }, \n        # \"substatus\": [\n        # { \"name\": \"StdOut\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Goodbye world!\" }  }, \n        # { \"name\": \"StdErr\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"\" } }\n        # ] \n        # } } } }\n        # ]\n        #  }}\n\n        try:\n            incarnation=self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n        except:\n            Error('Error parsing ExtensionsConfig.  Unable to send status reports')\n            return None\n        status=''\n        statuses=''\n        for p in self.Plugins:\n            if p.getAttribute(\"state\") == 'uninstall' or p.getAttribute(\"restricted\") == 'true' :\n                continue\n            version=p.getAttribute(\"version\")\n            name=p.getAttribute(\"name\")\n            if p.getAttribute(\"isJson\") != 'true':\n                LogIfVerbose(\"Plugin \" + name+\" version: \" +version+\" is not a JSON Extension.  
Skipping.\")\n                continue\n            reportHeartbeat = False\n            if len(p.getAttribute(\"manifestdata\"))<1:\n                Error(\"Failed to get manifestdata.\")\n            else:\n                reportHeartbeat = json.loads(p.getAttribute(\"manifestdata\"))[0]['handlerManifest']['reportHeartbeat']\n            if len(statuses)>0:\n                statuses+=','\n            statuses+=self.GenerateAggStatus(name, version, reportHeartbeat)\n        tstamp=time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n        #header\n        #agent state\n        if provisioned == False:\n            if provisionError == None :\n                agent_state='Provisioning'\n                agent_msg='Guest Agent is starting.'\n            else:\n                agent_state='Provisioning Error.'\n                agent_msg=provisionError\n        else:\n            agent_state='Ready'\n            agent_msg='GuestAgent is running and accepting new configurations.'\n            \n        status='{\"version\":\"1.0\",\"timestampUTC\":\"'+tstamp+'\",\"aggregateStatus\":{\"guestAgentStatus\":{\"version\":\"'+GuestAgentVersion+'\",\"status\":\"'+agent_state+'\",\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"'+agent_msg+'\"}},\"handlerAggregateStatus\":['+statuses+']}}'\n        try:\n            uri=GetNodeTextData(self.Extensions[0].getElementsByTagName(\"StatusUploadBlob\")[0]).replace('&amp;','&')\n        except:\n            Error('Error parsing ExtensionsConfig.  Unable to send status reports')\n            return None\n\n        UploadStatusBlob(uri, status.encode(\"utf-8\"))\n        LogIfVerbose('Status report '+status+' sent to ' + uri)\n        return True\n\n    def GetCurrentSequenceNumber(self, plugin_base_dir):\n        \"\"\"\n        Get the settings file with biggest file number in config folder\n        \"\"\"\n        config_dir = os.path.join(plugin_base_dir, 'config')\n        seq_no = 0\n        for subdir, dirs, files in os.walk(config_dir):\n            for file in files:\n                try:\n                    cur_seq_no = int(os.path.basename(file).split('.')[0])\n                    if cur_seq_no > seq_no:\n                        seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return str(seq_no)\n\n\n    def GenerateAggStatus(self, name, version, reportHeartbeat = False):\n        \"\"\"\n        Generate the status which Azure can understand by the status and heartbeat reported by extension\n        \"\"\"\n        plugin_base_dir = LibDir+'/'+name+'-'+version+'/'\n        current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir)\n        status_file=os.path.join(plugin_base_dir, 'status/', current_seq_no +'.status')\n        heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log')\n\n        handler_state_file = os.path.join(plugin_base_dir,  'config', 'HandlerState')\n        agg_state = 'NotReady'\n        handler_state = None\n        status_obj = None\n        status_code = None\n        formatted_message = None\n        localized_message = None\n\n        if os.path.exists(handler_state_file):\n            handler_state = GetFileContents(handler_state_file).lower()\n        if HandlerStatusToAggStatus.has_key(handler_state):\n            agg_state = HandlerStatusToAggStatus[handler_state]\n        if reportHeartbeat:\n            if os.path.exists(heartbeat_file):\n                d=int(time.time()-os.stat(heartbeat_file).st_mtime)\n                if d > 600 :    # not 
updated for more than 10 min\n                    agg_state = 'Unresponsive'\n                else:\n                    try:\n                        heartbeat = json.loads(GetFileContents(heartbeat_file))[0][\"heartbeat\"]\n                        agg_state = heartbeat.get(\"status\")\n                        status_code = heartbeat.get(\"code\")\n                        formatted_message = heartbeat.get(\"formattedMessage\")\n                        localized_message = heartbeat.get(\"message\")\n                    except:\n                        Error(\"Incorrect heartbeat file. Ignoring it.\")\n            else:\n                agg_state = 'Unresponsive'\n        #get status file reported by extension\n        if os.path.exists(status_file):\n            # raw status generated by extension is an array, get the first item and remove the unnecessary element\n            try:\n                status_obj = json.loads(GetFileContents(status_file))[0]\n                del status_obj[\"version\"]\n            except:\n                Error(\"Incorrect status file. Will NOT set settingsStatus in settings.\")\n        agg_status_obj = {\"handlerName\": name, \"handlerVersion\": version, \"status\": agg_state, \"runtimeSettingsStatus\" :\n                {\"sequenceNumber\": current_seq_no}}\n        if status_obj:\n            agg_status_obj[\"runtimeSettingsStatus\"][\"settingsStatus\"] = status_obj\n        if status_code != None:\n            agg_status_obj[\"code\"] = status_code\n        if formatted_message:\n            agg_status_obj[\"formattedMessage\"] = formatted_message\n        if localized_message:\n            agg_status_obj[\"message\"] = localized_message\n        agg_status_string = json.dumps(agg_status_obj)\n        LogIfVerbose(\"Handler Aggregated Status: \" + agg_status_string)\n        return agg_status_string\n    \n\n    def SetHandlerState(self, handler, state=''):\n        zip_dir=LibDir+\"/\" + handler\n        mfile=None\n        for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile=os.path.join(root,f)\n            if mfile != None:\n                break\n        if mfile == None :\n            Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.')\n            return None\n        Log(\"SetHandlerState: \"+handler+\", \"+state)\n        return SetFileContents(os.path.dirname(mfile)+'/config/HandlerState', state)\n\n    def GetHandlerState(self, handler):\n        handlerState = GetFileContents(handler+'/config/HandlerState')\n        if (handlerState):\n            return handlerState.rstrip('\\r\\n')\n        else:\n            return 'NotInstalled'\n\n\nclass HostingEnvironmentConfig(object):\n    \"\"\"\n    Parse Hosting environment config and store in\n    HostingEnvironmentConfig.xml\n    \"\"\"\n    #\n    # <HostingEnvironmentConfig version=\"1.0.0.0\" goalStateIncarnation=\"1\">\n    #   <StoredCertificates>\n    #     <StoredCertificate name=\"Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption\" certificateId=\"sha1:C093FA5CD3AAE057CB7C4E04532B2E16E07C26CA\" storeName=\"My\" configurationLevel=\"System\" />\n    #   </StoredCertificates>\n    #   <Deployment name=\"db00a7755a5e4e8a8fe4b19bc3b330c3\" guid=\"{ce5a036f-5c93-40e7-8adf-2613631008ab}\" incarnation=\"2\">\n    #     <Service name=\"MyVMRoleService\" guid=\"{00000000-0000-0000-0000-000000000000}\" />\n    #     <ServiceInstance 
name=\"db00a7755a5e4e8a8fe4b19bc3b330c3.1\" guid=\"{d113f4d7-9ead-4e73-b715-b724b5b7842c}\" />\n    #   </Deployment>\n    #   <Incarnation number=\"1\" instance=\"MachineRole_IN_0\" guid=\"{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}\" />\n    #   <Role guid=\"{73d95f1c-6472-e58e-7a1a-523554e11d46}\" name=\"MachineRole\" hostingEnvironmentVersion=\"1\" software=\"\" softwareType=\"ApplicationPackage\" entryPoint=\"\" parameters=\"\" settleTimeSeconds=\"10\" />\n    #   <HostingEnvironmentSettings name=\"full\" Runtime=\"rd_fabric_stable.110217-1402.RuntimePackage_1.0.0.8.zip\">\n    #     <CAS mode=\"full\" />\n    #     <PrivilegeLevel mode=\"max\" />\n    #     <AdditionalProperties><CgiHandlers></CgiHandlers></AdditionalProperties>\n    #   </HostingEnvironmentSettings>\n    #   <ApplicationSettings>\n    #     <Setting name=\"__ModelData\" value=\"&lt;m role=&quot;MachineRole&quot; xmlns=&quot;urn:azure:m:v1&quot;>&lt;r name=&quot;MachineRole&quot;>&lt;e name=&quot;a&quot; />&lt;e name=&quot;b&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput&quot; />&lt;/r>&lt;/m>\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString\" value=\"DefaultEndpointsProtocol=http;AccountName=osimages;AccountKey=DNZQ...\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled\" value=\"true\" />\n    #   </ApplicationSettings>\n    #   <ResourceReferences>\n    #     <Resource name=\"DiagnosticStore\" type=\"directory\" request=\"Microsoft.Cis.Fabric.Controller.Descriptions.ServiceDescription.Data.Policy\" sticky=\"true\" size=\"1\" path=\"db00a7755a5e4e8a8fe4b19bc3b330c3.MachineRole.DiagnosticStore\\\" disableQuota=\"false\" />\n    #   </ResourceReferences>\n    # </HostingEnvironmentConfig>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset Members.\n        \"\"\"\n        self.StoredCertificates = None\n        self.Deployment = None\n        self.Incarnation = None\n        self.Role = None\n        self.HostingEnvironmentSettings = None\n        self.ApplicationSettings = None\n        self.Certificates = None\n        self.ResourceReferences = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse and create HostingEnvironmentConfig.xml.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"HostingEnvironmentConfig.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [ \"HostingEnvironmentConfig\", \"Deployment\", \"Service\",\n                   \"ServiceInstance\", \"Incarnation\", \"Role\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"HostingEnvironmentConfig.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"HostingEnvironmentConfig\":\n            Error(\"HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig\")\n            return None\n        self.ApplicationSettings = dom.getElementsByTagName(\"Setting\")\n        self.Certificates = dom.getElementsByTagName(\"StoredCertificate\")\n        return self\n\n    def DecryptPassword(self, e):\n        \"\"\"\n        Return decrypted password.\n        \"\"\"\n        SetFileContents(\"password.p7m\",\n            \"MIME-Version: 1.0\\n\"\n            + \"Content-Disposition: attachment; filename=\\\"password.p7m\\\"\\n\"\n            + 
\"Content-Type: application/x-pkcs7-mime; name=\\\"password.p7m\\\"\\n\"\n            + \"Content-Transfer-Encoding: base64\\n\\n\"\n            + textwrap.fill(e, 64))\n        return RunGetOutput(Openssl + \" cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem\")[1]\n\n    def ActivateResourceDisk(self):\n        return MyDistro.ActivateResourceDisk()\n\n    def Process(self):\n        \"\"\"\n        Execute ActivateResourceDisk in separate thread.\n        Create the user account.\n        Launch ConfigurationConsumer if specified in the config.\n        \"\"\"\n        no_thread = False\n        if DiskActivated == False:\n            for m in inspect.getmembers(MyDistro):\n                if 'ActivateResourceDiskNoThread' in m:\n                    no_thread = True\n                    break\n            if no_thread == True :   \n                MyDistro.ActivateResourceDiskNoThread()\n            else :\n                diskThread = threading.Thread(target = self.ActivateResourceDisk)\n                diskThread.start()\n        User = None\n        Pass = None\n        Expiration = None\n        Thumbprint = None\n        for b in self.ApplicationSettings:\n            sname = b.getAttribute(\"name\")\n            svalue = b.getAttribute(\"value\")\n        if User != None and Pass != None:\n            if User != \"root\" and User != \"\" and Pass != \"\":\n                CreateAccount(User, Pass, Expiration, Thumbprint)\n            else:\n                Error(\"Not creating user account: \" + User)\n        for c in self.Certificates:\n            csha1 = c.getAttribute(\"certificateId\").split(':')[1].upper()\n            if os.path.isfile(csha1 + \".prv\"):\n                Log(\"Private key with thumbprint: \" + csha1 + \" was retrieved.\")\n            if os.path.isfile(csha1 + \".crt\"):\n                Log(\"Public cert with thumbprint: \" + csha1 + \" was retrieved.\")\n        program = Config.get(\"Role.ConfigurationConsumer\")\n        if program != None:\n            try:\n                Children.append(subprocess.Popen([program, LibDir + \"/HostingEnvironmentConfig.xml\"]))\n            except OSError, e :\n                ErrorWithPrefix('HostingEnvironmentConfig.Process','Exception: '+ str(e) +' occured launching ' + program )\n\nclass GoalState(Util):\n    \"\"\"\n    Primary container for all configuration except OvfXml.\n    Encapsulates http communication with endpoint server.\n    Initializes and populates:\n    self.HostingEnvironmentConfig\n    self.SharedConfig\n    self.ExtensionsConfig\n    self.Certificates\n    \"\"\"\n    #\n    # <GoalState xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"goalstate10.xsd\">\n    #   <Version>2010-12-15</Version>\n    #   <Incarnation>1</Incarnation>\n    #   <Machine>\n    #     <ExpectedState>Started</ExpectedState>\n    #     <LBProbePorts>\n    #       <Port>16001</Port>\n    #     </LBProbePorts>\n    #   </Machine>\n    #   <Container>\n    #     <ContainerId>c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2</ContainerId>\n    #     <RoleInstanceList>\n    #       <RoleInstance>\n    #         <InstanceId>MachineRole_IN_0</InstanceId>\n    #         <State>Started</State>\n    #         <Configuration>\n    #           <HostingEnvironmentConfig>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>\n    #           
<SharedConfig>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&amp;type=sharedConfig&amp;incarnation=1</SharedConfig>\n    #           <Certificates>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=certificates&amp;incarnation=1</Certificates>\n    #          <ExtensionsConfig>http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&amp;type=extensionsConfig&amp;incarnation=2</ExtensionsConfig>\n    #         <FullConfig>http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&amp;type=fullConfig&amp;incarnation=2</FullConfig>\n\n    #         </Configuration>\n    #       </RoleInstance>\n    #     </RoleInstanceList>\n    #   </Container>\n    # </GoalState>\n    #\n    # There is only one Role for VM images.\n    #\n    # Of primary interest is:\n    #  LBProbePorts -- an http server needs to run here\n    #  We also note Container/ContainerID and RoleInstance/InstanceId to form the health report.\n    #  And of course, Incarnation\n    #\n    def __init__(self, Agent):\n        self.Agent = Agent\n        self.Endpoint = Agent.Endpoint\n        self.TransportCert = Agent.TransportCert\n        self.reinitialize()\n\n    def reinitialize(self):\n        self.Incarnation = None # integer\n        self.ExpectedState = None # \"Started\"\n        self.HostingEnvironmentConfigUrl = None\n        self.HostingEnvironmentConfigXml = None\n        self.HostingEnvironmentConfig = None\n        self.SharedConfigUrl = None\n        self.SharedConfigXml = None\n        self.SharedConfig = None\n        self.CertificatesUrl = None\n        self.CertificatesXml = None\n        self.Certificates = None\n        self.ExtensionsConfigUrl = None\n        self.ExtensionsConfigXml = None\n        self.ExtensionsConfig = None\n        self.RoleInstanceId = None\n        self.ContainerId = None\n        self.LoadBalancerProbePort = None # integer, ?list of integers\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Request configuration data from endpoint server.\n        Parse and populate contained configuration objects.\n        Calls Certificates().Parse()\n        Calls SharedConfig().Parse\n        Calls ExtensionsConfig().Parse\n        Calls HostingEnvironmentConfig().Parse\n        \"\"\"\n        self.reinitialize()\n        LogIfVerbose(xmlText)\n        node = xml.dom.minidom.parseString(xmlText).childNodes[0]\n        if node.localName != \"GoalState\":\n            Error(\"GoalState.Parse: root not GoalState\")\n            return None\n        for a in node.childNodes:\n            if a.nodeType == node.ELEMENT_NODE:\n                if a.localName == \"Incarnation\":\n                    self.Incarnation = GetNodeTextData(a)\n                elif a.localName == \"Machine\":\n                    for b in a.childNodes:\n                        if b.nodeType == node.ELEMENT_NODE:\n                            if b.localName == \"ExpectedState\":\n                                self.ExpectedState = GetNodeTextData(b)\n                                Log(\"ExpectedState: \" + self.ExpectedState)\n                            elif b.localName == \"LBProbePorts\":\n                                for c in b.childNodes:\n                                    if c.nodeType == node.ELEMENT_NODE and c.localName == \"Port\":\n                                   
     self.LoadBalancerProbePort = int(GetNodeTextData(c))\n                elif a.localName == \"Container\":\n                    for b in a.childNodes:\n                        if b.nodeType == node.ELEMENT_NODE:\n                            if b.localName == \"ContainerId\":\n                                self.ContainerId = GetNodeTextData(b)\n                                Log(\"ContainerId: \" + self.ContainerId)\n                            elif b.localName == \"RoleInstanceList\":\n                                for c in b.childNodes:\n                                    if c.localName == \"RoleInstance\":\n                                        for d in c.childNodes:\n                                            if d.nodeType == node.ELEMENT_NODE:\n                                                if d.localName == \"InstanceId\":\n                                                    self.RoleInstanceId = GetNodeTextData(d)\n                                                    Log(\"RoleInstanceId: \" + self.RoleInstanceId)\n                                                elif d.localName == \"State\":\n                                                    pass\n                                                elif d.localName == \"Configuration\":\n                                                    for e in d.childNodes:\n                                                        if e.nodeType == node.ELEMENT_NODE:\n                                                            LogIfVerbose(e.localName)\n                                                            if e.localName == \"HostingEnvironmentConfig\":\n                                                                self.HostingEnvironmentConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"HostingEnvironmentConfigUrl:\" + self.HostingEnvironmentConfigUrl)\n                                                                self.HostingEnvironmentConfigXml = self.HttpGetWithHeaders(self.HostingEnvironmentConfigUrl)\n                                                                self.HostingEnvironmentConfig = HostingEnvironmentConfig().Parse(self.HostingEnvironmentConfigXml)\n                                                            elif e.localName == \"SharedConfig\":\n                                                                self.SharedConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"SharedConfigUrl:\" + self.SharedConfigUrl)\n                                                                self.SharedConfigXml = self.HttpGetWithHeaders(self.SharedConfigUrl)\n                                                                self.SharedConfig = SharedConfig().Parse(self.SharedConfigXml)\n                                                                self.SharedConfig.Save()\n                                                            elif e.localName == \"ExtensionsConfig\":\n                                                                self.ExtensionsConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"ExtensionsConfigUrl:\" + self.ExtensionsConfigUrl)\n                                                                self.ExtensionsConfigXml = self.HttpGetWithHeaders(self.ExtensionsConfigUrl)\n                                                            elif e.localName == \"Certificates\":\n                                                            
    self.CertificatesUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"CertificatesUrl:\" + self.CertificatesUrl)\n                                                                self.CertificatesXml = self.HttpSecureGetWithHeaders(self.CertificatesUrl, self.TransportCert)\n                                                                self.Certificates = Certificates().Parse(self.CertificatesXml)\n        if self.Incarnation == None:\n            Error(\"GoalState.Parse: Incarnation missing\")\n            return None\n        if self.ExpectedState == None:\n            Error(\"GoalState.Parse: ExpectedState missing\")\n            return None\n        if self.RoleInstanceId == None:\n            Error(\"GoalState.Parse: RoleInstanceId missing\")\n            return None\n        if self.ContainerId == None:\n            Error(\"GoalState.Parse: ContainerId missing\")\n            return None\n        SetFileContents(\"GoalState.\" + self.Incarnation + \".xml\", xmlText)\n        return self\n\n    def Process(self):\n        \"\"\"\n        Calls HostingEnvironmentConfig.Process() and SharedConfig.Process().\n        \"\"\"\n        LogIfVerbose(\"Process goalstate\")\n        self.HostingEnvironmentConfig.Process()\n        self.SharedConfig.Process()\n\nclass OvfEnv(object):\n    \"\"\"\n    Read and process provisioning info from the provisioning file ovf-env.xml.\n    \"\"\"\n    #\n    # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n    # <Environment xmlns=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:oe=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:wa=\"http://schemas.microsoft.com/windowsazure\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #    <wa:ProvisioningSection>\n    #      <wa:Version>1.0</wa:Version>\n    #      <LinuxProvisioningConfigurationSet xmlns=\"http://schemas.microsoft.com/windowsazure\" xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #        <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>\n    #        <HostName>HostName</HostName>\n    #        <UserName>UserName</UserName>\n    #        <UserPassword>UserPassword</UserPassword>\n    #        <DisableSshPasswordAuthentication>false</DisableSshPasswordAuthentication>\n    #        <SSH>\n    #          <PublicKeys>\n    #            <PublicKey>\n    #              <Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/authorized_keys</Path>\n    #            </PublicKey>\n    #          </PublicKeys>\n    #          <KeyPairs>\n    #            <KeyPair>\n    #              <Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/id_rsa</Path>\n    #            </KeyPair>\n    #          </KeyPairs>\n    #        </SSH>\n    #      </LinuxProvisioningConfigurationSet>\n    #    </wa:ProvisioningSection>\n    # </Environment>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.WaNs = \"http://schemas.microsoft.com/windowsazure\"\n        self.OvfNs = \"http://schemas.dmtf.org/ovf/environment/1\"\n        self.MajorVersion = 1\n        self.MinorVersion = 0\n        self.ComputerName = None\n        self.AdminPassword = None\n        self.UserName = None\n        self.UserPassword = None\n        self.CustomData = None\n        self.DisableSshPasswordAuthentication = True\n        
self.SshPublicKeys = []\n        self.SshKeyPairs = []\n\n    def Parse(self, xmlText, isDeprovision = False):\n        \"\"\"\n        Parse xml tree, retrieving user and ssh key information.\n        Return self.\n        \"\"\"\n        self.reinitialize()\n        LogIfVerbose(re.sub(\"<UserPassword>.*?<\", \"<UserPassword>*<\", xmlText))\n        dom = xml.dom.minidom.parseString(xmlText)\n        if len(dom.getElementsByTagNameNS(self.OvfNs, \"Environment\")) != 1:\n            Error(\"Unable to parse OVF XML.\")\n        section = None\n        newer = False\n        for p in dom.getElementsByTagNameNS(self.WaNs, \"ProvisioningSection\"):\n            for n in p.childNodes:\n                if n.localName == \"Version\":\n                    verparts = GetNodeTextData(n).split('.')\n                    major = int(verparts[0])\n                    minor = int(verparts[1])\n                    if major > self.MajorVersion:\n                        newer = True\n                    if major != self.MajorVersion:\n                        break\n                    if minor > self.MinorVersion:\n                        newer = True\n                    section = p\n        if newer == True:\n            Warn(\"Newer provisioning configuration detected. Please consider updating waagent.\")\n        if section == None:\n            Error(\"Could not find ProvisioningSection with major version=\" + str(self.MajorVersion))\n            return None\n        self.ComputerName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"HostName\")[0])\n        self.UserName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"UserName\")[0])\n        if isDeprovision == True:\n            return self\n        try:\n            self.UserPassword = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"UserPassword\")[0])\n        except:\n            pass\n        CDSection=None\n        try:\n            CDSection=section.getElementsByTagNameNS(self.WaNs, \"CustomData\")\n            if len(CDSection) > 0 :\n                self.CustomData=GetNodeTextData(CDSection[0])\n                if len(self.CustomData)>0:\n                    SetFileContents(LibDir + '/CustomData', MyDistro.translateCustomData(self.CustomData))\n                    Log('Wrote ' + LibDir + '/CustomData')\n                else :\n                    Error('<CustomData> contains no data!')\n        except Exception, e:\n            Error( str(e)+' occurred creating ' + LibDir + '/CustomData')\n        disableSshPass = section.getElementsByTagNameNS(self.WaNs, \"DisableSshPasswordAuthentication\")\n        if len(disableSshPass) != 0:\n            self.DisableSshPasswordAuthentication = (GetNodeTextData(disableSshPass[0]).lower() == \"true\")\n        for pkey in section.getElementsByTagNameNS(self.WaNs, \"PublicKey\"):\n            LogIfVerbose(repr(pkey))\n            fp = None\n            path = None\n            for c in pkey.childNodes:\n                if c.localName == \"Fingerprint\":\n                    fp = GetNodeTextData(c).upper()\n                    LogIfVerbose(fp)\n                if c.localName == \"Path\":\n                    path = GetNodeTextData(c)\n                    LogIfVerbose(path)\n            self.SshPublicKeys += [[fp, path]]\n        for keyp in section.getElementsByTagNameNS(self.WaNs, \"KeyPair\"):\n            fp = None\n            path = None\n            LogIfVerbose(repr(keyp))\n            for c in keyp.childNodes:\n                if c.localName == \"Fingerprint\":\n  
                  fp = GetNodeTextData(c).upper()\n                    LogIfVerbose(fp)\n                if c.localName == \"Path\":\n                    path = GetNodeTextData(c)\n                    LogIfVerbose(path)\n            self.SshKeyPairs += [[fp, path]]\n        return self\n\n    def PrepareDir(self, filepath):\n        \"\"\"\n        Create the parent directory for filepath (expanding $HOME to the\n        user's home), change its owner if it lies under the home of\n        self.UserName, and return the normalized path, or None if the\n        path is invalid.\n        \"\"\"\n        home = MyDistro.GetHome()\n        # Expand HOME variable if present in path\n        path = os.path.normpath(filepath.replace(\"$HOME\", home))\n        if (path.startswith(\"/\") == False) or (path.endswith(\"/\") == True):\n            return None\n        dir = path.rsplit('/', 1)[0]\n        if dir != \"\":\n            CreateDir(dir, \"root\", 0700)\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(dir, self.UserName)\n        return path\n\n    def NumberToBytes(self, i):\n        \"\"\"\n        Pack number into bytes.  Return as string.\n        \"\"\"\n        result = []\n        while i:\n            result.append(chr(i & 0xFF))\n            i >>= 8\n        result.reverse()\n        return ''.join(result)\n\n    def BitsToString(self, a):\n        \"\"\"\n        Return string representation of bits in a.\n        \"\"\"\n        index=7\n        s = \"\"\n        c = 0\n        for bit in a:\n            c = c | (bit << index)\n            index = index - 1\n            if index == -1:\n                s = s + struct.pack('>B', c)\n                c = 0\n                index = 7\n        return s\n\n    def OpensslToSsh(self, file):\n        \"\"\"\n        Return base-64 encoded key appropriate for ssh.\n        \"\"\"\n        from pyasn1.codec.der import decoder as der_decoder\n        try:\n            f = open(file).read().replace('\\n','').split(\"KEY-----\")[1].split('-')[0]\n            k=der_decoder.decode(self.BitsToString(der_decoder.decode(base64.b64decode(f))[0][1]))[0]\n            n=k[0]\n            e=k[1]\n            keydata=\"\"\n            keydata += struct.pack('>I',len(\"ssh-rsa\"))\n            keydata += \"ssh-rsa\"\n            keydata += struct.pack('>I',len(self.NumberToBytes(e)))\n            keydata += self.NumberToBytes(e)\n            keydata += struct.pack('>I',len(self.NumberToBytes(n)) + 1)\n            keydata += \"\\0\"\n            keydata += self.NumberToBytes(n)\n        except Exception, e:\n            print(\"OpensslToSsh: Exception \" + str(e))\n            return None\n        return \"ssh-rsa \" + base64.b64encode(keydata) + \"\\n\"\n\n    def Process(self):\n        \"\"\"\n        Process all certificate and key info.\n        DisableSshPasswordAuthentication if configured.\n        CreateAccount(user)\n        Wait for WaAgent.EnvMonitor.IsHostnamePublished().\n        Restart ssh service.\n        \"\"\"\n        error = None\n        if self.ComputerName == None :\n            return \"Error: Hostname missing\"\n        error=WaAgent.EnvMonitor.SetHostName(self.ComputerName)\n        if error: return error\n        if self.DisableSshPasswordAuthentication:\n            filepath = \"/etc/ssh/sshd_config\"\n            # Disable RFC 4252 (password) and RFC 4256 (challenge-response) authentication schemes.\n            ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n                (a.startswith(\"PasswordAuthentication\") or a.startswith(\"ChallengeResponseAuthentication\")),\n                
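# (added note) keep every sshd_config line except existing PasswordAuthentication/\n                # ChallengeResponseAuthentication directives, then re-append both set to \"no\"\n                # via the string concatenated below.\n                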
GetFileContents(filepath).split('\\n'))) + \"\\nPasswordAuthentication no\\nChallengeResponseAuthentication no\\n\")\n            Log(\"Disabled SSH password-based authentication methods.\")\n        if self.AdminPassword != None:\n            MyDistro.changePass('root',self.AdminPassword)\n        if self.UserName != None:\n            error = MyDistro.CreateAccount(self.UserName, self.UserPassword, None, None)\n        sel = MyDistro.isSelinuxRunning()\n        if sel :\n            MyDistro.setSelinuxEnforce(0)\n        home = MyDistro.GetHome()\n        for pkey in self.SshPublicKeys:\n            Log(\"Deploy public key:{0}\".format(pkey[0]))\n            if not os.path.isfile(pkey[0] + \".crt\"):\n                Error(\"PublicKey not found: \" + pkey[0])\n                error = \"Failed to deploy public key (0x09).\"\n                continue\n            path = self.PrepareDir(pkey[1])\n            if path == None:\n                Error(\"Invalid path: \" + pkey[1] + \" for PublicKey: \" + pkey[0])\n                error = \"Invalid path for public key (0x03).\"\n                continue\n            Run(Openssl + \" x509 -in \" + pkey[0] + \".crt -noout -pubkey > \" + pkey[0] + \".pub\")\n            MyDistro.setSelinuxContext(pkey[0] + '.pub','unconfined_u:object_r:ssh_home_t:s0')\n            MyDistro.sshDeployPublicKey(pkey[0] + '.pub',path)\n            MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0')\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(path, self.UserName)\n        for keyp in self.SshKeyPairs:\n            Log(\"Deploy key pair:{0}\".format(keyp[0]))\n            if not os.path.isfile(keyp[0] + \".prv\"):\n                Error(\"KeyPair not found: \" + keyp[0])\n                error = \"Failed to deploy key pair (0x0A).\"\n                continue\n            path = self.PrepareDir(keyp[1])\n            if path == None:\n                Error(\"Invalid path: \" + keyp[1] + \" for KeyPair: \" + keyp[0])\n                error = \"Invalid path for key pair (0x05).\"\n                continue\n            SetFileContents(path, GetFileContents(keyp[0] + \".prv\"))\n            os.chmod(path, 0600)\n            Run(\"ssh-keygen -y -f \" + keyp[0] + \".prv > \" + path + \".pub\")\n            MyDistro.setSelinuxContext(path,'unconfined_u:object_r:ssh_home_t:s0')\n            MyDistro.setSelinuxContext(path + '.pub','unconfined_u:object_r:ssh_home_t:s0')\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(path, self.UserName)\n                ChangeOwner(path + \".pub\", self.UserName)\n        if sel :\n            MyDistro.setSelinuxEnforce(1)\n        while not WaAgent.EnvMonitor.IsHostnamePublished():\n            time.sleep(1)\n        MyDistro.restartSshService()\n        return error\n\n\nclass WALAEvent(object):   \n    def __init__(self):\n            \n        self.providerId=\"\"\n        self.eventId=1\n        \n        self.OpcodeName=\"\"\n        self.KeywordName=\"\"\n        self.TaskName=\"\"\n        self.TenantName=\"\"\n        self.RoleName=\"\"\n        self.RoleInstanceName=\"\"\n        self.ContainerId=\"\"\n        self.ExecutionMode=\"IAAS\"\n        self.OSVersion=\"\"\n        self.GAVersion=\"\"\n        self.RAM=0\n        self.Processors=0\n\n\n    def ToXml(self):\n        strEventid=u'<Event id=\"{0}\"/>'.format(self.eventId)\n        strProviderid=u'<Provider 
id=\"{0}\"/>'.format(self.providerId)\n        strRecordFormat = u'<Param Name=\"{0}\" Value=\"{1}\" T=\"{2}\" />'\n        strRecordNoQuoteFormat = u'<Param Name=\"{0}\" Value={1} T=\"{2}\" />'\n        strMtStr=u'mt:wstr'\n        strMtUInt64=u'mt:uint64'\n        strMtBool=u'mt:bool'\n        strMtFloat=u'mt:float64'\n        strEventsData=u\"\"\n\n        for attName in  self.__dict__:\n            if attName in [\"eventId\",\"filedCount\",\"providerId\"]:\n                continue\n            \n            attValue = self.__dict__[attName]\n            if type(attValue) is int:\n                strEventsData+=strRecordFormat.format(attName,attValue,strMtUInt64)\n                continue\n            if type(attValue) is str:\n                attValue = xml.sax.saxutils.quoteattr(attValue)\t\t\t                \n                strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr)\n                continue\n            if str(type(attValue)).count(\"'unicode'\") >0 :\n                attValue = xml.sax.saxutils.quoteattr(attValue)\t\t\t \n                strEventsData+=strRecordNoQuoteFormat.format(attName,attValue,strMtStr)\n                continue\n            if type(attValue) is bool:\n                strEventsData+=strRecordFormat.format(attName,attValue,strMtBool)\n                continue\n            if type(attValue) is float:\n                strEventsData+=strRecordFormat.format(attName,attValue,strMtFloat)\n                continue\n            \n            Log(\"Warning: property \"+attName+\":\"+str(type(attValue))+\":type\"+str(type(attValue))+\"Can't convert to events data:\"+\":type not supported\")\n\n        return u\"<Data>{0}{1}{2}</Data>\".format(strProviderid,strEventid,strEventsData)\n\n    def Save(self):\n        eventfolder = LibDir+\"/events\"\n        if not os.path.exists(eventfolder):\n            os.mkdir(eventfolder)\n            os.chmod(eventfolder,0700)\n        if len(os.listdir(eventfolder)) > 1000:\n            raise Exception(\"WriteToFolder:Too many file under \"+eventfolder+\" exit\")\n    \n        filename = os.path.join(eventfolder,str(int(time.time()*1000000)))\n        with open(filename+\".tmp\",'wb+') as hfile:\n            hfile.write(self.ToXml().encode(\"utf-8\"))\n        os.rename(filename+\".tmp\",filename+\".tld\")\n\n\nclass WALAEventOperation:\n    HeartBeat=\"HeartBeat\"\n    Provision = \"Provision\"\n    Install = \"Install\"\n    UnIsntall = \"UnInstall\"\n    Disable = \"Disable\"\n    Enable = \"Enable\"\n    Download = \"Download\"\n    Upgrade = \"Upgrade\"\n    Update = \"Update\"           \n\ndef AddExtensionEvent(name,op,isSuccess,duration=0,version=\"1.0\",message=\"\",type=\"\",isInternal=False):\n    event = ExtensionEvent()\n    event.Name=name \n    event.Version=version \n    event.IsInternal=isInternal\n    event.Operation=op\n    event.OperationSuccess=isSuccess\n    event.Message=message \n    event.Duration=duration\n    event.ExtensionType=type\n    try:\n        event.Save()\n    except:\n        Error(\"Error \"+traceback.format_exc())\n        \n    \nclass ExtensionEvent(WALAEvent):\n    def __init__(self):\n                \n        WALAEvent.__init__(self)\n        self.eventId=1\n        self.providerId=\"69B669B9-4AF8-4C50-BDC4-6006FA76E975\"\n        self.Name=\"\"\n        self.Version=\"\"\n        self.IsInternal=False\n        self.Operation=\"\"\n        self.OperationSuccess=True\n        self.ExtensionType=\"\"\n        self.Message=\"\"\n        self.Duration=0\n    \n 
\nclass WALAEventMonitor(WALAEvent):\n    def __init__(self,postMethod):\n        WALAEvent.__init__(self)\n        self.post = postMethod\n        self.sysInfo={}\n        self.eventdir = LibDir+\"/events\"\n        self.issysteminfoinitilized = False\n\n    def StartEventsLoop(self):\n        eventThread = threading.Thread(target = self.EventsLoop)\n        eventThread.setDaemon(True)\n        eventThread.start()\n\n    def EventsLoop(self):\n        LastReportHeartBeatTime = datetime.datetime.min\n        try:\n            while(True):\n                if (datetime.datetime.now()-LastReportHeartBeatTime) > datetime.timedelta(hours=12):\n                    LastReportHeartBeatTime = datetime.datetime.now()\n                    AddExtensionEvent(op=WALAEventOperation.HeartBeat,name=\"WALA\",isSuccess=True)\n                self.postNumbersInOneLoop=0\n                self.CollectAndSendWALAEvents()\n                time.sleep(60)\n        except:\n            Error(\"Exception in events loop:\"+traceback.format_exc())\n\n    def SendEvent(self,providerid,events):\n        dataFormat = u'<?xml version=\"1.0\"?><TelemetryData version=\"1.0\"><Provider id=\"{0}\">{1}'\\\n        '</Provider></TelemetryData>'\n        data = dataFormat.format(providerid,events)\n        self.post(\"/machine/?comp=telemetrydata\", data)\n\n    def CollectAndSendWALAEvents(self):\n        if not os.path.exists(self.eventdir):\n            return\n        # Throttling: send at most 3 event batches per 15 seconds\n        eventSendNumber=0\n        eventFiles = os.listdir(self.eventdir)\n        events = {}\n        for file in eventFiles:\n            if not file.endswith(\".tld\"):\n                continue\n            with open(os.path.join(self.eventdir,file),\"rb\") as hfile:\n            # if opening or deleting the file fails, the exception propagates\n                xmlStr = hfile.read().decode(\"utf-8\",'ignore')\n            os.remove(os.path.join(self.eventdir,file))\n            params=\"\"\n            eventid=\"\"\n            providerid=\"\"\n            # if an exception happens while processing an event, catch it and continue\n            try:\n                xmlStr = self.AddSystemInfo(xmlStr)\n                for node in xml.dom.minidom.parseString(xmlStr.encode(\"utf-8\")).childNodes[0].childNodes:\n                    if node.tagName == \"Param\":\n                        params+=node.toxml()\n                    if node.tagName == \"Event\":\n                        eventid=node.getAttribute(\"id\")\n                    if node.tagName == \"Provider\":\n                        providerid = node.getAttribute(\"id\")\n            except:\n                Error(traceback.format_exc())\n                continue\n            if len(params)==0 or len(eventid)==0 or len(providerid)==0:\n                Error(\"Empty field in params:\"+params+\" event id:\"+eventid+\" provider id:\"+providerid)\n                continue\n\n            eventstr = u'<Event id=\"{0}\"><![CDATA[{1}]]></Event>'.format(eventid,params)\n            if not events.get(providerid):\n                events[providerid]=\"\"\n            if len(events[providerid]) >0 and len(events.get(providerid)+eventstr)>= 63*1024:\n                eventSendNumber+=1\n                self.SendEvent(providerid,events.get(providerid))\n                if eventSendNumber %3 ==0:\n                    time.sleep(15)\n                events[providerid]=\"\"\n            if len(eventstr) >= 
63*1024:\n                Error(\"Single event too large, aborting: \"+eventstr[:300])\n                continue\n\n            events[providerid]=events.get(providerid)+eventstr\n\n        for key in events.keys():\n            if len(events[key]) > 0:\n                eventSendNumber+=1\n                self.SendEvent(key,events[key])\n                if eventSendNumber%3 == 0:\n                    time.sleep(15)\n\n    def AddSystemInfo(self,eventData):\n        if not self.issysteminfoinitilized:\n            self.issysteminfoinitilized=True\n            try:\n                self.sysInfo[\"OSVersion\"]=platform.system()+\":\"+\"-\".join(DistInfo(1))+\":\"+platform.release()\n                self.sysInfo[\"GAVersion\"]=GuestAgentVersion\n                self.sysInfo[\"RAM\"]=MyDistro.getTotalMemory()\n                self.sysInfo[\"Processors\"]=MyDistro.getProcessorCores()\n                sharedConfig = xml.dom.minidom.parse(\"/var/lib/waagent/SharedConfig.xml\").childNodes[0]\n                hostEnvConfig= xml.dom.minidom.parse(\"/var/lib/waagent/HostingEnvironmentConfig.xml\").childNodes[0]\n                gfiles = RunGetOutput(\"ls -t /var/lib/waagent/GoalState.*.xml\")[1]\n                goalStateConfig = xml.dom.minidom.parse(gfiles.split(\"\\n\")[0]).childNodes[0]\n                self.sysInfo[\"TenantName\"]=hostEnvConfig.getElementsByTagName(\"Deployment\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleName\"]=hostEnvConfig.getElementsByTagName(\"Role\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleInstanceName\"]=sharedConfig.getElementsByTagName(\"Instance\")[0].getAttribute(\"id\")\n                self.sysInfo[\"ContainerId\"]=goalStateConfig.getElementsByTagName(\"ContainerId\")[0].childNodes[0].nodeValue\n            except:\n                Error(traceback.format_exc())\n\n        eventObject = xml.dom.minidom.parseString(eventData.encode(\"utf-8\")).childNodes[0]\n        for node in eventObject.childNodes:\n            if node.tagName == \"Param\":\n                name = node.getAttribute(\"Name\")\n                if self.sysInfo.get(name):\n                    node.setAttribute(\"Value\",xml.sax.saxutils.escape(str(self.sysInfo[name])))\n\n        return eventObject.toxml()\n\n\nclass Agent(Util):\n    \"\"\"\n    Primary object container for the provisioning process.\n    \"\"\"\n    def __init__(self):\n        self.GoalState = None\n        self.Endpoint = None\n        self.LoadBalancerProbeServer = None\n        self.HealthReportCounter = 0\n        self.TransportCert = \"\"\n        self.EnvMonitor = None\n        self.SendData = None\n        self.DhcpResponse = None\n\n    def CheckVersions(self):\n        \"\"\"\n        Query endpoint server for wire protocol version.\n        Fail if our desired protocol version is not seen.\n        \"\"\"\n        #<?xml version=\"1.0\" encoding=\"utf-8\"?>\n        #<Versions>\n        #  <Preferred>\n        #    <Version>2010-12-15</Version>\n        #  </Preferred>\n        #  <Supported>\n        #    <Version>2010-12-15</Version>\n        #    <Version>2010-28-10</Version>\n        #  </Supported>\n        #</Versions>\n        global ProtocolVersion\n        protocolVersionSeen = False\n        node = xml.dom.minidom.parseString(self.HttpGetWithoutHeaders(\"/?comp=versions\")).childNodes[0]\n        if node.localName != \"Versions\":\n            Error(\"CheckVersions: root not Versions\")\n            return False\n        for a in 
node.childNodes:\n            if a.nodeType == node.ELEMENT_NODE and a.localName == \"Supported\":\n                for b in a.childNodes:\n                    if b.nodeType == node.ELEMENT_NODE and b.localName == \"Version\":\n                        v = GetNodeTextData(b)\n                        LogIfVerbose(\"Fabric supported wire protocol version: \" + v)\n                        if v == ProtocolVersion:\n                            protocolVersionSeen = True\n            if a.nodeType == node.ELEMENT_NODE and a.localName == \"Preferred\":\n                v = GetNodeTextData(a.getElementsByTagName(\"Version\")[0])\n                Log(\"Fabric preferred wire protocol version: \" + v)\n        if not protocolVersionSeen:\n            Warn(\"Agent supported wire protocol version: \" + ProtocolVersion + \" was not advertised by Fabric.\")\n        else:\n            Log(\"Negotiated wire protocol version: \" + ProtocolVersion)\n        return True\n\n    def Unpack(self, buffer, offset, range):\n        \"\"\"\n        Unpack bytes into python values.\n        \"\"\"\n        result = 0\n        for i in range:\n            result = (result << 8) | Ord(buffer[offset + i])\n        return result\n\n    def UnpackLittleEndian(self, buffer, offset, length):\n        \"\"\"\n        Unpack little endian bytes into python values.\n        \"\"\"\n        return self.Unpack(buffer, offset, list(range(length - 1, -1, -1)))\n\n    def UnpackBigEndian(self, buffer, offset, length):\n        \"\"\"\n        Unpack big endian bytes into python values.\n        \"\"\"\n        return self.Unpack(buffer, offset, list(range(0, length)))\n\n    def HexDump3(self, buffer, offset, length):\n        \"\"\"\n        Dump range of buffer in formatted hex.\n        \"\"\"\n        return ''.join(['%02X' % Ord(char) for char in buffer[offset:offset + length]])\n\n    def HexDump2(self, buffer):\n        \"\"\"\n        Dump buffer in formatted hex.\n        \"\"\"\n        return self.HexDump3(buffer, 0, len(buffer))\n\n    def BuildDhcpRequest(self):\n        \"\"\"\n        Build DHCP request string.\n        \"\"\"\n        #\n        # typedef struct _DHCP {\n        #     UINT8   Opcode;                     /* op:     BOOTREQUEST or BOOTREPLY */\n        #     UINT8   HardwareAddressType;        /* htype:  ethernet */\n        #     UINT8   HardwareAddressLength;      /* hlen:   6 (48 bit mac address) */\n        #     UINT8   Hops;                       /* hops:   0 */\n        #     UINT8   TransactionID[4];           /* xid:    random */\n        #     UINT8   Seconds[2];                 /* secs:   0 */\n        #     UINT8   Flags[2];                   /* flags:  0 or 0x8000 for broadcast */\n        #     UINT8   ClientIpAddress[4];         /* ciaddr: 0 */\n        #     UINT8   YourIpAddress[4];           /* yiaddr: 0 */\n        #     UINT8   ServerIpAddress[4];         /* siaddr: 0 */\n        #     UINT8   RelayAgentIpAddress[4];     /* giaddr: 0 */\n        #     UINT8   ClientHardwareAddress[16];  /* chaddr: 6 byte ethernet MAC address */\n        #     UINT8   ServerName[64];             /* sname:  0 */\n        #     UINT8   BootFileName[128];          /* file:   0  */\n        #     UINT8   MagicCookie[4];             /*   99  130   83   99 */\n        #                                         /* 0x63 0x82 0x53 0x63 */\n        #     /* options -- hard code ours */\n        #\n        #     UINT8 MessageTypeCode;              /* 53 */\n        #     UINT8 MessageTypeLength;            /* 1 
*/\n        #     UINT8 MessageType;                  /* 1 for DISCOVER */\n        #     UINT8 End;                          /* 255 */\n        # } DHCP;\n        #\n\n        # list of 244 zeros\n        # (struct.pack_into would be good here, but requires Python 2.5)\n        sendData = [0] * 244\n\n        transactionID = os.urandom(4)\n        macAddress = MyDistro.GetMacAddress()\n\n        # Opcode = 1\n        # HardwareAddressType = 1 (ethernet/MAC)\n        # HardwareAddressLength = 6 (ethernet/MAC/48 bits)\n        for a in range(0, 3):\n            sendData[a] = [1, 1, 6][a]\n\n        # fill in transaction id (random number to ensure response matches request)\n        for a in range(0, 4):\n            sendData[4 + a] = Ord(transactionID[a])\n\n        LogIfVerbose(\"BuildDhcpRequest: transactionId:%s,%04X\" % (self.HexDump2(transactionID), self.UnpackBigEndian(sendData, 4, 4)))\n\n        # fill in ClientHardwareAddress\n        for a in range(0, 6):\n            sendData[0x1C + a] = Ord(macAddress[a])\n\n        # DHCP Magic Cookie: 99, 130, 83, 99\n        # MessageTypeCode = 53 DHCP Message Type\n        # MessageTypeLength = 1\n        # MessageType = DHCPDISCOVER\n        # End = 255 DHCP_END\n        for a in range(0, 8):\n            sendData[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a]\n        return array.array(\"B\", sendData)\n\n    def IntegerToIpAddressV4String(self, a):\n        \"\"\"\n        Convert a 32-bit integer into its dotted-quad IPv4 string form.\n        \"\"\"\n        return \"%u.%u.%u.%u\" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, a & 0xFF)\n\n    def RouteAdd(self, net, mask, gateway):\n        \"\"\"\n        Add specified route using /sbin/route add -net.\n        \"\"\"\n        net = self.IntegerToIpAddressV4String(net)\n        mask = self.IntegerToIpAddressV4String(mask)\n        gateway = self.IntegerToIpAddressV4String(gateway)\n        Run(\"/sbin/route add -net \" + net + \" netmask \" + mask + \" gw \" + gateway,chk_err=False)\n\n    def HandleDhcpResponse(self, sendData, receiveBuffer):\n        \"\"\"\n        Parse DHCP response:\n        Set default gateway.\n        Set default routes.\n        Retrieve endpoint server.\n        Returns endpoint server or None on error.\n        \"\"\"\n        LogIfVerbose(\"HandleDhcpResponse\")\n        bytesReceived = len(receiveBuffer)\n        if bytesReceived < 0xF6:\n            Error(\"HandleDhcpResponse: Too few bytes received \" + str(bytesReceived))\n            return None\n\n        LogIfVerbose(\"BytesReceived: \" + hex(bytesReceived))\n        LogWithPrefixIfVerbose(\"DHCP response:\", HexDump(receiveBuffer, bytesReceived))\n\n        # check transactionId, cookie, MAC address\n        # cookie should never mismatch\n        # transactionId and MAC address may mismatch if we see a response meant for another machine\n\n        for offsets in [list(range(4, 4 + 4)), list(range(0x1C, 0x1C + 6)), list(range(0xEC, 0xEC + 4))]:\n            for offset in offsets:\n                sentByte = Ord(sendData[offset])\n                receivedByte = Ord(receiveBuffer[offset])\n                if sentByte != receivedByte:\n                    LogIfVerbose(\"HandleDhcpResponse: sent cookie:\" + self.HexDump3(sendData, 0xEC, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: rcvd cookie:\" + self.HexDump3(receiveBuffer, 0xEC, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: sent transactionID:\" + self.HexDump3(sendData, 4, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: rcvd 
transactionID:\" + self.HexDump3(receiveBuffer, 4, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: sent ClientHardwareAddress:\" + self.HexDump3(sendData, 0x1C, 6))\n                    LogIfVerbose(\"HandleDhcpResponse: rcvd ClientHardwareAddress:\" + self.HexDump3(receiveBuffer, 0x1C, 6))\n                    LogIfVerbose(\"HandleDhcpResponse: transactionId, cookie, or MAC address mismatch\")\n                    return None\n        endpoint = None\n\n        #\n        # Walk all the returned options, parsing out what we need, ignoring the others.\n        # We need the custom option 245 to find the the endpoint we talk to,\n        # as well as, to handle some Linux DHCP client incompatibilities,\n        # options 3 for default gateway and 249 for routes. And 255 is end.\n        #\n\n        i = 0xF0 # offset to first option\n        while i < bytesReceived:\n            option = Ord(receiveBuffer[i])\n            length = 0\n            if (i + 1) < bytesReceived:\n                length = Ord(receiveBuffer[i + 1])\n            LogIfVerbose(\"DHCP option \" + hex(option) + \" at offset:\" + hex(i) + \" with length:\" + hex(length))\n            if option == 255:\n                LogIfVerbose(\"DHCP packet ended at offset \" + hex(i))\n                break\n            elif option == 249:\n                # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n                LogIfVerbose(\"Routes at offset:\" + hex(i) + \" with length:\" + hex(length))\n                if length < 5:\n                    Error(\"Data too small for option \" + str(option))\n                j = i + 2\n                while j < (i + length + 2):\n                    maskLengthBits = Ord(receiveBuffer[j])\n                    maskLengthBytes = (((maskLengthBits + 7) & ~7) >> 3)\n                    mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - maskLengthBits))\n                    j += 1\n                    net = self.UnpackBigEndian(receiveBuffer, j, maskLengthBytes)\n                    net <<= (32 - maskLengthBytes * 8)\n                    net &= mask\n                    j += maskLengthBytes\n                    gateway = self.UnpackBigEndian(receiveBuffer, j, 4)\n                    j += 4\n                    self.RouteAdd(net, mask, gateway)\n                if j != (i + length + 2):\n                    Error(\"HandleDhcpResponse: Unable to parse routes\")\n            elif option == 3 or option == 245:\n                if i + 5 < bytesReceived:\n                    if length != 4:\n                        Error(\"HandleDhcpResponse: Endpoint or Default Gateway not 4 bytes\")\n                        return None\n                    gateway = self.UnpackBigEndian(receiveBuffer, i + 2, 4)\n                    IpAddress = self.IntegerToIpAddressV4String(gateway)\n                    if option == 3:\n                        self.RouteAdd(0, 0, gateway)\n                        name = \"DefaultGateway\"\n                    else:\n                        endpoint = IpAddress\n                        name = \"Windows Azure wire protocol endpoint\"\n                    LogIfVerbose(name + \": \" + IpAddress + \" at \" + hex(i))\n                else:\n                    Error(\"HandleDhcpResponse: Data too small for option \" + str(option))\n            else:\n                LogIfVerbose(\"Skipping DHCP option \" + hex(option) + \" at \" + hex(i) + \" with length \" + hex(length))\n            i += length + 2\n        return endpoint\n\n    def DoDhcpWork(self):\n        
\"\"\"\n        Discover the wire server via DHCP option 245.\n        And workaround incompatibility with Windows Azure DHCP servers.\n        \"\"\"\n        ShortSleep = False # Sleep 1 second before retrying DHCP queries.\n        ifname=None\n\n        sleepDurations = [0, 10, 30, 60, 60]\n        maxRetry = len(sleepDurations)\n        lastTry = (maxRetry - 1)\n        for retry in range(0, maxRetry):\n            try:\n                #Open DHCP port if iptables is enabled.\n                Run(\"iptables -D INPUT -p udp --dport 68 -j ACCEPT\",chk_err=False)  # We supress error logging on error.\n                Run(\"iptables -I INPUT -p udp --dport 68 -j ACCEPT\",chk_err=False)  # We supress error logging on error.\n                strRetry = str(retry)\n                prefix = \"DoDhcpWork: try=\" + strRetry\n                LogIfVerbose(prefix)\n                sendData = self.BuildDhcpRequest()\n                LogWithPrefixIfVerbose(\"DHCP request:\", HexDump(sendData, len(sendData)))\n                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n                sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n                missingDefaultRoute = True\n                try:\n                    if DistInfo()[0] == 'FreeBSD':\n                        missingDefaultRoute = True\n                    else:\n                        routes = RunGetOutput(\"route -n\")[1]\n                    for line in routes.split('\\n'):\n                        if line.startswith(\"0.0.0.0 \") or line.startswith(\"default \"):\n                            missingDefaultRoute = False\n                except:\n                    pass\n                if missingDefaultRoute:\n                    # This is required because sending after binding to 0.0.0.0 fails with\n                    # network unreachable when the default gateway is not set up.\n                    ifname=MyDistro.GetInterfaceName()\n                    Log(\"DoDhcpWork: Missing default route - adding broadcast route for DHCP.\")\n                    if DistInfo()[0] == 'FreeBSD':\n                        Run(\"route add -net 255.255.255.255 -iface \" + ifname,chk_err=False)\n                    else:\n                        Run(\"route add 255.255.255.255 dev \" + ifname,chk_err=False)\n                if MyDistro.isDHCPEnabled():\n                    MyDistro.stopDHCP()\n                sock.bind((\"0.0.0.0\", 68)) \n                sock.sendto(sendData, (\"<broadcast>\", 67))\n                sock.settimeout(10)\n                Log(\"DoDhcpWork: Setting socket.timeout=10, entering recv\")\n                receiveBuffer = sock.recv(1024)\n                endpoint = self.HandleDhcpResponse(sendData, receiveBuffer)\n                if endpoint == None:\n                    LogIfVerbose(\"DoDhcpWork: No endpoint found\")\n                if endpoint != None or retry == lastTry:\n                    if endpoint != None:\n                        self.SendData = sendData\n                        self.DhcpResponse = receiveBuffer\n                    if retry == lastTry:\n                        LogIfVerbose(\"DoDhcpWork: try=\" + strRetry)\n                    return endpoint\n                sleepDuration = [sleepDurations[retry % len(sleepDurations)], 1][ShortSleep]\n                LogIfVerbose(\"DoDhcpWork: sleep=\" + str(sleepDuration))\n                time.sleep(sleepDuration)\n            except Exception, 
e:\n                ErrorWithPrefix(prefix, str(e))\n                ErrorWithPrefix(prefix, traceback.format_exc())\n            finally:\n                sock.close()\n                if missingDefaultRoute:\n                    # We added this route - delete it\n                    Log(\"DoDhcpWork: Removing broadcast route for DHCP.\")\n                    if DistInfo()[0] == 'FreeBSD':\n                        Run(\"route del -net 255.255.255.255 -iface \" + ifname,chk_err=False)\n                    else:\n                        Run(\"route del 255.255.255.255 dev \" + ifname,chk_err=False)  # We suppress error logging on error.\n                if MyDistro.isDHCPEnabled():\n                    MyDistro.startDHCP()\n        return None\n\n    def UpdateAndPublishHostName(self, name):\n        \"\"\"\n        Set hostname locally and publish to iDNS\n        \"\"\"\n        Log(\"Setting host name: \" + name)\n        MyDistro.publishHostname(name)\n        ethernetInterface = MyDistro.GetInterfaceName()\n        MyDistro.RestartInterface(ethernetInterface)\n        self.RestoreRoutes()\n\n    def RestoreRoutes(self):\n        \"\"\"\n        If there is a DHCP response, then call HandleDhcpResponse.\n        \"\"\"\n        if self.SendData != None and self.DhcpResponse != None:\n            self.HandleDhcpResponse(self.SendData, self.DhcpResponse)\n\n    def UpdateGoalState(self):\n        \"\"\"\n        Retrieve goal state information from endpoint server.\n        Parse xml and initialize Agent.GoalState object.\n        Return object or None on error.\n        \"\"\"\n        goalStateXml = None\n        maxRetry = 9\n        log = NoLog\n        for retry in range(1, maxRetry + 1):\n            strRetry = str(retry)\n            log(\"retry UpdateGoalState,retry=\" + strRetry)\n            goalStateXml = self.HttpGetWithHeaders(\"/machine/?comp=goalstate\")\n            if goalStateXml != None:\n                break\n            log = Log\n            time.sleep(retry)\n        if not goalStateXml:\n            Error(\"UpdateGoalState failed.\")\n            return\n        Log(\"Retrieved GoalState from Windows Azure Fabric.\")\n        self.GoalState = GoalState(self).Parse(goalStateXml)\n        return self.GoalState\n\n    def ReportReady(self):\n        \"\"\"\n        Send health report 'Ready' to server.\n        This signals the fabric that our provisioning is complete,\n        and the host is ready for operation.\n        \"\"\"\n        counter = (self.HealthReportCounter + 1) % 1000000\n        self.HealthReportCounter = counter\n        healthReport = (\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><Health xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\"><GoalStateIncarnation>\"\n                        + self.GoalState.Incarnation\n                        + \"</GoalStateIncarnation><Container><ContainerId>\"\n                        + self.GoalState.ContainerId\n                        + \"</ContainerId><RoleInstanceList><Role><InstanceId>\"\n                        + self.GoalState.RoleInstanceId\n                        + \"</InstanceId><Health><State>Ready</State></Health></Role></RoleInstanceList></Container></Health>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=health\", healthReport)\n        if a != None:\n            return a.getheader(\"x-ms-latest-goal-state-incarnation-number\")\n        return None\n\n    def ReportNotReady(self, status, desc):\n        \"\"\"\n        Send 
health report 'Provisioning' to server.\n        This signals the fabric that our provisioning is starting.\n        \"\"\"\n        healthReport = (\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><Health xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\"><GoalStateIncarnation>\"\n                        + self.GoalState.Incarnation\n                        + \"</GoalStateIncarnation><Container><ContainerId>\"\n                        + self.GoalState.ContainerId\n                        + \"</ContainerId><RoleInstanceList><Role><InstanceId>\"\n                        + self.GoalState.RoleInstanceId\n                        + \"</InstanceId><Health><State>NotReady</State>\"\n                        + \"<Details><SubStatus>\" + status + \"</SubStatus><Description>\" + desc + \"</Description></Details>\"\n                        + \"</Health></Role></RoleInstanceList></Container></Health>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=health\", healthReport)\n        if a != None:\n            return a.getheader(\"x-ms-latest-goal-state-incarnation-number\")\n        return None\n\n    def ReportRoleProperties(self, thumbprint):\n        \"\"\"\n        Send roleProperties and thumbprint to server.\n        \"\"\"\n        roleProperties = (\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><RoleProperties><Container>\"\n                        + \"<ContainerId>\" + self.GoalState.ContainerId + \"</ContainerId>\"\n                        + \"<RoleInstances><RoleInstance>\"\n                        + \"<Id>\" + self.GoalState.RoleInstanceId + \"</Id>\"\n                        + \"<Properties><Property name=\\\"CertificateThumbprint\\\" value=\\\"\" + thumbprint + \"\\\" /></Properties>\"\n                        + \"</RoleInstance></RoleInstances></Container></RoleProperties>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=roleProperties\", \n                                     roleProperties)\n        Log(\"Posted Role Properties. 
CertificateThumbprint=\" + thumbprint)\n        return a\n\n    def LoadBalancerProbeServer_Shutdown(self):\n        \"\"\"\n        Shutdown the LoadBalancerProbeServer.\n        \"\"\"\n        if self.LoadBalancerProbeServer != None:\n            self.LoadBalancerProbeServer.shutdown()\n            self.LoadBalancerProbeServer = None\n\n    def GenerateTransportCert(self):\n        \"\"\"\n        Create ssl certificate for https communication with endpoint server.\n        \"\"\"\n        Run(Openssl + \" req -x509 -nodes -subj /CN=LinuxTransport -days 32768 -newkey rsa:2048 -keyout TransportPrivate.pem -out TransportCert.pem\")\n        cert = \"\"\n        for line in GetFileContents(\"TransportCert.pem\").split('\\n'):\n            if not \"CERTIFICATE\" in line:\n                cert += line.rstrip()\n        return cert\n\n    def DoVmmStartup(self):\n        \"\"\"\n        Spawn the VMM startup script.\n        \"\"\"\n        Log(\"Starting Microsoft System Center VMM Initialization Process\")\n        pid = subprocess.Popen([\"/bin/bash\",\"/mnt/cdrom/secure/\"+VMM_STARTUP_SCRIPT_NAME,\"-p /mnt/cdrom/secure/ \"]).pid\n        time.sleep(5)\n        sys.exit(0)\n        \n    def TryUnloadAtapiix(self):\n        \"\"\"\n        If global modloaded is True, then we loaded the ata_piix kernel module, unload it.\n        \"\"\"\n        if modloaded:\n            Run(\"rmmod ata_piix.ko\",chk_err=False)\n            Log(\"Unloaded ata_piix.ko driver for ATAPI CD-ROM\")\n\n    def TryLoadAtapiix(self):\n        \"\"\"\n        Load the ata_piix kernel module if it exists.\n        If successful, set global modloaded to True.\n        If unable to load module leave modloaded False.\n        \"\"\"\n        global modloaded\n        modloaded=False\n        retcode,krn=RunGetOutput('uname -r')\n        krn_pth='/lib/modules/'+krn.strip('\\n')+'/kernel/drivers/ata/ata_piix.ko'\n        if Run(\"lsmod | grep ata_piix\",chk_err=False) == 0 :\n            Log(\"Module \" + krn_pth + \" driver for ATAPI CD-ROM is already present.\")\n            return 0\n        if retcode:\n            Error(\"Unable to provision: Failed to call uname -r\")\n            return \"Unable to provision: Failed to call uname\"\n        if os.path.isfile(krn_pth):\n            retcode,output=RunGetOutput(\"insmod \" + krn_pth,chk_err=False)\n        else:\n            Log(\"Module \" + krn_pth + \" driver for ATAPI CD-ROM does not exist.\")\n            return 1\n        if retcode != 0:\n            Error('Error calling insmod for '+ krn_pth + ' driver for ATAPI CD-ROM')\n            return retcode\n        time.sleep(1)\n        # check 3 times if the mod is loaded\n        for i in range(3):\n            if Run('lsmod | grep ata_piix'):\n                continue\n            else :\n                modloaded=True\n                break\n        if not modloaded:\n            Error('Unable to load '+ krn_pth + ' driver for ATAPI CD-ROM')\n            return 1\n        \n        Log(\"Loaded \" + krn_pth + \" driver for ATAPI CD-ROM\")\n        \n        # we have succeeded loading the ata_piix mod if it can be done.\n\n    def SearchForVMMStartup(self):\n        \"\"\"\n        Search for a DVD/CDROM containing VMM's VMM_CONFIG_FILE_NAME.\n        Call TryLoadAtapiix in case we must load the ata_piix module first.\n\n        If VMM_CONFIG_FILE_NAME is found, call DoVmmStartup.\n        Else, return to Azure Provisioning process.\n        \"\"\"\n        self.TryLoadAtapiix()\n        if 
os.path.exists('/mnt/cdrom/secure') == False:\n            CreateDir(\"/mnt/cdrom/secure\", \"root\", 0700)\n        mounted=False\n        for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]:\n            if dvds == None:\n                continue\n            dvd = '/dev/'+dvds.group(0)\n            if Run(\"LC_ALL=C fdisk -l \" + dvd + \" | grep Disk\",chk_err=False):\n                continue  # Not mountable\n            else:\n                for retry in range(1,6):\n                    retcode,output=RunGetOutput(\"mount -v \" + dvd + \" /mnt/cdrom/secure\")\n                    Log(output[:-1])\n                    if retcode == 0:\n                        Log(\"mount succeeded on attempt #\" + str(retry) )\n                        mounted=True\n                        break\n                    if 'is already mounted on /mnt/cdrom/secure' in output:\n                        Log(\"Device \" + dvd + \" is already mounted on /mnt/cdrom/secure.\" + str(retry) )\n                        mounted=True\n                        break\n                    Log(\"mount failed on attempt #\" + str(retry) )\n                    Log(\"mount loop sleeping 5...\")\n                    time.sleep(5)\n                if not mounted:\n                    # unable to mount\n                    continue\n                if not os.path.isfile(\"/mnt/cdrom/secure/\"+VMM_CONFIG_FILE_NAME):\n                    #nope - mount the next drive\n                    if mounted:\n                        Run(\"umount \"+dvd,chk_err=False)\n                        mounted=False\n                        continue\n                else : # it is the vmm startup\n                    self.DoVmmStartup()\n\n        Log(\"VMM Init script not found.  
Provisioning for Azure\")\n        return \n        \n    def Provision(self):\n        \"\"\"\n        Responible for:\n        Regenerate ssh keys,\n        Mount, read, and parse ovfenv.xml from provisioning dvd rom\n        Process the ovfenv.xml info\n        Call ReportRoleProperties\n        If configured, delete root password.\n        Return None on success, error string on error.\n        \"\"\"\n        enabled = Config.get(\"Provisioning.Enabled\")\n        if enabled != None and enabled.lower().startswith(\"n\"):\n            return\n        Log(\"Provisioning image started.\")\n        type = Config.get(\"Provisioning.SshHostKeyPairType\")\n        if type == None:\n            type = \"rsa\"\n        regenerateKeys = Config.get(\"Provisioning.RegenerateSshHostKeyPair\")\n        if regenerateKeys == None or regenerateKeys.lower().startswith(\"y\"):\n            Run(\"rm -f /etc/ssh/ssh_host_*key*\")\n            Run(\"ssh-keygen -N '' -t \" + type + \" -f /etc/ssh/ssh_host_\" + type + \"_key\")\n            MyDistro.restartSshService()\n        #SetFileContents(LibDir + \"/provisioned\", \"\")\n        dvd = None\n        for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)',x) for x in os.listdir('/dev/')]:\n            if dvds == None :\n                continue\n            dvd = '/dev/'+dvds.group(0)\n        if dvd == None:\n            # No DVD device detected\n            Error(\"No DVD device detected, unable to provision.\")\n            return \"No DVD device detected, unable to provision.\"\n        if MyDistro.mediaHasFilesystem(dvd) is False :\n            out=MyDistro.load_ata_piix()\n            if out:\n                return out\n            for i in range(10): # we may have to wait \n                if os.path.exists(dvd):\n                    break\n                Log(\"Waiting for DVD - sleeping 1 - \"+str(i+1)+\" try...\")\n                time.sleep(1)\n        if os.path.exists('/mnt/cdrom/secure') == False:\n            CreateDir(\"/mnt/cdrom/secure\", \"root\", 0700)\n        #begin mount loop - 5 tries - 5 sec wait between\n        for retry in range(1,6):\n            location='/mnt/cdrom/secure'\n            retcode,output=MyDistro.mountDVD(dvd,location)\n            Log(output[:-1])\n            if retcode == 0:\n                Log(\"mount succeeded on attempt #\" + str(retry) )\n                break\n            if 'is already mounted on /mnt/cdrom/secure' in output:\n                Log(\"Device \" + dvd + \" is already mounted on /mnt/cdrom/secure.\" + str(retry) )\n                break\n            Log(\"mount failed on attempt #\" + str(retry) )\n            Log(\"mount loop sleeping 5...\")\n            time.sleep(5)\n        if not os.path.isfile(\"/mnt/cdrom/secure/ovf-env.xml\"):\n            Error(\"Unable to provision: Missing ovf-env.xml on DVD.\")\n            return \"Failed to retrieve provisioning data (0x02).\"\n        ovfxml = (GetFileContents(u\"/mnt/cdrom/secure/ovf-env.xml\",asbin=False)) # use unicode here to ensure correct codec gets used.\n        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128 :\n            ovfxml = ovfxml[3:] # BOM is not stripped.  
First three bytes are > 128 and not unicode chars so we ignore them.\n        ovfxml=ovfxml.strip(chr(0x00)) # we may have NULLs.\n        ovfxml=ovfxml[ovfxml.find('<?'):] # chop leading text if present\n        SetFileContents(\"ovf-env.xml\", re.sub(\"<UserPassword>.*?<\", \"<UserPassword>*<\", ovfxml))\n        Run(\"umount \" + dvd,chk_err=False)\n        MyDistro.unload_ata_piix()\n        error = None\n        if ovfxml != None:\n            Log(\"Provisioning image using OVF settings in the DVD.\")\n            ovfobj = OvfEnv().Parse(ovfxml)\n            if ovfobj != None:\n                error = ovfobj.Process()\n                if error :\n                    Error (\"Provisioning image FAILED \" + error)\n                    return (\"Provisioning image FAILED \" + error)\n            Log(\"Ovf XML process finished\")\n        # This is done here because regenerated SSH host key pairs may potentially be overwritten when processing the ovfxml\n        fingerprint = RunGetOutput(\"ssh-keygen -lf /etc/ssh/ssh_host_\" + type + \"_key.pub\")[1].rstrip().split()[1].replace(':','')\n        self.ReportRoleProperties(fingerprint)\n        delRootPass = Config.get(\"Provisioning.DeleteRootPassword\")\n        if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n            MyDistro.deleteRootPassword()\n        Log(\"Provisioning image completed.\")\n        return error\n\n    def Run(self):\n        \"\"\"\n        Called by 'waagent -daemon'.\n        Main loop to process the goal state.  State is posted every 25 seconds\n        when provisioning has been completed.\n        \n        Search for VMM environment, start VMM script if found.\n        Perform DHCP and endpoint server discovery by calling DoDhcpWork().\n        Check wire protocol versions.\n        Set SCSI timeout on root device.\n        Call GenerateTransportCert() to create ssl certs for server communication.\n        Call UpdateGoalState().\n        If not provisioned, call ReportNotReady(\"Provisioning\", \"Starting\")\n        Call Provision(), set global provisioned = True if successful.\n        Call goalState.Process()\n        Start LBProbeServer if indicated in waagent.conf.\n        Start the StateConsumer if indicated in waagent.conf.\n        ReportReady if provisioning is complete.\n        If provisioning failed, call ReportNotReady(\"ProvisioningFailed\", provisionError)\n        \"\"\"\n        SetFileContents(\"/var/run/waagent.pid\", str(os.getpid()) + \"\\n\")\n\n        # Determine if we are in VMM.  
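If the VMM config file is present, DoVmmStartup() never returns (it calls sys.exit(0)), so the Azure provisioning path below is skipped.  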
Spawn VMM_STARTUP_SCRIPT_NAME if found.\n        self.SearchForVMMStartup()\n        ipv4=''\n        while ipv4 == '' or ipv4 == '0.0.0.0' :\n            ipv4=MyDistro.GetIpv4Address()\n            if ipv4 == '' or ipv4 == '0.0.0.0' :\n                Log(\"Waiting for network.\")\n                time.sleep(10)\n\n        Log(\"IPv4 address: \" + ipv4)\n        mac=''\n        mac=MyDistro.GetMacAddress()\n        if len(mac)>0 :\n            Log(\"MAC  address: \" + \":\".join([\"%02X\" % Ord(a) for a in mac]))\n        \n        # Consume Entropy in ACPI table provided by Hyper-V\n        try:\n            SetFileContents(\"/dev/random\", GetFileContents(\"/sys/firmware/acpi/tables/OEM0\"))\n        except:\n            pass\n\n        Log(\"Probing for Windows Azure environment.\")\n        self.Endpoint = self.DoDhcpWork()\n\n        if self.Endpoint == None:\n            Log(\"Windows Azure environment not detected.\")\n            while True:\n                time.sleep(60)\n\n        Log(\"Discovered Windows Azure endpoint: \" + self.Endpoint)\n        if not self.CheckVersions():\n            Error(\"Agent.CheckVersions failed\")\n            sys.exit(1)\n\n        self.EnvMonitor = EnvMonitor()\n\n        # Set SCSI timeout on SCSI disks\n        MyDistro.initScsiDiskTimeout()\n        global provisioned\n        global provisionError\n        \n        global Openssl\n        Openssl = Config.get(\"OS.OpensslPath\")\n        if Openssl == None:\n            Openssl = \"openssl\"\n\n        self.TransportCert = self.GenerateTransportCert()\n        \n        eventMonitor = None\n        incarnation = None # goalStateIncarnationFromHealthReport\n        currentPort = None # loadBalancerProbePort\n        goalState = None # self.GoalState, instance of GoalState\n        provisioned = os.path.exists(LibDir + \"/provisioned\")\n        program = Config.get(\"Role.StateConsumer\")\n        provisionError = None        \n        lbProbeResponder = True\n        setting = Config.get(\"LBProbeResponder\")\n        if setting != None and setting.lower().startswith(\"n\"):\n            lbProbeResponder = False\n        while True:\n            if (goalState == None) or (incarnation == None) or (goalState.Incarnation != incarnation):\n                try:\n                    goalState = self.UpdateGoalState()\n                except HttpResourceGoneError as e:\n                    Warn(\"Incarnation is out of date:{0}\".format(e))\n                    incarnation = None\n                    continue\n\n                if goalState == None :\n                    Warn(\"Failed to fetch goalstate\")\n                    continue\n\n                if provisioned == False:\n                    self.ReportNotReady(\"Provisioning\", \"Starting\")\n\n                goalState.Process()\n\n                if provisioned == False:\n                    provisionError = self.Provision()\n                    if provisionError == None :\n                        provisioned = True\n                        SetFileContents(LibDir + \"/provisioned\", \"\")\n                        lastCtime = \"NOTFIND\"\n                        try:\n                            walaConfigFile = MyDistro.getConfigurationPath()\n                            lastCtime = time.ctime(os.path.getctime(walaConfigFile))\n                        except:\n                            pass\n                        #Get Ctime of wala config, can help identify the base image of this VM\n                        
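# NOTE: Deprovision() appends a blank line to the waagent config file so that\n                        # its ctime changes on every deprovision; the Ctime reported below therefore\n                        # approximates when this base image was captured.\n                        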
AddExtensionEvent(name=\"WALA\",op=WALAEventOperation.Provision,isSuccess=True,\n                                              message=\"WALA Config Ctime:\"+lastCtime)\n\n                        executeCustomData = Config.get(\"Provisioning.ExecuteCustomData\")\n                        if executeCustomData != None and executeCustomData.lower().startswith(\"y\"):\n                          if os.path.exists(LibDir + '/CustomData'):\n                            Run('chmod +x ' + LibDir + '/CustomData')\n                            Run(LibDir + '/CustomData')\n                          else:\n                            Error(LibDir + '/CustomData does not exist.')\n\n                #\n                # only one port supported\n                # restart server if new port is different than old port\n                # stop server if no longer a port\n                #\n                goalPort = goalState.LoadBalancerProbePort\n                if currentPort != goalPort:\n                    try:\n                        self.LoadBalancerProbeServer_Shutdown()\n                        currentPort = goalPort\n                        if currentPort != None and lbProbeResponder == True:\n                            self.LoadBalancerProbeServer = LoadBalancerProbeServer(currentPort)\n                            if self.LoadBalancerProbeServer == None :\n                                lbProbeResponder = False\n                                Log(\"Unable to create LBProbeResponder.\")\n                    except Exception, e:\n                        Error(\"Failed to launch LBProbeResponder: {0}\".format(e))\n                        currentPort = None\n\n                # Report SSH key fingerprint\n                type = Config.get(\"Provisioning.SshHostKeyPairType\")\n                if type == None:\n                    type = \"rsa\"\n\n                host_key_path = \"/etc/ssh/ssh_host_\" + type + \"_key.pub\"\n                if(MyDistro.waitForSshHostKey(host_key_path)):\n                    fingerprint = RunGetOutput(\"ssh-keygen -lf /etc/ssh/ssh_host_\" + type + \"_key.pub\")[1].rstrip().split()[1].replace(':','')\n                    self.ReportRoleProperties(fingerprint)\n\n            if program != None and DiskActivated == True:\n                try:\n                    Children.append(subprocess.Popen([program, \"Ready\"]))\n                except OSError, e :\n                    ErrorWithPrefix('SharedConfig.Parse','Exception: '+ str(e) +' occured launching ' + program )\n                program = None\n\n            sleepToReduceAccessDenied = 3\n            time.sleep(sleepToReduceAccessDenied)\n            if provisionError != None:\n                incarnation = self.ReportNotReady(\"ProvisioningFailed\", provisionError)\n            else:\n                incarnation = self.ReportReady()\n            # Process our extensions.\n            if goalState.ExtensionsConfig == None and goalState.ExtensionsConfigXml != None :\n                goalState.ExtensionsConfig = ExtensionsConfig().Parse(goalState.ExtensionsConfigXml)\n\n            # report the status/heartbeat results of extension processing\n            if goalState.ExtensionsConfig != None :\n                goalState.ExtensionsConfig.ReportHandlerStatus()\n            \n            if not eventMonitor:\n                eventMonitor = WALAEventMonitor(self.HttpPostWithHeaders)\n                eventMonitor.StartEventsLoop()\n\n            time.sleep(25 - sleepToReduceAccessDenied)\n\n            \nWaagentLogrotate = 
\"\"\"\\\n/var/log/waagent.log {\n    monthly\n    rotate 6\n    notifempty\n    missingok\n}\n\"\"\"\n\ndef GetMountPoint(mountlist, device):\n    \"\"\"\n    Example of mountlist:\n        /dev/sda1 on / type ext4 (rw)\n        proc on /proc type proc (rw)\n        sysfs on /sys type sysfs (rw)\n        devpts on /dev/pts type devpts (rw,gid=5,mode=620)\n        tmpfs on /dev/shm type tmpfs (rw,rootcontext=\"system_u:object_r:tmpfs_t:s0\")\n        none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n        /dev/sdb1 on /mnt/resource type ext4 (rw)\n    \"\"\"\n    if (mountlist and device):\n        for entry in mountlist.split('\\n'):\n            if(re.search(device, entry)):\n                tokens = entry.split()\n                #Return the 3rd column of this line\n                return tokens[2] if len(tokens) > 2 else None\n    return None\n\ndef FindInLinuxKernelCmdline(option):\n    \"\"\"\n    Return match object if 'option' is present in the kernel boot options\n    of the grub configuration.\n    \"\"\"\n    m=None\n    matchs=r'^.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?'+option+r'.*$'\n    try:\n        m=FindStringInFile(MyDistro.grubKernelBootOptionsFile,matchs)\n    except IOError, e:\n        Error('FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e))\n        \n    return m\n\ndef AppendToLinuxKernelCmdline(option):\n    \"\"\"\n    Add 'option' to the kernel boot options of the grub configuration.\n    \"\"\"\n    if not FindInLinuxKernelCmdline(option):\n        src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r')(.*?)(\"?)$'\n        rep=r'\\1\\2 '+ option + r'\\3'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep)\n        except IOError, e :\n            Error('AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e))\n            return 1\n        Run(\"update-grub\",chk_err=False)    \n    return 0\n\ndef RemoveFromLinuxKernelCmdline(option):\n    \"\"\"\n    Remove 'option' to the kernel boot options of the grub configuration.\n    \"\"\"\n    if FindInLinuxKernelCmdline(option):\n        src=r'^(.*?'+MyDistro.grubKernelBootOptionsLine+r'.*?)('+option+r')(.*?)(\"?)$'\n        rep=r'\\1\\3\\4'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile,src,rep)\n        except IOError, e :\n            Error('RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e))\n            return 1\n        Run(\"update-grub\",chk_err=False)    \n    return 0\n\ndef FindStringInFile(fname,matchs):\n    \"\"\"\n    Return match object if found in file.\n    \"\"\"\n    try:\n        ms=re.compile(matchs)\n        for l in (open(fname,'r')).readlines():\n            m=re.search(ms,l)\n            if m:\n                return m\n    except:\n        raise\n    \n    return None\n\ndef ReplaceStringInFile(fname,src,repl):\n    \"\"\"\n    Replace 'src' with 'repl' in file.\n    \"\"\"\n    try:\n        sr=re.compile(src)\n        if FindStringInFile(fname,src):\n            updated=''\n            for l in (open(fname,'r')).readlines():\n                n=re.sub(sr,repl,l)\n                updated+=n\n            ReplaceFileContentsAtomic(fname,updated)\n    except :\n        raise\n    return\n\ndef ApplyVNUMAWorkaround():\n    \"\"\"\n    If kernel version has NUMA bug, add 'numa=off' to\n    kernel boot options.\n    \"\"\"\n    VersionParts = 
platform.release().replace('-', '.').split('.')\n    if int(VersionParts[0]) > 2:\n        return\n    if int(VersionParts[1]) > 6:\n        return\n    if int(VersionParts[2]) > 37:\n        return\n    if AppendToLinuxKernelCmdline(\"numa=off\") == 0 :\n        Log(\"Your kernel version \" + platform.release() + \" has a NUMA-related bug: NUMA has been disabled.\")\n    else :\n        Error(\"Error adding 'numa=off'.  NUMA has not been disabled.\")\n        \ndef RevertVNUMAWorkaround():\n    \"\"\"\n    Remove 'numa=off' from kernel boot options.\n    \"\"\"\n    if RemoveFromLinuxKernelCmdline(\"numa=off\") == 0 :\n        Log('NUMA has been re-enabled')\n    else :\n        Log('NUMA has not been re-enabled')\n\ndef Install():\n    \"\"\"\n    Install the agent service.\n    Check dependencies.\n    Create /etc/waagent.conf and move old version to\n    /etc/waagent.conf.old\n    Copy RulesFiles to /var/lib/waagent\n    Create /etc/logrotate.d/waagent\n    Set /etc/ssh/sshd_config ClientAliveInterval to 180\n    Call ApplyVNUMAWorkaround()\n    \"\"\"\n    if MyDistro.checkDependencies():\n        return 1\n    os.chmod(sys.argv[0], 0755)\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(a):\n            if os.path.isfile(GetLastPathElement(a)):\n                os.remove(GetLastPathElement(a))\n            shutil.move(a, \".\")\n            Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a) )\n    MyDistro.registerAgentService()\n    if os.path.isfile(\"/etc/waagent.conf\"):\n        try:\n            os.remove(\"/etc/waagent.conf.old\")\n        except:\n            pass\n        try:\n            os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n            Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n        except:\n            pass\n    SetFileContents(\"/etc/waagent.conf\", MyDistro.waagent_conf_file)\n    SetFileContents(\"/etc/logrotate.d/waagent\", WaagentLogrotate)\n    filepath = \"/etc/ssh/sshd_config\"\n    ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n        a.startswith(\"ClientAliveInterval\"),\n        GetFileContents(filepath).split('\\n'))) + \"\\nClientAliveInterval 180\\n\")\n    Log(\"Configured SSH client probing to keep connections alive.\")\n    ApplyVNUMAWorkaround()\n    return 0\n\ndef GetMyDistro(dist_class_name=''):\n    \"\"\"\n    Return MyDistro object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    if dist_class_name == '':\n        if 'Linux' in platform.system():\n            Distro=DistInfo()[0]\n        else : # I know this is not Linux!\n            if 'FreeBSD' in platform.system():\n                Distro=platform.system()\n        Distro=Distro.strip('\"')\n        Distro=Distro.strip(' ')\n        dist_class_name=Distro+'Distro'\n    else:\n        Distro=dist_class_name\n    if not globals().has_key(dist_class_name):\n        print Distro+' is not a supported distribution.'\n        return None\n    return globals()[dist_class_name]() # the distro class inside this module.\n\ndef DistInfo(fullname=0):\n    if 'FreeBSD' in platform.system():\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['FreeBSD', release]\n        return distinfo\n    \n    if 'linux_distribution' in dir(platform):\n        distinfo = list(platform.linux_distribution(full_distribution_name=fullname))\n        distinfo[0] = distinfo[0].strip() # remove trailing whitespace in distro name\n        return 
distinfo\n    else:\n        return platform.dist()\n\ndef PackagedInstall(buildroot):\n    \"\"\"\n    Called from setup.py for use by RPM.\n    The generic implementation creates directories and\n    files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent,\n    /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot.\n    Copies the generated waagent.conf into place and exits.\n    \"\"\"\n    MyDistro=GetMyDistro()\n    if MyDistro == None :\n        sys.exit(1)\n    MyDistro.packagedInstall(buildroot)\n\ndef LibraryInstall(buildroot):\n    pass\n\ndef Uninstall():\n    \"\"\"\n    Uninstall the agent service.\n    Copy RulesFiles back to original locations.\n    Delete agent-related files.\n    Call RevertVNUMAWorkaround().\n    \"\"\"\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(GetLastPathElement(a)):\n            try:\n                shutil.move(GetLastPathElement(a), a)\n                Warn(\"Moved \" + LibDir + \"/\" + GetLastPathElement(a) + \" -> \" + a )\n            except:\n                pass\n    MyDistro.unregisterAgentService()\n    MyDistro.uninstallDeleteFiles()\n    RevertVNUMAWorkaround()\n    return 0\n\ndef Deprovision(force, deluser):\n    \"\"\"\n    Remove user accounts created by provisioning.\n    Disable the root password if Provisioning.DeleteRootPassword = 'y'\n    Stop agent service.\n    Remove SSH host keys if they were generated by the provision.\n    Set hostname to 'localhost.localdomain'.\n    Delete cached system configuration files in /var/lib and /var/lib/waagent.\n    \"\"\"\n    \n    #Append blank line at the end of file, so the ctime of this file is changed every time\n    Run(\"echo ''>>\"+ MyDistro.getConfigurationPath())\n\n    SwitchCwd()\n    ovfxml = GetFileContents(LibDir+\"/ovf-env.xml\")\n    ovfobj = None\n    if ovfxml != None:\n        ovfobj = OvfEnv().Parse(ovfxml, True)\n\n    print(\"WARNING! The waagent service will be stopped.\")\n    print(\"WARNING! All SSH host key pairs will be deleted.\")\n    print(\"WARNING! Cached DHCP leases will be deleted.\")\n    MyDistro.deprovisionWarnUser()\n    delRootPass = Config.get(\"Provisioning.DeleteRootPassword\")\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        print(\"WARNING! root password will be disabled. You will not be able to login as root.\")\n\n    if ovfobj != None and deluser == True:\n        print(\"WARNING! \" + ovfobj.UserName + \" account and entire home directory will be deleted.\")\n\n    if force == False and not raw_input('Do you want to proceed (y/n)? 
').startswith('y'):\n        return 1\n\n    MyDistro.stopAgentService()\n\n    # Remove SSH host keys\n    regenerateKeys = Config.get(\"Provisioning.RegenerateSshHostKeyPair\")\n    if regenerateKeys == None or regenerateKeys.lower().startswith(\"y\"):\n        Run(\"rm -f /etc/ssh/ssh_host_*key*\")\n\n    # Remove root password\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        MyDistro.deleteRootPassword()\n    # Remove distribution specific networking configuration\n\n    MyDistro.publishHostname('localhost.localdomain')\n    MyDistro.deprovisionDeleteFiles()\n    if deluser == True:\n        MyDistro.DeleteAccount(ovfobj.UserName)\n    return 0\n\ndef SwitchCwd():\n    \"\"\"\n    Switch cwd to /var/lib/waagent.\n    Create if not present.\n    \"\"\"\n    CreateDir(LibDir, \"root\", 0700)\n    os.chdir(LibDir)\n\ndef Usage():\n    \"\"\"\n    Print the arguments to waagent.\n    \"\"\"\n    print(\"usage: \" + sys.argv[0] + \" [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]\")\n    return 0\n\n\n\ndef main():\n    \"\"\"\n    Instantiate MyDistro, exit if distro class is not defined.\n    Parse command-line arguments, exit with usage() on error.\n    Instantiate ConfigurationProvider.\n    Call appropriate non-daemon methods and exit.\n    If daemon mode, enter Agent.Run() loop.\n    \"\"\"\n    if GuestAgentVersion == \"\":\n        print(\"WARNING! This is a non-standard agent that does not include a valid version string.\")\n    \n    if len(sys.argv) == 1:\n        sys.exit(Usage())\n\n    LoggerInit('/var/log/waagent.log','/dev/console')\n    global LinuxDistro\n    LinuxDistro=DistInfo()[0]\n    \n    #The platform.py lib has an issue with detecting the Oracle Linux distribution.\n    #Merge the following patch provided by Oracle as a temporary fix.\n    if os.path.exists(\"/etc/oracle-release\"): \n        LinuxDistro=\"Oracle Linux\" \n\n    global MyDistro\n    MyDistro=GetMyDistro()\n    if MyDistro == None :\n        sys.exit(1)\n    args = []\n    conf_file = None\n    global force\n    force = False\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(help|usage|\\?)\", a):\n            sys.exit(Usage())\n        elif re.match(\"^([-/]*)version\", a):\n            print(GuestAgentVersion + \" running on \" + LinuxDistro)\n            sys.exit(0)\n        elif re.match(\"^([-/]*)verbose\", a):\n            myLogger.verbose = True\n        elif re.match(\"^([-/]*)force\", a):\n            force = True\n        elif re.match(\"^(?:[-/]*)conf=.+\", a):\n            conf_file = re.match(\"^(?:[-/]*)conf=(.+)\", a).groups()[0]\n        elif re.match(\"^([-/]*)(setup|install)\", a):\n            sys.exit(MyDistro.Install())\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            sys.exit(Uninstall())\n        else:\n            args.append(a)\n    global Config\n    Config = ConfigurationProvider(conf_file)\n    \n    logfile = Config.get(\"Logs.File\")\n    if logfile is not None:\n        myLogger.file_path = logfile\n    logconsole = Config.get(\"Logs.Console\")\n    if logconsole is not None and logconsole.lower().startswith(\"n\"):\n        myLogger.con_path = None\n    verbose = Config.get(\"Logs.Verbose\")\n    if verbose != None and verbose.lower().startswith(\"y\"):\n        myLogger.verbose=True\n    global daemon\n    daemon = False\n    for a in args:\n        if re.match(\"^([-/]*)deprovision\\+user\", a):\n            sys.exit(Deprovision(force, True))\n        elif 
re.match(\"^([-/]*)deprovision\", a):\n            sys.exit(Deprovision(force, False))\n        elif re.match(\"^([-/]*)daemon\", a):\n            daemon = True\n        elif re.match(\"^([-/]*)serialconsole\", a):\n            AppendToLinuxKernelCmdline(\"console=ttyS0 earlyprintk=ttyS0\")\n            Log(\"Configured kernel to use ttyS0 as the boot console.\")\n            sys.exit(0)\n        else:\n            print(\"Invalid command line parameter:\" + a)\n            sys.exit(1)\n    \n    if daemon == False:\n        sys.exit(Usage())\n    global modloaded\n    modloaded = False\n    try:\n        SwitchCwd()\n        Log(GuestAgentLongName + \" Version: \" + GuestAgentVersion)\n        if IsLinux():\n            Log(\"Linux Distribution Detected      : \" + LinuxDistro)\n        global WaAgent\n        WaAgent = Agent()\n        WaAgent.Run()\n    except Exception, e:\n        Error(traceback.format_exc())\n        Error(\"Exception: \" + str(e))\n        sys.exit(1)\n    \nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "Common/WALinuxAgent-2.0.16/waagent",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux Agent\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Implements parts of RFC 2131, 1541, 1497 and\n# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx\n\nimport sys\n\n# crypt module was removed in Python 3.13\n# For Python < 3.11: use builtin crypt\n# For Python >= 3.11: try crypt_r package, then ctypes fallback\nif sys.version_info >= (3, 11):\n    try:\n        import crypt_r as crypt\n    except ImportError:\n        try:\n            from Utils import crypt_fallback as crypt\n        except ImportError:\n            crypt = None\nelse:\n    try:\n        import crypt\n    except ImportError:\n        try:\n            from Utils import crypt_fallback as crypt\n        except ImportError:\n            crypt = None\n\nimport random\nimport array\nimport base64\nimport os\nimport os.path\nimport platform\nimport pwd\nimport re\nimport shutil\nimport socket\nimport struct\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nimport time\nimport traceback\nimport xml.dom.minidom\nimport fcntl\nimport inspect\nimport zipfile\nimport json\nimport datetime\nimport xml.sax.saxutils\n# distutils.version was deprecated in Python 3.10 and removed in Python 3.12\ndef import_loose_version():\n    if sys.version_info >= (3, 12):\n        return LooseVersionComparator\n    else:\n        from distutils.version import LooseVersion\n        return LooseVersion\n\n\nif sys.version_info[0] == 3:\n    import http.client as httpclient\n    from urllib.parse import urlparse\n\nelif sys.version_info[0] == 2:\n    import httplib as httpclient\n    from urlparse import urlparse\n\nif not hasattr(subprocess, 'check_output'):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode = returncode\n            self.cmd = cmd\n            self.output = output\n\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n\n    subprocess.check_output = check_output\n    subprocess.CalledProcessError = CalledProcessError\n\nGuestAgentName = 
\"WALinuxAgent\"\nGuestAgentLongName = \"Azure Linux Agent\"\nGuestAgentVersion = \"WALinuxAgent-2.0.16\"\nProtocolVersion = \"2012-11-30\"  # WARNING this value is used to confirm the correct fabric protocol.\n\nConfig = None\nWaAgent = None\nDiskActivated = False\nOpenssl = \"openssl\"\nChildren = []\nExtensionChildren = []\nVMM_STARTUP_SCRIPT_NAME = 'install'\nVMM_CONFIG_FILE_NAME = 'linuxosconfiguration.xml'\nglobal RulesFiles\nRulesFiles = [\"/lib/udev/rules.d/75-persistent-net-generator.rules\",\n              \"/etc/udev/rules.d/70-persistent-net.rules\"]\nVarLibDhcpDirectories = [\"/var/lib/dhclient\", \"/var/lib/dhcpcd\", \"/var/lib/dhcp\"]\nEtcDhcpClientConfFiles = [\"/etc/dhcp/dhclient.conf\", \"/etc/dhcp3/dhclient.conf\"]\nglobal LibDir\nLibDir = \"/var/lib/waagent\"\nglobal provisioned\nprovisioned = False\nglobal provisionError\nprovisionError = None\nHandlerStatusToAggStatus = {\"installed\": \"Installing\", \"enabled\": \"Ready\", \"unintalled\": \"NotReady\",\n                            \"disabled\": \"NotReady\"}\n\nWaagentConf = \"\"\"\\\n#\n# Azure Linux Agent Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y       # Password authentication for root account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ext4            # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\nREADME_FILENAME = \"DATALOSS_WARNING_README.txt\"\nREADME_FILECONTENT = \"\"\"\\\nWARNING: THIS IS A TEMPORARY DISK. 
\n\nAny data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT.\n\nPlease do not use this disk for storing any personal or application data.\n\nFor additional details, please refer to the MSDN documentation at: http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx\n\"\"\"\n\nclass LooseVersionComparator:\n    \"\"\"\n    Class to parse and compare versions with pre-release tags.\n    Based on LooseVersion from distutils.version, as that was removed in Python 3.12.\n    Python's standard library does not include a direct replacement for `distutils.version.LooseVersion` \n    or `StrictVersion` for arbitrary version string comparison outside of the context of installed packages.\n    This is needed to avoid installing pip.\n    \n    \"\"\"\n    def __init__(self, version):\n        self.version, self.prerelease = self.parse_version(version)\n    \n    def parse_version(self, version):\n        # Regular expression to parse versions with pre-release tags\n        match = re.match(r'^(\\d+(?:\\.\\d+)*)(?:-([\\da-zA-Z-]+))?$', version)\n        if not match:\n            raise ValueError(\"Invalid version format: {0}\".format(version))\n        \n        main_version = tuple(map(int, match.group(1).split('.')))\n        prerelease = match.group(2)\n        return main_version, prerelease\n\n    def __lt__(self, other):\n        if self.version == other.version:\n            return self._compare_prerelease(self.prerelease, other.prerelease) < 0\n        return self.version < other.version\n\n    def __gt__(self, other):\n        if self.version == other.version:\n            return self._compare_prerelease(self.prerelease, other.prerelease) > 0\n        return self.version > other.version\n\n    def __eq__(self, other):\n        return self.version == other.version and self.prerelease == other.prerelease\n\n    def __str__(self):\n        return \".\".join(map(str, self.version)) + (\"-{0}\".format(self.prerelease) if self.prerelease else \"\")\n\n    @staticmethod\n    def _compare_prerelease(pr1, pr2):\n        if pr1 is None and pr2 is None:\n            return 0\n        if pr1 is None:\n            return 1\n        if pr2 is None:\n            return -1\n        return (pr1 > pr2) - (pr1 < pr2)\n\nLooseVersion = import_loose_version()\n\n############################################################\n# BEGIN DISTRO CLASS DEFS\n############################################################\n############################################################    \n#   AbstractDistro\n############################################################    \nclass AbstractDistro(object):\n    \"\"\"\n    AbstractDistro defines a skeleton necessary for a concrete Distro class.\n\n    Generic methods and attributes are kept here, distribution-specific attributes\n    and behavior are to be placed in the concrete child named distroDistro, where\n    distro is the string returned by calling python platform.linux_distribution()[0].\n    So for CentOS the derived class is called 'centosDistro'.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  
These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        self.agent_service_name = os.path.basename(sys.argv[0])\n        self.selinux = None\n        self.service_cmd = '/usr/sbin/service'\n        self.ssh_service_restart_option = 'restart'\n        self.ssh_service_name = 'ssh'\n        self.ssh_config_file = '/etc/ssh/sshd_config'\n        self.hostname_file_path = '/etc/hostname'\n        self.dhcp_client_name = 'dhclient'\n        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod',\n                             'openssl', 'sfdisk', 'fdisk', 'mkfs',\n                             'sed', 'grep', 'sudo', 'parted']\n        self.init_script_file = '/etc/init.d/waagent'\n        self.agent_package_name = 'WALinuxAgent'\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\", '/etc/resolv.conf']\n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\", \"/etc/logrotate.d/waagent\"]\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT='\n        self.getpidcmd = 'pidof'\n        self.mount_dvd_cmd = 'mount'\n        self.sudoers_dir_base = '/etc'\n        self.waagent_conf_file = WaagentConf\n        self.shadow_file_mode = 0o600\n        self.shadow_file_path = \"/etc/shadow\"\n        self.dhcp_enabled = False\n\n    def isSelinuxSystem(self):\n        \"\"\"\n        Checks and sets self.selinux = True if SELinux is available on system.\n        \"\"\"\n        if self.selinux == None:\n            if Run(\"which getenforce\", chk_err=False):\n                self.selinux = False\n            else:\n                self.selinux = True\n        return self.selinux\n\n    def isSelinuxRunning(self):\n        \"\"\"\n        Calls shell command 'getenforce' and returns True if 'Enforcing'.\n        \"\"\"\n        if self.isSelinuxSystem():\n            return RunGetOutput(\"getenforce\")[1].startswith(\"Enforcing\")\n        else:\n            return False\n\n    def setSelinuxEnforce(self, state):\n        \"\"\"\n        Calls shell command 'setenforce' with 'state' and returns resulting exit code.\n        \"\"\"\n        if self.isSelinuxSystem():\n            if state:\n                s = '1'\n            else:\n                s = '0'\n            return Run(\"setenforce \" + s)\n\n    def setSelinuxContext(self, path, cn):\n        \"\"\"\n        Calls shell 'chcon' with 'path' and 'cn' context.\n        Returns exit result.\n        \"\"\"\n        if self.isSelinuxSystem():\n            return Run('chcon ' + cn + ' ' + path)\n\n    def setHostname(self, name):\n        \"\"\"\n        Shell call to hostname.\n        Returns resulting exit code.\n        \"\"\"\n        return Run('hostname ' + name)\n\n    def publishHostname(self, name):\n        \"\"\"\n        Set the contents of the hostname file to 'name'.\n        Return 1 on failure.\n        \"\"\"\n        try:\n            r = SetFileContents(self.hostname_file_path, name)\n            for f in EtcDhcpClientConfFiles:\n                if os.path.exists(f) and FindStringInFile(f,\n                                                          r'^[^#]*?send\\s*host-name.*?(<hostname>|gethostname[(,)])') == None:\n                    r = ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', \"send host-name \\\"\" + name + \"\\\";\\n\"\n                                                  + \"\\n\".join(filter(lambda a: not 
a.startswith(\"send host-name\"),\n                                                                     GetFileContents('/etc/dhcp/dhclient.conf').split(\n                                                                         '\\n'))))\n        except:\n            return 1\n        return r\n\n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        Create the waagent support files for service installation.\n        Called by registerAgentService()\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def registerAgentService(self):\n        \"\"\"\n        Calls installAgentService to create service files.\n        Shell exec service registration commands. (e.g. chkconfig --add waagent)\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def uninstallAgentService(self):\n        \"\"\"\n        Call service subsystem to remove waagent script.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Calls self.stopAgentService and call self.uninstallAgentService()\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n\n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' ' + self.agent_service_name + ' start')\n\n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop', False)\n\n    def restartSshService(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" + self.ssh_service_name + \" \" + self.ssh_service_restart_option\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences in openssl packages deployed\n        \"\"\"\n        error = 0\n        SshPubKey = OvfEnv().OpensslToSsh(fprint)\n        if SshPubKey != None:\n            AppendFileContents(path, SshPubKey)\n        else:\n            Error(\"Failed: \" + fprint + \".crt -> \" + path)\n            error = 1\n        return error\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Query package database for prescence of an installed package.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def checkPackageUpdateable(self, p):\n        \"\"\"\n        Online check if updated package of walinuxagent is available.\n        Abstract Virtual Function.  
Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def deleteRootPassword(self):\n        \"\"\"\n        Generic root password removal.\n        \"\"\"\n        filepath = \"/etc/shadow\"\n        ReplaceFileContentsAtomic(filepath, \"root:*LOCK*:14600::::::\\n\"\n                                  + \"\\n\".join(\n            filter(lambda a: not a.startswith(\"root:\"), GetFileContents(filepath).split('\\n'))))\n        os.chmod(filepath, self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')\n        Log(\"Root password deleted.\")\n        return 0\n\n    def changePass(self, user, password):\n        Log(\"Change user password\")\n        crypt_id = Config.get(\"Provisioning.PasswordCryptId\")\n        if crypt_id is None:\n            crypt_id = \"6\"\n\n        salt_len = Config.get(\"Provisioning.PasswordCryptSaltLength\")\n        try:\n            salt_len = int(salt_len)\n            if salt_len < 0 or salt_len > 10:\n                salt_len = 10\n        except (ValueError, TypeError):\n            salt_len = 10\n\n        return self.chpasswd(user, password, crypt_id=crypt_id,\n                             salt_len=salt_len)\n\n    def chpasswd(self, username, password, crypt_id=6, salt_len=10):\n        passwd_hash = self.gen_password_hash(password, crypt_id, salt_len)\n        cmd = \"usermod -p '{0}' {1}\".format(passwd_hash, username)\n        ret, output = RunGetOutput(cmd, log_cmd=False)\n        if ret != 0:\n            return \"Failed to set password for {0}: {1}\".format(username, output)\n\n    def gen_password_hash(self, password, crypt_id, salt_len):\n        collection = string.ascii_letters + string.digits\n        salt = ''.join(random.choice(collection) for _ in range(salt_len))\n        salt = \"${0}${1}\".format(crypt_id, salt)\n        return crypt.crypt(password, salt)\n\n    def load_ata_piix(self):\n        return WaAgent.TryLoadAtapiix()\n\n    def unload_ata_piix(self):\n        \"\"\"\n        Generic function to remove ata_piix.ko.\n        \"\"\"\n        return WaAgent.TryUnloadAtapiix()\n\n    def deprovisionWarnUser(self):\n        \"\"\"\n        Generic user warnings used at deprovision.\n        \"\"\"\n        print(\"WARNING! 
Nameserver configuration in /etc/resolv.conf will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Files to delete when VM is deprovisioned\n        \"\"\"\n        for a in VarLibDhcpDirectories:\n            Run(\"rm -f \" + a + \"/*\")\n\n        # Clear LibDir, remove nameserver and root bash history\n\n        for f in os.listdir(LibDir) + self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def uninstallDeleteFiles(self):\n        \"\"\"\n        Files to delete when agent is uninstalled.\n        \"\"\"\n        for f in self.agent_files_to_uninstall:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Generic dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        if self.checkPackageInstalled('NetworkManager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        try:\n            m = __import__('pyasn1')\n        except ImportError:\n            Error(GuestAgentLongName + \" requires python-pyasn1 for your Linux distribution.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self, buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies generated files waagent.conf, under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot + '/etc'):\n            os.mkdir(buildroot + '/etc')\n        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)\n\n        if not os.path.exists(buildroot + '/etc/logrotate.d'):\n            os.mkdir(buildroot + '/etc/logrotate.d')\n        SetFileContents(buildroot + '/etc/logrotate.d/waagent', WaagentLogrotate)\n\n        self.init_script_file = buildroot + self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def GetIpv4Address(self):\n        \"\"\"\n        Return the ip of the \n        first active non-loopback interface.\n        \"\"\"\n        addr = ''\n        iface, addr = GetFirstActiveNetworkInterfaceNonLoopback()\n        return addr\n\n    def GetMacAddress(self):\n        return GetMacAddress()\n\n    def GetInterfaceName(self):\n        return GetFirstActiveNetworkInterfaceNonLoopback()[0]\n\n    def RestartInterface(self, iface, max_retry=3):\n        for retry in range(1, max_retry + 1):\n            ret = Run(\"ifdown \" + iface + \" && ifup \" + iface)\n            if ret == 0:\n                return\n            Log(\"Failed to restart interface: {0}, ret={1}\".format(iface, ret))\n            if retry < max_retry:\n                Log(\"Retry restart interface in 5 seconds\")\n                time.sleep(5)\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        return CreateAccount(user, password, expiration, thumbprint)\n\n    def DeleteAccount(self, user):\n        return DeleteAccount(user)\n\n    def ActivateResourceDisk(self):\n        \"\"\"\n        
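The resource disk is the device at IDE port 1 (see DeviceForIdePort(1) below);\n        a DATALOSS_WARNING_README.txt file is written to its mount point.\n        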
Format, mount, and, if specified in the configuration,\n        set the resource disk as swap.\n        \"\"\"\n        global DiskActivated\n        format = Config.get(\"ResourceDisk.Format\")\n        if format == None or format.lower().startswith(\"n\"):\n            DiskActivated = True\n            return\n        device = DeviceForIdePort(1)\n        if device == None:\n            Error(\"ActivateResourceDisk: Unable to detect disk topology.\")\n            return\n        device = \"/dev/\" + device\n\n        mountlist = RunGetOutput(\"mount\")[1]\n        mountpoint = GetMountPoint(mountlist, device)\n\n        if (mountpoint):\n            Log(\"ActivateResourceDisk: \" + device + \"1 is already mounted.\")\n        else:\n            mountpoint = Config.get(\"ResourceDisk.MountPoint\")\n            if mountpoint == None:\n                mountpoint = \"/mnt/resource\"\n            CreateDir(mountpoint, \"root\", 0o755)\n            fs = Config.get(\"ResourceDisk.Filesystem\")\n            if fs == None:\n                fs = \"ext3\"\n\n            partition = device + \"1\"\n\n            # Check partition type\n            Log(\"Detect GPT...\")\n            ret = RunGetOutput(\"parted {0} print\".format(device))\n            if ret[0] == 0 and \"gpt\" in ret[1]:\n                Log(\"GPT detected.\")\n                # GPT (GUID Partition Table) is used.\n                # Get partitions.\n                parts = filter(lambda x: re.match(r\"^\\s*[0-9]+\", x), ret[1].split(\"\\n\"))\n                # If there is more than one partition, remove all partitions\n                # and create a new one using the entire disk space.\n                if len(parts) > 1:\n                    for i in range(1, len(parts) + 1):\n                        Run(\"parted {0} rm {1}\".format(device, i))\n                    Run(\"parted {0} mkpart primary 0% 100%\".format(device))\n                    Run(\"mkfs.\" + fs + \" \" + partition + \" -F\")\n            else:\n                existingFS = RunGetOutput(\"sfdisk -q -c \" + device + \" 1\", chk_err=False)[1].rstrip()\n                if existingFS == \"7\" and fs != \"ntfs\":\n                    Run(\"sfdisk -c \" + device + \" 1 83\")\n                    Run(\"mkfs.\" + fs + \" \" + partition)\n            if Run(\"mount \" + partition + \" \" + mountpoint, chk_err=False):\n                # If mount failed, try to format the partition and mount again\n                Warn(\"Failed to mount resource disk. 
Retry mounting.\")\n                Run(\"mkfs.\" + fs + \" \" + partition + \" -F\")\n                if Run(\"mount \" + partition + \" \" + mountpoint):\n                    Error(\"ActivateResourceDisk: Failed to mount resource disk (\" + partition + \").\")\n                    return\n            Log(\"Resource disk (\" + partition + \") is mounted at \" + mountpoint + \" with fstype \" + fs)\n\n        # Create README file under the root of resource disk\n        SetFileContents(os.path.join(mountpoint, README_FILENAME), README_FILECONTENT)\n        DiskActivated = True\n\n        # Create swap space\n        swap = Config.get(\"ResourceDisk.EnableSwap\")\n        if swap == None or swap.lower().startswith(\"n\"):\n            return\n        sizeKB = int(Config.get(\"ResourceDisk.SwapSizeMB\")) * 1024\n        if os.path.isfile(mountpoint + \"/swapfile\") and os.path.getsize(mountpoint + \"/swapfile\") != (sizeKB * 1024):\n            os.remove(mountpoint + \"/swapfile\")\n        if not os.path.isfile(mountpoint + \"/swapfile\"):\n            Run(\"dd if=/dev/zero of=\" + mountpoint + \"/swapfile bs=1024 count=\" + str(sizeKB))\n            Run(\"mkswap \" + mountpoint + \"/swapfile\")\n        Run(\"chmod 600 \" + mountpoint + \"/swapfile\")\n        if not Run(\"swapon \" + mountpoint + \"/swapfile\"):\n            Log(\"Enabled \" + str(sizeKB) + \" KB of swap at \" + mountpoint + \"/swapfile\")\n        else:\n            Error(\"ActivateResourceDisk: Failed to activate swap at \" + mountpoint + \"/swapfile\")\n\n    def Install(self):\n        return Install()\n\n    def mediaHasFilesystem(self, dsk):\n        if len(dsk) == 0:\n            return False\n        if Run(\"LC_ALL=C fdisk -l \" + dsk + \" | grep Disk\"):\n            return False\n        return True\n\n    def mountDVD(self, dvd, location):\n        return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location)\n\n    def GetHome(self):\n        return GetHome()\n\n    def getDhcpClientName(self):\n        return self.dhcp_client_name\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout when the agent starts running\n        \"\"\"\n        self.setScsiDiskTimeout()\n\n    def setScsiDiskTimeout(self):\n        \"\"\"\n        Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout\n        \"\"\"\n        try:\n            scsiTimeout = Config.get(\"OS.RootDeviceScsiTimeout\")\n            for diskName in [disk for disk in os.listdir(\"/sys/block\") if disk.startswith(\"sd\")]:\n                self.setBlockDeviceTimeout(diskName, scsiTimeout)\n        except:\n            pass\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        \"\"\"\n        Set SCSI disk timeout by set /sys/block/sd*/device/timeout\n        \"\"\"\n        if timeout != None and device:\n            filePath = \"/sys/block/\" + device + \"/device/timeout\"\n            if (GetFileContents(filePath).splitlines()[0].rstrip() != timeout):\n                SetFileContents(filePath, timeout)\n                Log(\"SetBlockDeviceTimeout: Update the device \" + device + \" with timeout \" + timeout)\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Provide a dummy waiting, since by default, ssh host key is created by waagent and the key\n        should already been created.\n        \"\"\"\n        if (os.path.isfile(path)):\n            return True\n        else:\n            Error(\"Can't find host key: 
\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Provide a dummy wait, since by default the ssh host key is created by waagent\n        and should already exist.\n        \"\"\"\n        if (os.path.isfile(path)):\n            return True\n        else:\n            Error(\"Can't find host key: {0}\".format(path))\n            return False\n\n    def isDHCPEnabled(self):\n        return self.dhcp_enabled\n\n    def stopDHCP(self):\n        \"\"\"\n        Stop the system DHCP client so that the agent can bind on its port. If\n        the distro has set dhcp_enabled to True, it will need to provide an\n        implementation of this method.\n        \"\"\"\n        raise NotImplementedError('stopDHCP method missing')\n\n    def startDHCP(self):\n        \"\"\"\n        Start the system DHCP client. If the distro has set dhcp_enabled to\n        True, it will need to provide an implementation of this method.\n        \"\"\"\n        raise NotImplementedError('startDHCP method missing')\n\n    def translateCustomData(self, data):\n        \"\"\"\n        Translate the custom data from a Base64 encoding. Default to no-op.\n        \"\"\"\n        decodeCustomData = Config.get(\"Provisioning.DecodeCustomData\")\n        if decodeCustomData != None and decodeCustomData.lower().startswith(\"y\"):\n            return base64.b64decode(data)\n        return data\n\n    def getConfigurationPath(self):\n        return \"/etc/waagent.conf\"\n\n    def getProcessorCores(self):\n        return int(RunGetOutput(\"grep 'processor.*:' /proc/cpuinfo |wc -l\")[1])\n\n    def getTotalMemory(self):\n        # Integer division keeps the Python 2 semantics (whole MB) on Python 3 as well.\n        return int(RunGetOutput(\"grep MemTotal /proc/meminfo |awk '{print $2}'\")[1]) // 1024\n\n    def getInterfaceNameByMac(self, mac):\n        ret, output = RunGetOutput(\"ifconfig -a\")\n        if ret != 0:\n            raise Exception(\"Failed to get network interface info\")\n        output = output.replace('\\n', '')\n        match = re.search(r\"(eth\\d).*(HWaddr|ether) {0}\".format(mac),\n                          output, re.IGNORECASE)\n        if match is None:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        output = match.group(0)\n        eths = re.findall(r\"eth\\d\", output)\n        if eths is None or len(eths) == 0:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        return eths[-1]\n\n    def configIpV4(self, ifName, addr, netmask=24):\n        ret, output = RunGetOutput(\"ifconfig {0} up\".format(ifName))\n        if ret != 0:\n            raise Exception(\"Failed to bring up {0}: {1}\".format(ifName,\n                                                                 output))\n        ret, output = RunGetOutput(\"ifconfig {0} {1}/{2}\".format(ifName, addr,\n                                                                 netmask))\n        if ret != 0:\n            raise Exception(\"Failed to config ipv4 for {0}: {1}\".format(ifName,\n                                                                        output))\n\n    def setDefaultGateway(self, gateway):\n        # Note the space after \"gw\"; without it the route command is malformed.\n        Run(\"/sbin/route add default gw \" + gateway, chk_err=False)\n\n    def routeAdd(self, net, mask, gateway):\n        Run(\"/sbin/route add -net \" + net + \" netmask \" + mask + \" gw \" + gateway,\n            chk_err=False)\n\n    def getNdDriverVersion(self):\n        \"\"\"\n        If an error happens, raise an RdmaError.\n        \"\"\"\n        try:\n            with open(\"/var/lib/hyperv/.kvp_pool_0\", \"r\") as f:\n                lines = f.read()\n            r = re.search(r\"NdDriverVersion\\0+(\\d\\d\\d\\.\\d)\", lines)\n
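            # Format note (illustrative): the KVP pool file holds key/value records whose fields are NUL-padded, hence the \\0+ between the key name and its value in the pattern above. A matching record looks roughly like:\n            #   NdDriverVersion\\x00...\\x00142.0\\x00...\n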
            if r is not None:\n                NdDriverVersion = r.groups()[0]\n                return NdDriverVersion  # e.g. NdDriverVersion = 142.0\n            else:\n                Log(\"Error: NdDriverVersion not found.\")\n                return None\n        except Exception as e:\n            errMsg = 'Failed to read NdDriverVersion from the KVP pool, error: %s, stack trace: %s' % (\n                str(e), traceback.format_exc())\n            Log(errMsg)\n            raise RdmaError(RdmaConfig.nd_driver_detect_error)\n\n    def checkInstallHyperV(self):\n        return None\n\n    def getRdmaPackageVersion(self):\n        return None\n\n    def rdmaUpdate(self, updateRdmaRepository=None):\n        Log(\"rdmaUpdate in base class\")\n        pass\n\n    def checkRDMA(self):\n        Log(\"checkRDMA in base class\")\n        pass\n\n\nclass DefaultDistro(AbstractDistro):\n    \"\"\"\n    Default Distro concrete class: This class serves as a default OS behavior class.\n    \"\"\"\n\n    def startDHCP(self):\n        \"\"\"\n        Following the pattern used in WALinuxAgent for the Default distro: this method is not implemented in the\n        default case.\n        \"\"\"\n        pass\n\n    def stopDHCP(self):\n        \"\"\"\n        Following the pattern used in WALinuxAgent for the Default distro: this method is not implemented in the\n        default case.\n        \"\"\"\n        pass\n\n    def __init__(self):\n        super(DefaultDistro, self).__init__()\n\n\n############################################################\n#   GentooDistro\n############################################################\ngentoo_init_file = \"\"\"\\\n#!/sbin/runscript\n\ncommand=/usr/sbin/waagent\npidfile=/var/run/waagent.pid\ncommand_args=-daemon\ncommand_background=true\nname=\"Azure Linux Agent\"\n\ndepend()\n{\n    need localmount\n    use logger network\n    after bootmisc modules\n}\n\n\"\"\"\n\n\nclass gentooDistro(AbstractDistro):\n    \"\"\"\n    Gentoo distro concrete class\n    \"\"\"\n\n    def __init__(self):\n        super(gentooDistro, self).__init__()\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_name = 'sshd'\n        self.hostname_file_path = '/etc/conf.d/hostname'\n        self.dhcp_client_name = 'dhcpcd'\n        self.shadow_file_mode = 0o640\n        self.init_file = gentoo_init_file\n\n    def publishHostname(self, name):\n        r = 0  # default to success when the hostname file does not exist\n        try:\n            if (os.path.isfile(self.hostname_file_path)):\n                r = ReplaceFileContentsAtomic(self.hostname_file_path, \"hostname=\\\"\" + name + \"\\\"\\n\"\n                                              + \"\\n\".join(filter(lambda a: not a.startswith(\"hostname=\"),\n                                                                 GetFileContents(self.hostname_file_path).split(\"\\n\"))))\n        except:\n            return 1\n        return r\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o755)\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('rc-update add ' + self.agent_service_name + ' default')\n\n    def uninstallAgentService(self):\n        return Run('rc-update del ' + self.agent_service_name + ' default')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def checkPackageInstalled(self, p):\n        if Run('eix -I ^' + p + '$', chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run('eix -u ^' + p + '$', 
chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def RestartInterface(self, iface):\n        Run(\"/etc/init.d/net.\" + iface + \" restart\")\n\n\n############################################################\n#   SuSEDistro\n############################################################    \nsuse_init_file = \"\"\"\\\n#! /bin/sh\n#\n# Azure Linux Agent sysV init script\n#\n# Copyright 2013 Microsoft Corporation\n# Copyright SUSE LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# /etc/init.d/waagent\n#\n#  and symbolic link\n#\n# /usr/sbin/rcwaagent\n#\n# System startup script for the waagent\n#\n### BEGIN INIT INFO\n# Provides: AzureLinuxAgent\n# Required-Start: $network sshd\n# Required-Stop: $network sshd\n# Default-Start: 3 5\n# Default-Stop: 0 1 2 6\n# Description: Start the AzureLinuxAgent\n### END INIT INFO\n\nPYTHON=/usr/bin/python\nWAZD_BIN=/usr/sbin/waagent\nWAZD_CONF=/etc/waagent.conf\nWAZD_PIDFILE=/var/run/waagent.pid\n\ntest -x \"$WAZD_BIN\" || { echo \"$WAZD_BIN not installed\"; exit 5; }\ntest -e \"$WAZD_CONF\" || { echo \"$WAZD_CONF not found\"; exit 6; }\n\n. /etc/rc.status\n\n# First reset status of this service\nrc_reset\n\n# Return values acc. to LSB for all commands but status:\n# 0 - success\n# 1 - misc error\n# 2 - invalid or excess args\n# 3 - unimplemented feature (e.g. reload)\n# 4 - insufficient privilege\n# 5 - program not installed\n# 6 - program not configured\n#\n# Note that starting an already running service, stopping\n# or restarting a not-running service as well as the restart\n# with force-reload (in case signalling is not supported) are\n# considered a success.\n\n\ncase \"$1\" in\n    start)\n        echo -n \"Starting AzureLinuxAgent\"\n        ## Start daemon with startproc(8). If this fails\n        ## the echo return value is set appropriate.\n        startproc -f ${PYTHON} ${WAZD_BIN} -daemon\n        rc_status -v\n        ;;\n    stop)\n        echo -n \"Shutting down AzureLinuxAgent\"\n        ## Stop daemon with killproc(8) and if this fails\n        ## set echo the echo return value.\n        killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    try-restart)\n        ## Stop the service and if this succeeds (i.e. 
the\n        ## service was running before), start it again.\n        $0 status >/dev/null && $0 restart\n        rc_status\n        ;;\n    restart)\n        ## Stop the service and regardless of whether it was\n        ## running or not, start it again.\n        $0 stop\n        sleep 1\n        $0 start\n        rc_status\n        ;;\n    force-reload|reload)\n        rc_status\n        ;;\n    status)\n        echo -n \"Checking for service AzureLinuxAgent \"\n        ## Check status with checkproc(8), if process is running\n        ## checkproc will return with exit status 0.\n\n        checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    probe)\n        ;;\n    *)\n        echo \"Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}\"\n        exit 1\n        ;;\nesac\nrc_exit\n\"\"\"\n\n\nclass SuSEDistro(AbstractDistro):\n    \"\"\"\n    SuSE Distro concrete class\n    Put SuSE specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(SuSEDistro, self).__init__()\n        dist_info = DistInfo()\n        dist_info_fullname = DistInfo(fullname=1)\n\n        self.dhcp_client_name = 'dhcpcd'\n        if ((dist_info_fullname[0] == 'SUSE Linux Enterprise Server' and dist_info[1] >= '12') or \\\n                (dist_info_fullname[0] == 'openSUSE' and dist_info[1] >= '13.2')):\n            self.dhcp_client_name = 'wickedd-dhcp4'\n        self.dhcp_enabled = True\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n        self.getpidcmd = 'pidof '\n        self.hostname_file_path = '/etc/HOSTNAME'\n        self.init_file = suse_init_file\n        self.kernel_boot_options_file = '/boot/grub/menu.lst'\n        self.modprobe_path = '/usr/bin/modprobe'\n\n        self.requiredDeps += [\"/sbin/insserv\"]\n        self.reboot_path = '/sbin/reboot'\n        self.rpm_path = '/bin/rpm'\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_name = 'sshd'\n        if (dist_info[1] == \"11\"):\n            self.ps_path = '/bin/ps'\n        else:\n            self.ps_path = '/usr/bin/ps'\n\n        self.zypper_path = '/usr/bin/zypper'\n\n    def checkPackageInstalled(self, p):\n        if Run(\"rpm -q \" + p, chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"zypper list-updates | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def installAgentServiceScriptFiles(self):\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0o744)\n        except:\n            pass\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('insserv ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n        return Run('insserv -r ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def startDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" start\", chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" stop\", chk_err=False)\n\n    def getRdmaPackageVersion(self):\n        \"\"\"\n        \"\"\"\n        error, output = RunGetOutput(self.zypper_path + \" info \" + RdmaConfig.rmda_package_name)\n        if (error == RdmaConfig.process_success):\n  
          r = re.search(r\"Version: (\S+)\", output)\n            if r is not None:\n                package_version = r.groups()[0]  # e.g. package_version is \"20150707.140.0_k3.12.28_4-3.1\"\n                return package_version\n            else:\n                return None\n        else:\n            return None\n\n    def checkInstallHyperV(self):\n        error, output = RunGetOutput(self.ps_path + \" -ef\")\n        if (error != RdmaConfig.process_success):\n            return RdmaConfig.common_failed\n        else:\n            hv_kvp_daemon_service_process_name = \"hv_kvp_daemon\"\n            r = re.search(hv_kvp_daemon_service_process_name, output)\n            if r is None:\n                # the hv_kvp_daemon process is not running\n                Log(\"hv kvp daemon is not running.\")\n                error, output = RunGetOutput(self.rpm_path + \" -q hyper-v\", chk_err=False, log_cmd=False)\n                if (error == RdmaConfig.process_success):\n                    Log(\"the hyper-v package is installed, but hv_kvp_daemon not started\")\n                    return RdmaConfig.hv_kvp_daemon_not_started\n                else:\n                    error, output = RunGetOutput(self.zypper_path + \" -n install --force hyper-v\")\n                    Log(\"install hyper-v return code: \" + str(error) + \" output: \" + str(output))\n                    if (error != RdmaConfig.process_success):\n                        return RdmaConfig.common_failed\n                    else:\n                        self.rebootMachine()\n                        return RdmaConfig.process_success\n            else:\n                Log(\"KVP daemon is running\")\n                return RdmaConfig.process_success\n\n    def rdmaUpdate(self, updateRdmaRepository=None):\n        # Give the hv_kvp_daemon some time to start up.\n        time.sleep(10)\n        check_install_result = self.checkInstallHyperV()\n        if (check_install_result == RdmaConfig.process_success):\n            # Wait some more, in case the RDMA driver version has not yet been passed in by KVP.\n            time.sleep(10)\n\n            nd_driver_version = self.getNdDriverVersion()\n            if (nd_driver_version is None):\n                raise RdmaError(RdmaConfig.driver_version_not_found)\n            else:\n                check_result = self.checkRDMA(nd_driver_version=nd_driver_version)\n                Log(\"RDMA version check result is \" + str(check_result))\n                if (check_result == RdmaConfig.UpToDate):\n                    return\n                elif (check_result == RdmaConfig.OutOfDate):\n                    self.rdmaUpdatePackage(host_version=nd_driver_version,\n                                           updateRdmaRepository=updateRdmaRepository)\n                elif (check_result == RdmaConfig.DriverVersionNotFound):\n                    raise RdmaError(RdmaConfig.driver_version_not_found)\n                elif (check_result == RdmaConfig.Unknown):\n                    raise RdmaError(RdmaConfig.unknown_error)\n        else:\n            raise RdmaError(RdmaConfig.check_install_hv_utils_failed)\n
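\n    # Repository handling sketch (comments only, not executed): rdmaUpdatePackage below first lists the registered repositories and adds the msft-rdma-pack repository when it is missing, roughly equivalent to:\n    #   zypper lr -u                           # list repositories with their URLs\n    #   zypper ar <repo-url> msft-rdma-pack    # <repo-url> is whatever updateRdmaRepository passes in\n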
\n    def rdmaUpdatePackage(self, host_version, updateRdmaRepository=None):\n        # check the repository first\n        if (updateRdmaRepository is not None):\n            error, output = RunGetOutput(self.zypper_path + \" lr -u\")\n            rdma_pack_repository_name = \"msft-rdma-pack\"\n            rdma_pack_result = re.search(rdma_pack_repository_name, output)\n            if rdma_pack_result is None:\n                Log(\"msft-rdma-pack repository not registered; adding it\")\n                error, output = RunGetOutput(\n                    self.zypper_path + \" ar \" + str(updateRdmaRepository) + \" \" + rdma_pack_repository_name)\n                # wait for the cache build.\n                time.sleep(20)\n                Log(\"error result is \" + str(error) + \" output is : \" + str(output))\n            else:\n                Log(\"output is: \" + str(output))\n                Log(\"msft-rdma-pack found\")\n\n        returnCode, message = RunGetOutput(self.zypper_path + \" --no-gpg-checks refresh\")\n        Log(\"refresh repo return code is \" + str(returnCode) + \" output is: \" + str(message))\n        # install the wrapper package, which will put the driver RPM packages under /opt/microsoft/rdma\n        returnCode, message = RunGetOutput(self.zypper_path + \" -n remove \" + RdmaConfig.wrapper_package_name)\n        Log(\"remove wrapper package return code is \" + str(returnCode) + \" output is: \" + str(message))\n        returnCode, message = RunGetOutput(\n            self.zypper_path + \" --non-interactive install --force \" + RdmaConfig.wrapper_package_name)\n        Log(\"install wrapper package return code is \" + str(returnCode) + \" output is: \" + str(message))\n        r = os.listdir(\"/opt/microsoft/rdma\")\n        if r:  # os.listdir never returns None; an empty list means no driver RPMs were dropped\n            for filename in r:\n                if re.match(RdmaConfig.rmda_package_name + r\"-\\d{8}\\.(%s).+\" % host_version, filename):\n                    error, output = RunGetOutput(\n                        self.zypper_path + \" --non-interactive remove \" + RdmaConfig.rmda_package_name)\n                    Log(\"remove rdma package result is \" + str(error) + \" output is: \" + str(output))\n                    Log(\"Installing RPM /opt/microsoft/rdma/\" + filename)\n                    error, output = RunGetOutput(\n                        self.zypper_path + \" --non-interactive install --force /opt/microsoft/rdma/%s\" % filename)\n                    Log(\"Install rdma package result is \" + str(error) + \" output is: \" + str(output))\n                    if (error == RdmaConfig.process_success):\n                        self.rebootMachine()\n                    else:\n                        raise RdmaError(RdmaConfig.package_install_failed)\n        else:\n            Log(\"RDMA drivers not found in /opt/microsoft/rdma\")\n            raise RdmaError(RdmaConfig.package_not_found)\n
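\n    # Version-matching example (values from the comments in checkRDMA below; illustrative only): with nd_driver_version 140.0, the pattern ^[0-9]+[.](140.0).+ matches package_version 20150707.140.0_k3.12.28_4-1.1 (UpToDate), while 20150707_k3.12.28_4-3.1 does not match (OutOfDate).\n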
\n    def checkRDMA(self, nd_driver_version=None):\n        if (nd_driver_version is None):\n            nd_driver_version = self.getNdDriverVersion()\n        if (nd_driver_version is None or nd_driver_version == \"\"):\n            return RdmaConfig.DriverVersionNotFound\n        package_version = self.getRdmaPackageVersion()\n        if (package_version is None or package_version == \"\"):\n            return RdmaConfig.OutOfDate\n        else:\n            # package_version looks like 20150707_k3.12.28_4-3.1 or 20150707.140.0_k3.12.28_4-1.1\n            # nd_driver_version looks like 140.0\n            Log(\"nd_driver_version is \" + str(nd_driver_version) + \" package_version is \" + str(package_version))\n            if (nd_driver_version is not None):\n                r = re.match(\"^[0-9]+[.](%s).+\" % nd_driver_version,\n                             package_version)  # NdDriverVersion should be embedded in the package version\n                if not r:  # the package version does not carry the host ND version, so do an update\n                    return RdmaConfig.OutOfDate\n                else:\n                    return RdmaConfig.UpToDate\n            return RdmaConfig.Unknown\n\n    def rebootMachine(self):\n        Log(\"rebooting the machine\")\n        RunGetOutput(self.reboot_path)\n\n\n############################################################\n#   redhatDistro\n############################################################\n\nredhat_init_file = \"\"\"\\\n#!/bin/bash\n#\n# Init file for AzureLinuxAgent.\n#\n# chkconfig: 2345 60 80\n# description: AzureLinuxAgent\n#\n\n# source function library\n. /etc/rc.d/init.d/functions\n\nRETVAL=0\nFriendlyName=\"AzureLinuxAgent\"\nWAZD_BIN=/usr/sbin/waagent\n\nstart()\n{\n    echo -n $\"Starting $FriendlyName: \"\n    $WAZD_BIN -daemon &\n}\n\nstop()\n{\n    echo -n $\"Stopping $FriendlyName: \"\n    killproc -p /var/run/waagent.pid $WAZD_BIN\n    RETVAL=$?\n    echo\n    return $RETVAL\n}\n\ncase \"$1\" in\n    start)\n        start\n        ;;\n    stop)\n        stop\n        ;;\n    restart)\n        stop\n        start\n        ;;\n    reload)\n        ;;\n    report)\n        ;;\n    status)\n        status $WAZD_BIN\n        RETVAL=$?\n        ;;\n    *)\n        echo $\"Usage: $0 {start|stop|restart|status}\"\n        RETVAL=1\nesac\nexit $RETVAL\n\"\"\"\n\n\nclass redhatDistro(AbstractDistro):\n    \"\"\"\n    Redhat Distro concrete class\n    Put Redhat specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(redhatDistro, self).__init__()\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_restart_option = 'condrestart'\n        self.ssh_service_name = 'sshd'\n        self.hostname_file_path = None if DistInfo()[1] < '7.0' else '/etc/hostname'\n        self.init_file = redhat_init_file\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n\n    def publishHostname(self, name):\n        super(redhatDistro, self).publishHostname(name)\n        if DistInfo()[1] < '7.0':\n            filepath = \"/etc/sysconfig/network\"\n            if os.path.isfile(filepath):\n                ReplaceFileContentsAtomic(filepath, \"HOSTNAME=\" + name + \"\\n\"\n                                          + \"\\n\".join(\n                    filter(lambda a: not a.startswith(\"HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                                      + \"\\n\".join(\n                filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o744)\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('chkconfig --add waagent')\n\n    def uninstallAgentService(self):\n        return Run('chkconfig --del ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n
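\n    # Return-convention note (illustrative): Run() returns the process exit status, so checkPackageInstalled below yields 0 (not installed) when yum list installed <pkg> exits non-zero, and 1 when the package is present; checkDependencies relies on this inverted-looking convention.\n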
\n    def checkPackageInstalled(self, p):\n        if Run(\"yum list installed \" + p, chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"yum check-update | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Generic dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        try:\n            __import__('pyasn1')\n        except ImportError:\n            Error(GuestAgentLongName + \" requires python-pyasn1 for your Linux distribution.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n\n############################################################\n#   centosDistro\n############################################################\n\nclass centosDistro(redhatDistro):\n    \"\"\"\n    CentOS Distro concrete class\n    Put CentOS specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(centosDistro, self).__init__()\n\n    def rdmaUpdate(self, updateRdmaRepository=None):\n        pass\n\n    def checkRDMA(self):\n        pass\n\n\n############################################################\n#   oracleDistro\n############################################################\n\nclass oracleDistro(redhatDistro):\n    \"\"\"\n    Oracle Distro concrete class\n    Put Oracle specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(oracleDistro, self).__init__()\n\n\n############################################################\n#   asianuxDistro\n############################################################\n\nclass asianuxDistro(redhatDistro):\n    \"\"\"\n    Asianux Distro concrete class\n    Put Asianux specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(asianuxDistro, self).__init__()\n\n\n############################################################\n#   CoreOSDistro\n############################################################\n\nclass CoreOSDistro(AbstractDistro):\n    \"\"\"\n    CoreOS Distro concrete class\n    Put CoreOS specific behavior here...\n    \"\"\"\n    CORE_UID = 500\n\n    def __init__(self):\n        super(CoreOSDistro, self).__init__()\n        self.requiredDeps += [\"/usr/bin/systemctl\"]\n        self.agent_service_name = 'waagent'\n        self.init_script_file = '/etc/systemd/system/waagent.service'\n        self.fileBlackList.append(\"/etc/machine-id\")\n        self.dhcp_client_name = 'systemd-networkd'\n        self.getpidcmd = 'pidof '\n        self.shadow_file_mode = 0o640\n        self.waagent_path = '/usr/share/oem/bin'\n        self.python_path = '/usr/share/oem/python/bin'\n        self.dhcp_enabled = True\n        if 'PATH' in os.environ:\n            os.environ['PATH'] = \"{0}:{1}\".format(os.environ['PATH'], self.python_path)\n        else:\n            os.environ['PATH'] = self.python_path\n\n        if 'PYTHONPATH' in os.environ:\n            os.environ['PYTHONPATH'] = \"{0}:{1}\".format(os.environ['PYTHONPATH'], self.waagent_path)\n        else:\n            os.environ['PYTHONPATH'] = self.waagent_path\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        There is no package manager in 
CoreOS.  Return 1 since it must be preinstalled.\n        \"\"\"\n        return 1\n\n    def checkDependencies(self):\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def checkPackageUpdateable(self, p):\n        \"\"\"\n        There is no package manager in CoreOS.  Return 0 since it can't be updated via package.\n        \"\"\"\n        return 0\n\n    def startAgentService(self):\n        return Run('systemctl start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        return Run('systemctl stop ' + self.agent_service_name)\n\n    def restartSshService(self):\n        \"\"\"\n        SSH is socket activated on CoreOS. No need to restart it.\n        \"\"\"\n        return 0\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n\n    def RestartInterface(self, iface):\n        Run(\"systemctl restart systemd-networkd\")\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        uidmin = None\n        try:\n            uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:\n            Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"useradd --create-home --password '*' \" + user\n            if expiration != None:\n                command += \" --expiredate \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            Log(\"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n        if password != None:\n            self.changePass(user, password)\n        try:\n            if password == None:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0o700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n\n    def startDHCP(self):\n        Run(\"systemctl start \" + self.dhcp_client_name, chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"systemctl stop \" + self.dhcp_client_name, chk_err=False)\n\n    def translateCustomData(self, data):\n        return base64.b64decode(data)\n\n    def getConfigurationPath(self):\n        return \"/usr/share/oem/waagent.conf\"\n\n\n############################################################\n#   debianDistro\n############################################################    \ndebian_init_file = \"\"\"\\\n#!/bin/sh\n### BEGIN INIT INFO\n# Provides:          AzureLinuxAgent\n# Required-Start:    $network $syslog\n# Required-Stop:     $network $syslog\n# Should-Start:      $network $syslog\n# Should-Stop:       $network $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: AzureLinuxAgent\n# Description:       AzureLinuxAgent\n### END INIT INFO\n\n. 
/lib/lsb/init-functions\n\nOPTIONS=\"-daemon\"\nWAZD_BIN=/usr/sbin/waagent\nWAZD_PID=/var/run/waagent.pid\n\ncase \"$1\" in\n    start)\n        log_begin_msg \"Starting AzureLinuxAgent...\"\n        pid=$( pidofproc $WAZD_BIN )\n        if [ -n \"$pid\" ] ; then\n              log_begin_msg \"Already running.\"\n              log_end_msg 0\n              exit 0\n        fi\n        start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS\n        log_end_msg $?\n        ;;\n\n    stop)\n        log_begin_msg \"Stopping AzureLinuxAgent...\"\n        start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID\n        ret=$?\n        rm -f $WAZD_PID\n        log_end_msg $ret\n        ;;\n    force-reload)\n        $0 restart\n        ;;\n    restart)\n        $0 stop\n        $0 start\n        ;;\n    status)\n        status_of_proc $WAZD_BIN && exit 0 || exit $?\n        ;;\n    *)\n        log_success_msg \"Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}\"\n        exit 1\n        ;;\nesac\n\nexit 0\n\"\"\"\n\n\nclass debianDistro(AbstractDistro):\n    \"\"\"\n    debian Distro concrete class\n    Put debian specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(debianDistro, self).__init__()\n        self.requiredDeps += [\"/usr/sbin/update-rc.d\"]\n        self.init_file = debian_init_file\n        self.agent_package_name = 'walinuxagent'\n        self.dhcp_client_name = 'dhclient'\n        self.getpidcmd = 'pidof '\n        self.shadow_file_mode = 0o640\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Check that the package is installed.\n        Return 1 if installed, 0 if not installed.\n        This method of using dpkg-query\n        allows wildcards to be present in the\n        package name.\n        \"\"\"\n        if not Run(\"dpkg-query -W -f='${Status}\\n' '\" + p + \"' | grep ' installed' 2>&1\", chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Debian dependency check.  
python-pyasn1 is NOT needed.\n        Return 1 unless all dependencies are satisfied.\n        NOTE: using network*manager will catch either package name on Ubuntu or Debian.\n        \"\"\"\n        if self.checkPackageInstalled('network*manager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"apt-get update ; apt-get upgrade -us | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent - do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0o744)\n        except OSError as e:\n            ErrorWithPrefix('installAgentServiceScriptFiles',\n                            'Exception: ' + str(e) + ' occurred creating ' + self.init_script_file)\n            return 1\n        return 0\n\n    def registerAgentService(self):\n        if self.installAgentServiceScriptFiles() == 0:\n            return Run('update-rc.d waagent defaults')\n        else:\n            return 1\n\n    def uninstallAgentService(self):\n        return Run('update-rc.d -f ' + self.agent_service_name + ' remove')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n
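\n# Illustrative dpkg-query invocation behind debianDistro.checkPackageInstalled above; the wildcard form passed by checkDependencies matches the network-manager package under either its Debian or Ubuntu name:\n#   dpkg-query -W -f='${Status}\\n' 'network*manager' | grep ' installed'\n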
\n\n############################################################\n#   KaliDistro - WIP\n#       Functioning on Kali 1.1.0a so far\n############################################################\nclass KaliDistro(debianDistro):\n    \"\"\"\n    Kali Distro concrete class\n    Put Kali specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(KaliDistro, self).__init__()\n\n\n############################################################\n#   UbuntuDistro\n############################################################\nubuntu_upstart_file = \"\"\"\\\n#walinuxagent - start Azure agent\n\ndescription \"walinuxagent\"\nauthor \"Ben Howard <ben.howard@canonical.com>\"\n\nstart on (filesystem and started rsyslog)\n\npre-start script\n\n    WALINUXAGENT_ENABLED=1\n    [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent\n\n    if [ \"$WALINUXAGENT_ENABLED\" != \"1\" ]; then\n        exit 1\n    fi\n\n    if [ ! -x /usr/sbin/waagent ]; then\n        exit 1\n    fi\n\n    # Load the udf module\n    modprobe -b udf\nend script\n\nexec /usr/sbin/waagent -daemon\n\"\"\"\n\n\nclass UbuntuDistro(debianDistro):\n    \"\"\"\n    Ubuntu Distro concrete class\n    Put Ubuntu specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(UbuntuDistro, self).__init__()\n        self.init_script_file = '/etc/init/waagent.conf'\n        self.init_file = ubuntu_upstart_file\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\"]\n        self.dhcp_client_name = None\n        self.getpidcmd = 'pidof '\n\n    def registerAgentService(self):\n        return self.installAgentServiceScriptFiles()\n\n    def uninstallAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent - do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        os.remove('/etc/init/' + self.agent_service_name + '.conf')\n        return 0\n\n    def unregisterAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent - do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def deprovisionWarnUser(self):\n        \"\"\"\n        Ubuntu specific warning string from Deprovision.\n        \"\"\"\n        print(\"WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Ubuntu uses resolvconf by default, and removing /etc/resolv.conf (a\n        symlink to /run/resolvconf/resolv.conf) would break it. We therefore\n        check whether resolvconf is in use; if it is, we remove only the\n        resolvconf artifacts, otherwise we remove /etc/resolv.conf itself.\n        \"\"\"\n        if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':\n            Log(\"resolvconf is not configured. 
Removing /etc/resolv.conf\")\n            self.fileBlackList.append('/etc/resolv.conf')\n        else:\n            Log(\"resolvconf is enabled; leaving /etc/resolv.conf intact\")\n            resolvConfD = '/etc/resolvconf/resolv.conf.d/'\n            self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original'])\n        for f in os.listdir(LibDir) + self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def getDhcpClientName(self):\n        if self.dhcp_client_name != None:\n            return self.dhcp_client_name\n        if DistInfo()[1] == '12.04':\n            self.dhcp_client_name = 'dhclient3'\n        else:\n            self.dhcp_client_name = 'dhclient'\n        return self.dhcp_client_name\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Wait until the ssh host key is generated by cloud init.\n        \"\"\"\n        for retry in range(0, 10):\n            if (os.path.isfile(path)):\n                return True\n            time.sleep(1)\n        Error(\"Can't find host key: {0}\".format(path))\n        return False\n\n\n############################################################\n#   LinuxMintDistro\n############################################################    \n\nclass LinuxMintDistro(UbuntuDistro):\n    \"\"\"\n    LinuxMint Distro concrete class\n    Put LinuxMint specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(LinuxMintDistro, self).__init__()\n\n\n############################################################\n#   fedoraDistro\n############################################################    \nfedora_systemd_service = \"\"\"\\\n[Unit]\nDescription=Azure Linux Agent\nAfter=network.target\nAfter=sshd.service\nConditionFileIsExecutable=/usr/sbin/waagent\nConditionPathExists=/etc/waagent.conf\n\n[Service]\nType=simple\nExecStart=/usr/sbin/waagent -daemon\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n\n\nclass fedoraDistro(redhatDistro):\n    \"\"\"\n    FedoraDistro concrete class\n    Put Fedora specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(fedoraDistro, self).__init__()\n        self.service_cmd = '/usr/bin/systemctl'\n        self.hostname_file_path = '/etc/hostname'\n        self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service'\n        self.init_file = fedora_systemd_service\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX='\n\n    def publishHostname(self, name):\n        SetFileContents(self.hostname_file_path, name + '\\n')\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                                      + \"\\n\".join(\n                filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n'))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o644)\n        return Run(self.service_cmd + ' daemon-reload')\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(self.service_cmd + ' enable ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n  
      \"\"\"\n        Call the service subsystem to remove the waagent script.\n        \"\"\"\n        return Run(self.service_cmd + ' disable ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Calls self.stopAgentService(), then self.uninstallAgentService().\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n\n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' stop ' + self.agent_service_name, False)\n\n    def restartSshService(self):\n        \"\"\"\n        Service call to (re)start the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" + self.ssh_service_restart_option + \" \" + self.ssh_service_name\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code: \" + str(retcode))\n        return retcode\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Query the package database for the presence of an installed package.\n        \"\"\"\n        import rpm\n        ts = rpm.TransactionSet()\n        rpms = ts.dbMatch(rpm.RPMTAG_PROVIDES, p)\n        return len(rpms) > 0\n\n    def deleteRootPassword(self):\n        return Run(\"/sbin/usermod root -p '!!'\")\n\n    def packagedInstall(self, buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies the generated waagent.conf under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot + '/etc'):\n            os.mkdir(buildroot + '/etc')\n        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)\n\n        if not os.path.exists(buildroot + '/etc/logrotate.d'):\n            os.mkdir(buildroot + '/etc/logrotate.d')\n        SetFileContents(buildroot + '/etc/logrotate.d/WALinuxAgent', WaagentLogrotate)\n\n        self.init_script_file = buildroot + self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint)\n        Run('/sbin/usermod ' + user + ' -G wheel')\n\n    def DeleteAccount(self, user):\n        Run('/sbin/usermod ' + user + ' -G \"\"')\n        super(fedoraDistro, self).DeleteAccount(user)\n\n\n############################################################\n#   FreeBSD\n############################################################\nFreeBSDWaagentConf = \"\"\"\\\n#\n# Azure Linux Agent Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y   
    # Password authentication for root account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ufs2            #\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\n\nbsd_init_file = \"\"\"\\\n#! /bin/sh\n\n# PROVIDE: waagent\n# REQUIRE: DAEMON cleanvar sshd\n# BEFORE: LOGIN\n# KEYWORD: nojail\n\n. /etc/rc.subr\nexport PATH=$PATH:/usr/local/bin\nname=\"waagent\"\nrcvar=\"waagent_enable\"\ncommand=\"/usr/sbin/${name}\"\ncommand_interpreter=\"/usr/local/bin/python\"\nwaagent_flags=\" daemon &\"\n\npidfile=\"/var/run/waagent.pid\"\n\nload_rc_config $name\nrun_rc_command \"$1\"\n\n\"\"\"\nbsd_activate_resource_disk_txt = \"\"\"\\\n#!/usr/bin/env python\n\nimport os\nimport sys\nimport imp\n\n# waagent has no '.py' therefore create waagent module import manually.\n__name__='setupmain' #prevent waagent.__main__ from executing\nwaagent=imp.load_source('waagent','/tmp/waagent') \nwaagent.LoggerInit('/var/log/waagent.log','/dev/console')\nfrom waagent import RunGetOutput,Run\nConfig=waagent.ConfigurationProvider(None)\nformat = Config.get(\"ResourceDisk.Format\")\nif format == None or format.lower().startswith(\"n\"):\n    sys.exit(0)\ndevice_base = 'da1'\ndevice = \"/dev/\" + device_base\nfor entry in RunGetOutput(\"mount\")[1].split():\n    if entry.startswith(device + \"s1\"):\n        waagent.Log(\"ActivateResourceDisk: \" + device + \"s1 is already mounted.\")\n        sys.exit(0)\nmountpoint = Config.get(\"ResourceDisk.MountPoint\")\nif mountpoint == None:\n    mountpoint = \"/mnt/resource\"\nwaagent.CreateDir(mountpoint, \"root\", 0o755)\nfs = Config.get(\"ResourceDisk.Filesystem\")\nif waagent.FreeBSDDistro().mediaHasFilesystem(device) == False :\n    Run(\"newfs \" + device + \"s1\")\nif Run(\"mount \" + device + \"s1 \" + mountpoint):\n    waagent.Error(\"ActivateResourceDisk: Failed to mount resource disk (\" + device + \"s1).\")\n    sys.exit(0)\nwaagent.Log(\"Resource disk (\" + device + \"s1) is mounted at \" + mountpoint + \" with fstype \" + fs)\nwaagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT)\nswap = Config.get(\"ResourceDisk.EnableSwap\")\nif swap == None or swap.lower().startswith(\"n\"):\n    sys.exit(0)\nsizeKB = int(Config.get(\"ResourceDisk.SwapSizeMB\")) * 1024\nif os.path.isfile(mountpoint + \"/swapfile\") and os.path.getsize(mountpoint + \"/swapfile\") != (sizeKB * 1024):\n    os.remove(mountpoint + \"/swapfile\")\nif not os.path.isfile(mountpoint + \"/swapfile\"):\n    Run(\"dd if=/dev/zero of=\" + mountpoint + \"/swapfile bs=1024 count=\" + str(sizeKB))\nif Run(\"mdconfig -a -t vnode -f \" + mountpoint + \"/swapfile -u 0\"):\n    
waagent.Error(\"ActivateResourceDisk: Configuring swap - Failed to create md0\")\nif not Run(\"swapon /dev/md0\"):\n    waagent.Log(\"Enabled \" + str(sizeKB) + \" KB of swap at \" + mountpoint + \"/swapfile\")\nelse:\n    waagent.Error(\"ActivateResourceDisk: Failed to activate swap at \" + mountpoint + \"/swapfile\")\n\"\"\"\n\n\nclass FreeBSDDistro(AbstractDistro):\n    \"\"\"\n    FreeBSD Distro concrete class\n    Put FreeBSD specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        super(FreeBSDDistro, self).__init__()\n        self.agent_service_name = os.path.basename(sys.argv[0])\n        self.selinux = False\n        self.ssh_service_name = 'sshd'\n        self.ssh_config_file = '/etc/ssh/sshd_config'\n        self.hostname_file_path = '/etc/hostname'\n        self.dhcp_client_name = 'dhclient'\n        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'pw'\n            , 'openssl', 'fdisk', 'sed', 'grep', 'sudo']\n        self.init_script_file = '/etc/rc.d/waagent'\n        self.init_file = bsd_init_file\n        self.agent_package_name = 'WALinuxAgent'\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\", '/etc/resolv.conf']\n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\"]\n        self.grubKernelBootOptionsFile = '/boot/loader.conf'\n        self.grubKernelBootOptionsLine = ''\n        self.getpidcmd = 'pgrep -n'\n        self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if='  # custom data max len is 64k\n        self.sudoers_dir_base = '/usr/local/etc'\n        self.waagent_conf_file = FreeBSDWaagentConf\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o777)\n        AppendFileContents(\"/etc/rc.conf\", \"waagent_enable='YES'\\n\")\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(\"services_mkdb \" + self.init_script_file)\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n\n    def deleteRootPassword(self):\n        \"\"\"\n        BSD root password removal.\n        \"\"\"\n        filepath = \"/etc/master.passwd\"\n        ReplaceStringInFile(filepath, r'root:.*?:', 'root::')\n        # ReplaceFileContentsAtomic(filepath,\"root:*LOCK*:14600::::::\\n\"\n        #                          + \"\\n\".join(filter(lambda a: not a.startswith(\"root:\"),GetFileContents(filepath).split('\\n'))))\n        os.chmod(filepath, self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')\n        RunGetOutput(\"pwd_mkdb -u root /etc/master.passwd\")\n        Log(\"Root password deleted.\")\n        return 0\n\n    def changePass(self, user, password):\n        return RunSendStdin(\"pw usermod \" + user + \" -h 0 \", password, log_cmd=False, use_shell=False)\n\n    def load_ata_piix(self):\n        return 0\n\n    def unload_ata_piix(self):\n        return 0\n
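\n    # Note on changePass above (illustrative): pw usermod <user> -h 0 tells pw(8) to read the new password from file descriptor 0 (stdin); RunSendStdin supplies it there, so the password never appears on the command line.\n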
\n    def checkDependencies(self):\n        \"\"\"\n        FreeBSD dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self, buildroot):\n        pass\n\n    def GetInterfaceName(self):\n        \"\"\"\n        Return the name of the\n        active ethernet interface.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        return iface\n\n    def RestartInterface(self, iface):\n        Run(\"service netif restart\")\n\n    def GetIpv4Address(self):\n        \"\"\"\n        Return the ip of the\n        active ethernet interface.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        return inet\n\n    def GetMacAddress(self):\n        \"\"\"\n        Return the MAC address of the\n        active ethernet interface as a list of integers.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        l = mac.split(':')\n        r = []\n        for i in l:\n            r.append(int(i, 16))  # int() replaces the Python 2-only string.atoi()\n        return r\n\n    def GetFreeBSDEthernetInfo(self):\n        \"\"\"\n        There is no SIOCGIFCONF\n        on FreeBSD - just parse ifconfig.\n        Returns strings: iface, inet4_addr, and mac\n        or 'None,None,None' if unable to parse.\n        We will sleep and retry as the network must be up.\n        \"\"\"\n        code, output = RunGetOutput(\"ifconfig\", chk_err=False)\n        Log(output)\n        retries = 10\n        cmd = 'ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP '\n        code = 1\n\n        while code > 0:\n            if code > 0 and retries == 0:\n                Error(\"GetFreeBSDEthernetInfo - Failed to detect ethernet interface\")\n                return None, None, None\n            code, output = RunGetOutput(cmd, chk_err=False)\n            retries -= 1\n            if code > 0 and retries > 0:\n                Log(\"GetFreeBSDEthernetInfo - Error: retry ethernet detection \" + str(retries))\n                if retries == 9:\n                    c, o = RunGetOutput(\"ifconfig | grep -A1 -B2 ether\", chk_err=False)\n                    if c == 0:\n                        t = o.replace('\\n', ' ')\n                        t = t.split()\n                        i = t[0][:-1]\n                        Log(RunGetOutput('id')[1])\n                        Run('dhclient ' + i)\n                time.sleep(10)\n\n        j = output.replace('\\n', ' ')\n        j = j.split()\n        iface = j[0][:-1]\n\n        for i in range(len(j)):\n            if j[i] == 'inet':\n                inet = j[i + 1]\n            elif j[i] == 'ether':\n                mac = j[i + 1]\n\n        return iface, inet, mac\n
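\n    # Example (illustrative): for mac 00:15:5d:01:02:03, GetMacAddress above returns [0, 21, 93, 1, 2, 3] - each colon-separated octet is parsed as a base-16 integer.\n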
Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"pw useradd \" + user + \" -m\"\n            if expiration != None:\n                command += \" -e \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            # 'already exists' belongs to the outer userentry check,\n            # not to the result of Run(command)\n            Log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n\n        if password != None:\n            self.changePass(user, password)\n        try:\n            # for older distros create sudoers.d\n            if not os.path.isdir(MyDistro.sudoers_dir_base + '/sudoers.d/'):\n                # create the /etc/sudoers.d/ directory\n                os.mkdir(MyDistro.sudoers_dir_base + '/sudoers.d')\n                # add the include of sudoers.d to the /etc/sudoers\n                SetFileContents(MyDistro.sudoers_dir_base + '/sudoers', GetFileContents(\n                    MyDistro.sudoers_dir_base + '/sudoers') + '\\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\\n')\n            if password == None:\n                SetFileContents(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", 0o440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0o700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n\n    def DeleteAccount(self, user):\n        \"\"\"\n        Delete the 'user'.\n        Clear utmp first, to avoid error.\n        Removes the /etc/sudoers.d/waagent file.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        if userentry == None:\n            Error(\"DeleteAccount: \" + user + \" not found.\")\n            return\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/login.defs\"):\n                uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry[2] < uidmin:\n            Error(\"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n            return\n        Run(\"> /var/run/utmp\")  # Delete utmp to prevent error if we are the 'user' deleted\n        pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                               stdin=subprocess.PIPE).pid\n        try:\n            os.remove(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\")\n        except:\n            pass\n        return\n\n    def ActivateResourceDiskNoThread(self):\n        \"\"\"\n        Format, mount, and if specified in the configuration\n        set resource disk as swap.\n        \"\"\"\n        global DiskActivated\n        Run('cp /usr/sbin/waagent /tmp/')\n        SetFileContents('/tmp/bsd_activate_resource_disk.py', bsd_activate_resource_disk_txt)\n        Run('chmod +x /tmp/bsd_activate_resource_disk.py')\n        pid = subprocess.Popen([\"/tmp/bsd_activate_resource_disk.py\", \"\"]).pid\n        Log(\"Spawning bsd_activate_resource_disk.py\")\n        DiskActivated = True\n        return\n\n    def Install(self):\n        \"\"\"\n        Install the agent service.\n        Check dependencies.\n        Create /etc/waagent.conf and move old version to\n        /etc/waagent.conf.old\n        Copy RulesFiles to /var/lib/waagent\n        Create /etc/logrotate.d/waagent\n        Set /etc/ssh/sshd_config ClientAliveInterval to 180\n        Call ApplyVNUMAWorkaround()\n        \"\"\"\n        if MyDistro.checkDependencies():\n            return 1\n        os.chmod(sys.argv[0], 0o755)\n        SwitchCwd()\n        for a in RulesFiles:\n            if os.path.isfile(a):\n                if os.path.isfile(GetLastPathElement(a)):\n                    os.remove(GetLastPathElement(a))\n                shutil.move(a, \".\")\n                Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a))\n        MyDistro.registerAgentService()\n        if os.path.isfile(\"/etc/waagent.conf\"):\n            try:\n                os.remove(\"/etc/waagent.conf.old\")\n            except:\n                pass\n            try:\n                os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n                Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n            except:\n                pass\n        SetFileContents(\"/etc/waagent.conf\", self.waagent_conf_file)\n        if os.path.exists('/usr/local/etc/logrotate.d/'):\n            SetFileContents(\"/usr/local/etc/logrotate.d/waagent\", WaagentLogrotate)\n        filepath = \"/etc/ssh/sshd_config\"\n        ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n        a.startswith(\"ClientAliveInterval\"),\n                                                             GetFileContents(filepath).split(\n                                                                 '\\n'))) + \"\\nClientAliveInterval 180\\n\")\n        Log(\"Configured SSH client probing to keep connections alive.\")\n        # ApplyVNUMAWorkaround()\n        return 0\n\n    def mediaHasFilesystem(self, dsk):\n        if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep \"invalid fdisk partition table found\" ', False):\n            return False\n        return True\n\n    def mountDVD(self, dvd, location):\n        # At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location\n        retcode, out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml')\n        if retcode != 0:\n            return retcode, out\n\n        ovfxml = 
(GetFileContents(location + \"/ovf-env.xml\", asbin=False))\n        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128:\n            ovfxml = ovfxml[3:]  # The BOM was not stripped; the first three bytes are > 128 and not valid characters, so drop them.\n        ovfxml = ovfxml.strip(chr(0x00))\n        ovfxml = \"\".join(filter(lambda x: ord(x) < 128, ovfxml))\n        ovfxml = re.sub(r'</Environment>.*\\Z', '', ovfxml, 0, re.DOTALL)\n        ovfxml += '</Environment>'\n        SetFileContents(location + \"/ovf-env.xml\", ovfxml)\n        return retcode, out\n\n    def GetHome(self):\n        return '/home'\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout by updating the kernel config\n        \"\"\"\n        timeout = Config.get(\"OS.RootDeviceScsiTimeout\")\n        if timeout:\n            Run(\"sysctl kern.cam.da.default_timeout=\" + timeout)\n\n    def setScsiDiskTimeout(self):\n        return\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        return\n\n    def getProcessorCores(self):\n        return int(RunGetOutput(\"sysctl hw.ncpu | awk '{print $2}'\")[1])\n\n    def getTotalMemory(self):\n        # integer division so the result stays an int on Python 3\n        return int(RunGetOutput(\"sysctl hw.realmem | awk '{print $2}'\")[1]) // 1024\n\n    def setDefaultGateway(self, gateway):\n        Run(\"/sbin/route add default \" + gateway, chk_err=False)\n\n    def routeAdd(self, net, mask, gateway):\n        Run(\"/sbin/route add -net \" + net + \" \" + mask + \" \" + gateway, chk_err=False)\n\n\n############################################################\n# END DISTRO CLASS DEFS\n############################################################\n\n# This lets us index into a string or an array of integers transparently.\ndef Ord(a):\n    \"\"\"\n    Allows indexing into a string or an array of integers transparently.\n    Generic utility function.\n    \"\"\"\n    if type(a) == type(\"a\"):\n        a = ord(a)\n    return a\n\n\ndef IsLinux():\n    \"\"\"\n    Returns True if platform is Linux.\n    Generic utility function.\n    \"\"\"\n    return (platform.uname()[0] == \"Linux\")\n\n\ndef GetLastPathElement(path):\n    \"\"\"\n    Similar to basename.\n    Generic utility function.\n    \"\"\"\n    return path.rsplit('/', 1)[1]\n\n\ndef GetFileContents(filepath, asbin=False):\n    \"\"\"\n    Read and return contents of 'filepath'.\n    \"\"\"\n    mode = 'r'\n    if asbin:\n        mode += 'b'\n    c = None\n    try:\n        with open(filepath, mode) as F:\n            c = F.read()\n    except IOError as e:\n        ErrorWithPrefix('GetFileContents', 'Reading from file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return c\n\n\ndef SetFileContents(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str:\n        contents = contents.encode('latin-1', 'ignore')\n    try:\n        with open(filepath, \"wb+\") as F:\n            F.write(contents)\n    except IOError as e:\n        ErrorWithPrefix('SetFileContents', 'Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\ndef AppendFileContents(filepath, contents):\n    \"\"\"\n    Append 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str:\n        if sys.version_info[0] == 3:\n            contents = contents.encode('latin-1').decode('latin-1')\n        elif sys.version_info[0] == 2:\n            contents = contents.encode('latin-1')\n    try:\n        with open(filepath, 
\"a+\") as F:\n            F.write(contents)\n    except IOError as e:\n        ErrorWithPrefix('AppendFileContents', 'Appending to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\ndef ReplaceFileContentsAtomic(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath' by creating a temp file, and replacing original.\n    \"\"\"\n    handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath))\n    if type(contents) == str:\n        contents = contents.encode('latin-1')\n    try:\n        os.write(handle, contents)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    finally:\n        os.close(handle)\n    try:\n        os.rename(temp, filepath)\n        return None\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.remove(filepath)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.rename(temp, filepath)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))\n        return 1\n    return 0\n\n\ndef GetLineStartingWith(prefix, filepath):\n    \"\"\"\n    Return line from 'filepath' if the line startswith 'prefix'\n    \"\"\"\n    for line in GetFileContents(filepath).split('\\n'):\n        if line.startswith(prefix):\n            return line\n    return None\n\n\ndef Run(cmd, chk_err=True):\n    \"\"\"\n    Calls RunGetOutput on 'cmd', returning only the return code.\n    If chk_err=True then errors will be reported in the log.\n    If chk_err=False then errors will be suppressed from the log.\n    \"\"\"\n    retcode, out = RunGetOutput(cmd, chk_err)\n    return retcode\n\n\ndef RunGetOutput(cmd, chk_err=True, log_cmd=True):\n    \"\"\"\n    Wrapper for subprocess.check_output.\n    Execute 'cmd'.  Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        LogIfVerbose(cmd)\n    try:\n        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\n    except subprocess.CalledProcessError as e:\n        if chk_err and log_cmd:\n            Error('CalledProcessError.  Error Code is ' + str(e.returncode))\n            Error('CalledProcessError.  Command string was ' + e.cmd)\n            Error('CalledProcessError.  Command result was ' + (e.output[:-1]).decode('latin-1'))\n        return e.returncode, e.output.decode('latin-1')\n    return 0, output.decode('latin-1')\n\n\ndef RunSendStdin(cmd, input, chk_err=True, log_cmd=True, use_shell=True):\n    \"\"\"\n    Wrapper for subprocess.Popen.\n    Execute 'cmd', sending 'input' to STDIN of 'cmd'.\n    Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        LogIfVerbose(str(cmd) + str(input))\n    try:\n        me = subprocess.Popen([cmd], shell=use_shell, stdin=subprocess.PIPE, stderr=subprocess.STDOUT,\n                              stdout=subprocess.PIPE)\n        output = me.communicate(input)\n    except OSError as e:\n        if chk_err and log_cmd:\n            # 'me' and 'output' may not exist if Popen itself raised, so report\n            # the exception rather than the (possibly unbound) process state\n            Error('RunSendStdin: OSError.  Error is ' + str(e))\n            Error('RunSendStdin: Command string was ' + str(cmd))\n        return 1, str(e)\n    if me.returncode != 0 and chk_err is True and log_cmd:\n        Error('CalledProcessError.  Error Code is ' + str(me.returncode))\n        Error('CalledProcessError.  Command string was ' + cmd)\n        Error('CalledProcessError.  Command result was ' + output[0].decode('latin-1'))\n    return me.returncode, output[0].decode('latin-1')\n\n\ndef GetNodeTextData(a):\n    \"\"\"\n    Filter non-text nodes from DOM tree\n    \"\"\"\n    for b in a.childNodes:\n        if b.nodeType == b.TEXT_NODE:\n            return b.data\n\n\ndef GetHome():\n    \"\"\"\n    Attempt to guess the $HOME location.\n    Return the path string.\n    \"\"\"\n    home = None\n    try:\n        home = GetLineStartingWith(\"HOME\", \"/etc/default/useradd\").split('=')[1].strip()\n    except:\n        pass\n    if (home == None) or (home.startswith(\"/\") == False):\n        home = \"/home\"\n    return home\n\n\ndef ChangeOwner(filepath, user):\n    \"\"\"\n    Lookup user.  Attempt chown 'filepath' to 'user'.\n    \"\"\"\n    p = None\n    try:\n        p = pwd.getpwnam(user)\n    except:\n        pass\n    if p != None:\n        os.chown(filepath, p[2], p[3])\n\n\ndef CreateDir(dirpath, user, mode):\n    \"\"\"\n    Attempt os.makedirs, catch all exceptions.\n    Call ChangeOwner afterwards.\n    \"\"\"\n    try:\n        os.makedirs(dirpath, mode)\n    except:\n        pass\n    ChangeOwner(dirpath, user)\n\n\ndef CreateAccount(user, password, expiration, thumbprint):\n    \"\"\"\n    Create a user account, with 'user', 'password', 'expiration', ssh keys\n    and sudo permissions.\n    Returns None if successful, error string on failure.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry != None and userentry[2] < uidmin:\n        Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n        return \"Failed to set password for system user: \" + user + \" (0x06).\"\n    if userentry == None:\n        command = \"useradd -m \" + user\n        if expiration != None:\n            command += \" -e \" + expiration.split('.')[0]\n        if Run(command):\n            Error(\"Failed to create user account: \" + user)\n            return \"Failed to create user account: \" + user + \" (0x07).\"\n    else:\n        Log(\"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n    if password != None:\n        MyDistro.changePass(user, password)\n    try:\n        # for older distros create sudoers.d\n        if not os.path.isdir('/etc/sudoers.d/'):\n            # create the /etc/sudoers.d/ directory\n            os.mkdir('/etc/sudoers.d/')\n            # add the include of sudoers.d to the /etc/sudoers\n            SetFileContents('/etc/sudoers', GetFileContents('/etc/sudoers') + '\\n#includedir /etc/sudoers.d\\n')\n        if password == None:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n        else:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n        os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n    except:\n        Error(\"CreateAccount: Failed to configure sudo access for user.\")\n        return \"Failed to configure sudo privileges (0x08).\"\n    home = MyDistro.GetHome()\n    if thumbprint != None:\n        dir = home + \"/\" + user + \"/.ssh\"\n        CreateDir(dir, user, 0o700)\n        pub = dir + \"/id_rsa.pub\"\n        prv = dir + \"/id_rsa\"\n        Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n        SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n        for f in [pub, prv]:\n            os.chmod(f, 0o600)\n            ChangeOwner(f, user)\n        SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n        ChangeOwner(dir + \"/authorized_keys\", user)\n    Log(\"Created user account: \" + user)\n    return None\n\n\ndef DeleteAccount(user):\n    \"\"\"\n    Delete the 'user'.\n    Clear utmp first, to avoid error.\n    Removes the /etc/sudoers.d/waagent file.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    if userentry == None:\n        Error(\"DeleteAccount: \" + user + \" not found.\")\n        return\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry[2] < uidmin:\n        Error(\"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n        return\n    Run(\"> /var/run/utmp\")  # Delete utmp to prevent error if we are the 'user' deleted\n    Run(\"userdel -f -r \" + user)\n    try:\n        os.remove(\"/etc/sudoers.d/waagent\")\n    except:\n        pass\n    return\n\n\ndef IsInRangeInclusive(a, low, high):\n    \"\"\"\n    Return True if 'low' <= a <= 'high'\n    \"\"\"\n    return (a >= low and a <= high)\n\n\ndef IsPrintable(ch):\n    \"\"\"\n    Return True if character is displayable.\n    \"\"\"\n    return (IsInRangeInclusive(ch, Ord('A'), Ord('Z'))\n            or IsInRangeInclusive(ch, Ord('a'), Ord('z'))\n            or IsInRangeInclusive(ch, Ord('0'), Ord('9')))\n\n\ndef HexDump(buffer, size):\n    \"\"\"\n    Return a hex-formatted dump of 'buffer' of length 'size'.\n    \"\"\"\n    if size < 0:\n        size = len(buffer)\n    result = \"\"\n    for i in range(0, size):\n        if (i % 16) == 0:\n            result += \"%06X: \" % i\n        byte = buffer[i]\n        if type(byte) == str:\n            byte = ord(byte.decode('latin1'))\n        result += \"%02X \" % byte\n        if (i & 15) == 7:\n            result += \" \"\n        if ((i + 1) % 16) == 0 or (i + 1) == size:\n            j = i\n            while ((j + 1) % 16) != 0:\n                result += \"   \"\n                if (j & 7) == 7:\n                    result += \" \"\n                j += 1\n            result += \" \"\n            for j in range(i - (i % 16), i + 1):\n                byte = buffer[j]\n                if type(byte) == str:\n                    byte = ord(byte.decode('latin1'))\n                k = '.'\n                if IsPrintable(byte):\n                    k = chr(byte)\n                result += k\n            if (i + 1) != size:\n                result += \"\\n\"\n    return result\n\n\ndef SimpleLog(file_path, message):\n    if not file_path or len(message) < 1:\n        return\n    t = time.localtime()\n    t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n    lines = re.sub(re.compile(r'^(.)', re.MULTILINE), t + r'\\1', message)\n    with open(file_path, \"a\") as F:\n        # join the filter result so this also works on Python 3, where filter()\n        # returns an iterator without an encode() method\n        lines = ''.join(filter(lambda x: x in string.printable, lines))\n        F.write(lines + \"\\n\")\n\n\nclass Logger(object):\n    \"\"\"\n    The Agent's logging assumptions are:\n    For Log, and LogWithPrefix all messages are logged to the\n    self.file_path and to the self.con_path.  Setting either path\n    parameter to None skips that log.  If Verbose is enabled, messages\n    calling the LogIfVerbose method will be logged to file_path yet\n    not to con_path.  
Error and Warn messages are normal log messages\n    with the 'ERROR:' or 'WARNING:' prefix added.\n    \"\"\"\n\n    def __init__(self, filepath, conpath, verbose=False):\n        \"\"\"\n        Construct an instance of Logger.\n        \"\"\"\n        self.file_path = filepath\n        self.con_path = conpath\n        self.verbose = verbose\n\n    def ThrottleLog(self, counter):\n        \"\"\"\n        Log everything up to 10, every 10 up to 100, then every 100.\n        \"\"\"\n        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)\n\n    def WriteToFile(self, message):\n        \"\"\"\n        Write 'message' to logfile.\n        \"\"\"\n        if self.file_path:\n            try:\n                with open(self.file_path, \"a\") as F:\n                    message = filter(lambda x: x in string.printable, message)\n\n                    # encoding works differently between interpreter versions; we keep\n                    # separate implementations to ensure backward compatibility\n                    if sys.version_info[0] == 3:\n                        message = ''.join(list(message)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n                    elif sys.version_info[0] == 2:\n                        message = message.encode('ascii', 'ignore')\n\n                    F.write(message + \"\\n\")\n            except IOError as e:\n                pass\n\n    def WriteToConsole(self, message):\n        \"\"\"\n        Write 'message' to /dev/console.\n        This supports serial port logging if the /dev/console\n        is redirected to ttys0 in kernel boot options.\n        \"\"\"\n        if self.con_path:\n            try:\n                with open(self.con_path, \"w\") as C:\n                    message = filter(lambda x: x in string.printable, message)\n\n                    # encoding works differently between interpreter versions; we keep\n                    # separate implementations to ensure backward compatibility\n                    if sys.version_info[0] == 3:\n                        message = ''.join(list(message)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n                    elif sys.version_info[0] == 2:\n                        message = message.encode('ascii', 'ignore')\n\n                    C.write(message + \"\\n\")\n            except IOError as e:\n                pass\n\n    def Log(self, message):\n        \"\"\"\n        Standard Log function.\n        Logs to self.file_path, and con_path\n        \"\"\"\n        self.LogWithPrefix(\"\", message)\n\n    def LogToConsole(self, message):\n        \"\"\"\n        Logs message to console by pre-pending each line of 'message' with current time.\n        \"\"\"\n        log_prefix = self._get_log_prefix(\"\")\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.WriteToConsole(line)\n\n    def LogToFile(self, message):\n        \"\"\"\n        Logs message to file by pre-pending each line of 'message' with current time.\n        \"\"\"\n        log_prefix = self._get_log_prefix(\"\")\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.WriteToFile(line)\n\n    def NoLog(self, message):\n        \"\"\"\n        Don't Log.\n        \"\"\"\n        pass\n\n    def LogIfVerbose(self, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        \"\"\"\n        self.LogWithPrefixIfVerbose('', message)\n
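\n    # Usage sketch (added, illustrative only): after LoggerInit() below has run,\n    # the module-level helpers log through a shared Logger instance, e.g.\n    #   LoggerInit('/var/log/waagent.log', '/dev/console')\n    #   Log('message')           # goes to the file and to the console\n    #   LogIfVerbose('detail')   # file only, and only when verbose is enabled\n    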
def LogWithPrefix(self, prefix, message):\n        \"\"\"\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        log_prefix = self._get_log_prefix(prefix)\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.WriteToFile(line)\n            self.WriteToConsole(line)\n\n    def LogWithPrefixIfVerbose(self, prefix, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        if self.verbose == True:\n            log_prefix = self._get_log_prefix(prefix)\n            for line in message.split('\\n'):\n                line = log_prefix + line\n                self.WriteToFile(line)\n                self.WriteToConsole(line)\n\n    def Warn(self, message):\n        \"\"\"\n        Prepend the text \"WARNING:\" for each line in 'message'.\n        \"\"\"\n        self.LogWithPrefix(\"WARNING:\", message)\n\n    def Error(self, message):\n        \"\"\"\n        Call ErrorWithPrefix(message).\n        \"\"\"\n        ErrorWithPrefix(\"\", message)\n\n    def ErrorWithPrefix(self, prefix, message):\n        \"\"\"\n        Prepend the text \"ERROR:\" to the prefix for each line in 'message'.\n        Errors written to logfile, and /dev/console\n        \"\"\"\n        self.LogWithPrefix(\"ERROR:\", message)\n\n    def _get_log_prefix(self, prefix):\n        \"\"\"\n        Generates the log prefix with timestamp+'prefix'.\n        \"\"\"\n        t = time.localtime()\n        t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n        return t + prefix\n\ndef LoggerInit(log_file_path, log_con_path, verbose=False):\n    \"\"\"\n    Create log object and export its methods to global scope.\n    \"\"\"\n    global Log, LogToConsole, LogToFile, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger\n    l = Logger(log_file_path, log_con_path, verbose)\n    Log, LogToConsole, LogToFile, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger = l.Log, l.LogToConsole, l.LogToFile, l.LogWithPrefix, l.LogIfVerbose, l.LogWithPrefixIfVerbose, l.Error, l.ErrorWithPrefix, l.Warn, l.NoLog, l.ThrottleLog, l\n\n\ndef Linux_ioctl_GetInterfaceMac(ifname):\n    \"\"\"\n    Return the mac-address bound to the socket.\n    \"\"\"\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', (ifname[:15] + ('\\0' * 241)).encode('latin-1')))\n    return ''.join(['%02X' % Ord(char) for char in info[18:24]])\n\n\ndef GetFirstActiveNetworkInterfaceNonLoopback():\n    \"\"\"\n    Return the interface name, and ip addr of the\n    first active non-loopback interface.\n    \"\"\"\n    iface = ''\n    expected = 16  # how many devices should I expect...\n    is_64bits = sys.maxsize > 2 ** 32\n    struct_size = 40 if is_64bits else 32  # for 64bit the size is 40 bytes, for 32bits it is 32 bytes.\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    buff = array.array('B', b'\\0' * (expected * struct_size))\n    retsize = (struct.unpack('iL', fcntl.ioctl(s.fileno(), 0x8912,\n                                               struct.pack('iL', expected * struct_size, buff.buffer_info()[0]))))[0]\n    if retsize == (expected * struct_size):\n        Warn('SIOCGIFCONF returned more than ' + str(expected) + ' up 
network interfaces.')\n    # array.tostring() was removed in Python 3.9; prefer tobytes() when available\n    s = buff.tobytes() if hasattr(buff, 'tobytes') else buff.tostring()\n    preferred_nic = Config.get(\"Network.Interface\")\n    for i in range(0, struct_size * expected, struct_size):\n        iface = s[i:i + 16].split(b'\\0', 1)[0]\n        if iface == b'lo':\n            continue\n        elif preferred_nic is None:\n            break\n        elif iface.decode('latin-1') == preferred_nic:  # iface is bytes; compare as str\n            break\n    return iface.decode('latin-1'), socket.inet_ntoa(s[i + 20:i + 24])\n\n\ndef GetIpv4Address():\n    \"\"\"\n    Return the IP of the\n    first active non-loopback interface.\n    \"\"\"\n    iface, addr = GetFirstActiveNetworkInterfaceNonLoopback()\n    return addr\n\n\ndef HexStringToByteArray(a):\n    \"\"\"\n    Return hex string packed into a binary struct.\n    \"\"\"\n    b = b\"\"\n    for c in range(0, len(a) // 2):\n        b += struct.pack(\"B\", int(a[c * 2:c * 2 + 2], 16))\n    return b\n\n\ndef GetMacAddress():\n    \"\"\"\n    Convenience function, returns mac addr bound to\n    first non-loopback interface.\n    \"\"\"\n    ifname = ''\n    while len(ifname) < 2:\n        ifname = GetFirstActiveNetworkInterfaceNonLoopback()[0]\n    a = Linux_ioctl_GetInterfaceMac(ifname)\n    return HexStringToByteArray(a)\n\n\ndef DeviceForIdePort(n):\n    \"\"\"\n    Return device name attached to ide port 'n'.\n    \"\"\"\n    if n > 3:\n        return None\n    g0 = \"00000000\"\n    if n > 1:\n        g0 = \"00000001\"\n        n = n - 2\n    device = None\n    path = \"/sys/bus/vmbus/devices/\"\n    for vmbus in os.listdir(path):\n        guid = GetFileContents(path + vmbus + \"/device_id\").lstrip('{').split('-')\n        if guid[0] == g0 and guid[1] == \"000\" + str(n):\n            for root, dirs, files in os.walk(path + vmbus):\n                if root.endswith(\"/block\"):\n                    device = dirs[0]\n                    break\n                else:  # older distros\n                    for d in dirs:\n                        if ':' in d and \"block\" == d.split(':')[0]:\n                            device = d.split(':')[1]\n                            break\n            break\n    return device\n\n\nclass HttpResourceGoneError(Exception):\n    pass\n\n\ndef DoInstallRHUIRPM():\n    \"\"\"\n    Install RHUI RPM according to VM region\n    \"\"\"\n    rhuiRPMinstalled = os.path.exists(LibDir + \"/rhuirpminstalled\")\n    if rhuiRPMinstalled:\n        return\n    else:\n        SetFileContents(LibDir + \"/rhuirpminstalled\", \"\")\n\n    Log(\"Begin to install RHUI RPM\")\n    cmd = r\"grep '<Location>' /var/lib/waagent/ExtensionsConfig* --no-filename | sed 's/<Location>//g' | sed 's/<\\/Location>//g' | sed 's/ //g' | tr 'A-Z' 'a-z' | uniq\"\n\n    retcode, out = RunGetOutput(cmd, True)\n    region = out.rstrip(\"\\n\")\n\n    # try a few times at most to get the region info\n    for i in range(0, 8):\n        if (region != \"\"):\n            break\n        Log(\"region info is empty, now wait 15 seconds...\")\n        time.sleep(15)\n        retcode, out = RunGetOutput(cmd, True)\n        region = out.rstrip(\"\\n\")\n\n    if region == \"\":\n        Log(\"could not detect region info, now use the default region: eastus2\")\n        region = \"eastus2\"\n\n    scriptFilePath = \"/tmp/install-rhui-rpm.sh\"\n\n    if not os.path.exists(scriptFilePath):\n        Error(scriptFilePath + \" does not exist, now quit RHUI RPM installation.\")\n        return\n    # chmod a+x script file\n    os.chmod(scriptFilePath, 0o100)\n    Log(\"begin to run \" + scriptFilePath)\n\n    # execute the 
downloaded script file\n    retcode, out = RunGetOutput(scriptFilePath, True)\n    if retcode != 0:\n        Error(\"execute script \" + scriptFilePath + \" failed, return code: \" + str(\n            retcode) + \", now exit RHUI RPM installation.\");\n        return\n\n    Log(\"install RHUI RPM completed\")\n\n\nclass Util(object):\n    \"\"\"\n    Http communication class.\n    Base of GoalState, and Agent classes.\n    \"\"\"\n    RetryWaitingInterval = 10\n\n    def __init__(self):\n        self.Endpoint = None\n\n    def _ParseUrl(self, url):\n        secure = False\n        host = self.Endpoint\n        path = url\n        port = None\n\n        # \"http[s]://hostname[:port][/]\"\n        if url.startswith(\"http://\"):\n            url = url[7:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n        elif url.startswith(\"https://\"):\n            secure = True\n            url = url[8:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n\n        if host is None:\n            raise ValueError(\"Host is invalid:{0}\".format(url))\n\n        if (\":\" in host):\n            pos = host.rfind(\":\")\n            port = int(host[pos + 1:])\n            host = host[0:pos]\n\n        return host, port, secure, path\n\n    def GetHttpProxy(self, secure):\n        \"\"\"\n        Get http_proxy and https_proxy from environment variables.\n        Username and password is not supported now.\n        \"\"\"\n        host = Config.get(\"HttpProxy.Host\")\n        port = Config.get(\"HttpProxy.Port\")\n        return (host, port)\n\n    def _HttpRequest(self, method, host, path, port=None, data=None, secure=False,\n                     headers=None, proxyHost=None, proxyPort=None):\n        resp = None\n        conn = None\n        try:\n            if secure:\n                port = 443 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httpclient.HTTPSConnection(proxyHost, proxyPort, timeout=10)\n                    conn.set_tunnel(host, port)\n                    # If proxy is used, full url is needed.\n                    path = \"https://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httpclient.HTTPSConnection(host, port, timeout=10)\n            else:\n                port = 80 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httpclient.HTTPConnection(proxyHost, proxyPort, timeout=10)\n                    # If proxy is used, full url is needed.\n                    path = \"http://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httpclient.HTTPConnection(host, port, timeout=10)\n            if headers == None:\n                conn.request(method, path, data)\n            else:\n                conn.request(method, path, data, headers)\n            resp = conn.getresponse()\n        except httpclient.HTTPException as e:\n            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))\n        except IOError as e:\n            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))\n        return resp\n\n    def HttpRequest(self, method, url, 
data=None,\n                    headers=None, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Sending http request to server\n        On error, sleep 10 and maxRetry times.\n        Return the output buffer or None.\n        \"\"\"\n        LogIfVerbose(\"HTTP Req: {0} {1}\".format(method, url))\n        LogIfVerbose(\"HTTP Req: Data={0}\".format(data))\n        LogIfVerbose(\"HTTP Req: Header={0}\".format(headers))\n        try:\n            host, port, secure, path = self._ParseUrl(url)\n        except ValueError as e:\n            Error(\"Failed to parse url:{0}\".format(url))\n            return None\n\n        # Check proxy\n        proxyHost, proxyPort = (None, None)\n        if chkProxy:\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        # If httplib/httpclient module is not built with ssl support. Fallback to http\n        if secure and not hasattr(httpclient, \"HTTPSConnection\"):\n            Warn(\"httplib/httpclient is not built with ssl support\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        # If httplib/httpclient module doesn't support https tunnelling. Fallback to http\n        if secure and \\\n                proxyHost is not None and \\\n                proxyPort is not None and \\\n                not hasattr(httpclient.HTTPSConnection, \"set_tunnel\"):\n            Warn(\"httplib/httpclient doesn't support https tunnelling(new in python 2.7)\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        resp = self._HttpRequest(method, host, path, port=port, data=data,\n                                 secure=secure, headers=headers,\n                                 proxyHost=proxyHost, proxyPort=proxyPort)\n        for retry in range(0, maxRetry):\n            if resp is not None and \\\n                    (resp.status == httpclient.OK or \\\n                     resp.status == httpclient.CREATED or \\\n                     resp.status == httpclient.ACCEPTED):\n                return resp;\n\n            if resp is not None and resp.status == httpclient.GONE:\n                raise HttpResourceGoneError(\"Http resource gone.\")\n\n            Error(\"Retry={0}\".format(retry))\n            Error(\"HTTP Req: {0} {1}\".format(method, url))\n            Error(\"HTTP Req: Data={0}\".format(data))\n            Error(\"HTTP Req: Header={0}\".format(headers))\n            if resp is None:\n                Error(\"HTTP Err: response is empty.\".format(retry))\n            else:\n                Error(\"HTTP Err: Status={0}\".format(resp.status))\n                Error(\"HTTP Err: Reason={0}\".format(resp.reason))\n                Error(\"HTTP Err: Header={0}\".format(resp.getheaders()))\n                Error(\"HTTP Err: Body={0}\".format(resp.read()))\n\n            time.sleep(self.__class__.RetryWaitingInterval)\n            resp = self._HttpRequest(method, host, path, port=port, data=data,\n                                     secure=secure, headers=headers,\n                                     proxyHost=proxyHost, proxyPort=proxyPort)\n\n        return None\n\n    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"GET\", url, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"HEAD\", url, headers=headers,\n                                
maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"POST\", url, data=data, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"PUT\", url, data=data, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"DELETE\", url, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url'.\n        \"\"\"\n        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry,\n                            chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url' with\n        x-ms-agent-name and x-ms-version\n        headers.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName,\n            \"x-ms-version\": ProtocolVersion\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3,\n                                 chkProxy=False):\n        \"\"\"\n        Return output of get using ssl cert.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName,\n            \"x-ms-version\": ProtocolVersion,\n            \"x-ms-cipher-name\": \"DES_EDE3_CBC\",\n            \"x-ms-guest-agent-public-x509-cert\": transportCert\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):\n        headers = {\n            \"x-ms-agent-name\": GuestAgentName,\n            \"Content-Type\": \"text/xml; charset=utf-8\",\n            \"x-ms-version\": ProtocolVersion\n        }\n        try:\n            return self.HttpPost(url, data=data, headers=headers,\n                                 maxRetry=maxRetry, chkProxy=chkProxy)\n        except HttpResourceGoneError as e:\n            Error(\"Failed to post: {0} {1}\".format(url, e))\n            return None\n\n\n__StorageVersion = \"2014-02-14\"\n\n\ndef GetBlobType(url):\n    restutil = Util()\n    # Check blob type\n    LogIfVerbose(\"Check blob type.\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    blobPropResp = restutil.HttpHead(url, {\n        \"x-ms-date\": timestamp,\n        'x-ms-version': __StorageVersion\n    }, chkProxy=True);\n    blobType = None\n    if blobPropResp is None:\n        Error(\"Can't get status blob type.\")\n        return None\n    blobType = blobPropResp.getheader(\"x-ms-blob-type\")\n    LogIfVerbose(\"Blob type={0}\".format(blobType))\n    return blobType\n\n\ndef PutBlockBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Upload block blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    ret = restutil.HttpPut(url, data, {\n        \"x-ms-date\": timestamp,\n        \"x-ms-blob-type\": \"BlockBlob\",\n        \"Content-Length\": 
str(len(data)),\n        \"x-ms-version\": __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to upload block blob for status.\")\n        return -1\n    return 0\n\n\ndef PutPageBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Replace old page blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    # Align to 512 bytes (integer division so this stays an int on Python 3)\n    pageBlobSize = ((len(data) + 511) // 512) * 512\n    ret = restutil.HttpPut(url, \"\", {\n        \"x-ms-date\": timestamp,\n        \"x-ms-blob-type\": \"PageBlob\",\n        \"Content-Length\": \"0\",\n        \"x-ms-blob-content-length\": str(pageBlobSize),\n        \"x-ms-version\": __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to clean up page blob for status\")\n        return -1\n\n    # str.index() raises ValueError when '?' is absent, so test membership instead\n    if '?' not in url:\n        url = \"{0}?comp=page\".format(url)\n    else:\n        url = \"{0}&comp=page\".format(url)\n\n    LogIfVerbose(\"Upload page blob\")\n    pageMax = 4 * 1024 * 1024  # Max page size: 4MB\n    start = 0\n    end = 0\n    while end < len(data):\n        end = min(len(data), start + pageMax)\n        contentSize = end - start\n        # Align to 512 bytes\n        pageEnd = ((end + 511) // 512) * 512\n        bufSize = pageEnd - start\n        buf = bytearray(bufSize)\n        buf[0: contentSize] = data[start: end]\n        # bytes(bytearray) works on both Python 2 and 3; buffer() is Python 2 only\n        ret = restutil.HttpPut(url, bytes(buf), {\n            \"x-ms-date\": timestamp,\n            \"x-ms-range\": \"bytes={0}-{1}\".format(start, pageEnd - 1),\n            \"x-ms-page-write\": \"update\",\n            \"x-ms-version\": __StorageVersion,\n            \"Content-Length\": str(pageEnd - start)\n        }, chkProxy=True)\n        if ret is None:\n            Error(\"Failed to upload page blob for status\")\n            return -1\n        start = end\n    return 0\n\n\ndef UploadStatusBlob(url, data):\n    LogIfVerbose(\"Upload status blob\")\n    LogIfVerbose(\"Status={0}\".format(data))\n    blobType = GetBlobType(url)\n\n    if blobType == \"BlockBlob\":\n        return PutBlockBlob(url, data)\n    elif blobType == \"PageBlob\":\n        return PutPageBlob(url, data)\n    else:\n        Error(\"Unknown blob type: {0}\".format(blobType))\n        return -1\n\n\nclass TCPHandler():\n    \"\"\"\n    Callback object for LoadBalancerProbeServer.\n    Recv and send LB probe messages.\n    \"\"\"\n\n    def __init__(self, lb_probe):\n        super(TCPHandler, self).__init__()\n        self.lb_probe = lb_probe\n\n    def GetHttpDateTimeNow(self):\n        \"\"\"\n        Return formatted gmtime \"Date: Fri, 25 Mar 2011 04:53:10 GMT\"\n        \"\"\"\n        return time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n\n    def handle(self):\n        \"\"\"\n        Log LB probe messages, read the socket buffer,\n        send LB probe response back to server.\n        \"\"\"\n        self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000\n        log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)]\n        strCounter = str(self.lb_probe.ProbeCounter)\n        if self.lb_probe.ProbeCounter == 1:\n            Log(\"Receiving LB probes.\")\n        log(\"Received LB probe # \" + strCounter)\n        self.request.recv(1024)\n        # socket.send() needs bytes on Python 3; encode the response explicitly\n        self.request.send(\n            (\"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\nContent-Type: text/html\\r\\nDate: \" + self.GetHttpDateTimeNow() + \"\\r\\n\\r\\nOK\").encode('latin-1'))\n\n\nclass LoadBalancerProbeServer(object):\n    \"\"\"\n    Threaded object to receive and 
send LB probe messages.\n    Load Balancer messages must be recv'd by\n    the load balancing server, or this node may be shut down.\n    \"\"\"\n\n    def __init__(self, port):\n        pass\n\n    def shutdown(self):\n        pass\n\n    def get_ip(self):\n        return None\n\n\nclass ConfigurationProvider(object):\n    \"\"\"\n    Parse and store key:values in waagent.conf\n    \"\"\"\n\n    def __init__(self, walaConfigFile):\n        self.values = dict()\n        if walaConfigFile is None:\n            walaConfigFile = MyDistro.getConfigurationPath()\n        if os.path.isfile(walaConfigFile) == False:\n            raise Exception(\"Missing configuration in {0}\".format(walaConfigFile))\n        try:\n            for line in GetFileContents(walaConfigFile).split('\\n'):\n                if not line.startswith(\"#\") and \"=\" in line:\n                    parts = line.split()[0].split('=')\n                    value = parts[1].strip(\"\\\" \")\n                    if value != \"None\":\n                        self.values[parts[0]] = value\n                    else:\n                        self.values[parts[0]] = None\n        except:\n            Error(\"Unable to parse {0}\".format(walaConfigFile))\n            raise\n        return\n\n    def get(self, key):\n        return self.values.get(key)\n\n    def yes(self, key):\n        configValue = self.get(key)\n        if (configValue is not None and configValue.lower().startswith(\"y\")):\n            return True\n        else:\n            return False\n\n    def no(self, key):\n        configValue = self.get(key)\n        if (configValue is not None and configValue.lower().startswith(\"n\")):\n            return True\n        else:\n            return False\n\n\nclass EnvMonitor(object):\n    \"\"\"\n    Monitor changes to dhcp and hostname.\n    If a dhcp client process restart has occurred, reset routes, dhcp with fabric.\n    \"\"\"\n\n    def __init__(self):\n        self.shutdown = False\n        self.HostName = socket.gethostname()\n        self.server_thread = threading.Thread(target=self.monitor)\n        self.server_thread.setDaemon(True)\n        self.server_thread.start()\n        self.published = False\n\n    def monitor(self):\n        \"\"\"\n        Monitor dhcp client pid and hostname.\n        If a dhcp client process restart has occurred, reset routes, dhcp with fabric.\n        \"\"\"\n        publish = Config.get(\"Provisioning.MonitorHostName\")\n        dhcpcmd = MyDistro.getpidcmd + ' ' + MyDistro.getDhcpClientName()\n        dhcppid = RunGetOutput(dhcpcmd)[1]\n        while not self.shutdown:\n            for a in RulesFiles:\n                if os.path.isfile(a):\n                    if os.path.isfile(GetLastPathElement(a)):\n                        os.remove(GetLastPathElement(a))\n                    shutil.move(a, \".\")\n                    Log(\"EnvMonitor: Moved \" + a + \" -> \" + LibDir)\n            MyDistro.setScsiDiskTimeout()\n            if publish != None and publish.lower().startswith(\"y\"):\n                try:\n                    if socket.gethostname() != self.HostName:\n                        Log(\"EnvMonitor: Detected host name change: \" + self.HostName + \" -> \" + socket.gethostname())\n                        self.HostName = socket.gethostname()\n                        WaAgent.UpdateAndPublishHostName(self.HostName)\n                        dhcppid = RunGetOutput(dhcpcmd)[1]\n                        self.published = True\n                except:\n                    pass\n
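            # (added note) This else pairs with the MonitorHostName check above:\n            # when hostname publishing is disabled, treat the name as published.\n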
            else:\n                self.published = True\n            pid = \"\"\n            if not os.path.isdir(\"/proc/\" + dhcppid.strip()):\n                pid = RunGetOutput(dhcpcmd)[1]\n            if pid != \"\" and pid != dhcppid:\n                Log(\"EnvMonitor: Detected dhcp client restart. Restoring routing table.\")\n                WaAgent.RestoreRoutes()\n                dhcppid = pid\n            for child in Children:\n                if child.poll() != None:\n                    Children.remove(child)\n            time.sleep(5)\n\n    def SetHostName(self, name):\n        \"\"\"\n        Generic call to MyDistro.setHostname(name).\n        Complains to the log on error.\n        \"\"\"\n        if socket.gethostname() == name:\n            self.published = True\n        elif MyDistro.setHostname(name):\n            Error(\"Error: SetHostName: Cannot set hostname to \" + name)\n            return (\"Error: SetHostName: Cannot set hostname to \" + name)\n\n    def IsHostnamePublished(self):\n        \"\"\"\n        Return self.published\n        \"\"\"\n        return self.published\n\n    def ShutdownService(self):\n        \"\"\"\n        Stop server communication and join the thread to main thread.\n        \"\"\"\n        self.shutdown = True\n        self.server_thread.join()\n\n\nclass Certificates(object):\n    \"\"\"\n    Object containing certificates of host and provisioned user.\n    Parses and splits certificates into files.\n    \"\"\"\n\n    #     <CertificateFile xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"certificates10.xsd\">\n    #     <Version>2010-12-15</Version>\n    #     <Incarnation>2</Incarnation>\n    #     <Format>Pkcs7BlobWithPfxContents</Format>\n    #     <Data>MIILTAY...\n    #     </Data>\n    #     </CertificateFile>\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset the Role, Incarnation\n        \"\"\"\n        self.Incarnation = None\n        self.Role = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse multiple certificates into separate files.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"Certificates.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [\"CertificateFile\", \"Version\", \"Incarnation\",\n                  \"Format\", \"Data\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"Certificates.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"CertificateFile\":\n            Error(\"Certificates.Parse: root not CertificateFile\")\n            return None\n        SetFileContents(\"Certificates.p7m\",\n                        \"MIME-Version: 1.0\\n\"\n                        + \"Content-Disposition: attachment; filename=\\\"Certificates.p7m\\\"\\n\"\n                        + \"Content-Type: application/x-pkcs7-mime; name=\\\"Certificates.p7m\\\"\\n\"\n                        + \"Content-Transfer-Encoding: base64\\n\\n\"\n                        + GetNodeTextData(dom.getElementsByTagName(\"Data\")[0]))\n        if Run(\n                Openssl + \" cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | \" + Openssl + \" pkcs12 -nodes -password pass: -out Certificates.pem\"):\n            Error(\"Certificates.Parse: Failed to extract certificates from CMS message.\")\n            return self\n        # There may be 
multiple certificates in this package. Split them.\n        file = open(\"Certificates.pem\")\n        pindex = 1\n        cindex = 1\n        output = open(\"temp.pem\", \"w\")\n        for line in file.readlines():\n            output.write(line)\n            if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$', line):\n                output.close()\n                if re.match(r'[-]+END .*?KEY[-]+$', line):\n                    os.rename(\"temp.pem\", str(pindex) + \".prv\")\n                    pindex += 1\n                else:\n                    os.rename(\"temp.pem\", str(cindex) + \".crt\")\n                    cindex += 1\n                output = open(\"temp.pem\", \"w\")\n        output.close()\n        os.remove(\"temp.pem\")\n        keys = dict()\n        index = 1\n        filename = str(index) + \".crt\"\n        while os.path.isfile(filename):\n            thumbprint = \\\n            (RunGetOutput(Openssl + \" x509 -in \" + filename + \" -fingerprint -noout\")[1]).rstrip().split('=')[\n                1].replace(':', '').upper()\n            pubkey = RunGetOutput(Openssl + \" x509 -in \" + filename + \" -pubkey -noout\")[1]\n            keys[pubkey] = thumbprint\n            os.rename(filename, thumbprint + \".crt\")\n            os.chmod(thumbprint + \".crt\", 0o600)\n            MyDistro.setSelinuxContext(thumbprint + '.crt', 'unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".crt\"\n        index = 1\n        filename = str(index) + \".prv\"\n        while os.path.isfile(filename):\n            pubkey = RunGetOutput(Openssl + \" rsa -in \" + filename + \" -pubout 2> /dev/null \")[1]\n            os.rename(filename, keys[pubkey] + \".prv\")\n            os.chmod(keys[pubkey] + \".prv\", 0o600)\n            MyDistro.setSelinuxContext(keys[pubkey] + '.prv', 'unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".prv\"\n        return self\n\n\nclass SharedConfig(object):\n    \"\"\"\n    Parse role endpoint server and goal state config.\n    \"\"\"\n\n    #\n    # <SharedConfig version=\"1.0.0.0\" goalStateIncarnation=\"1\">\n    #   <Deployment name=\"db00a7755a5e4e8a8fe4b19bc3b330c3\" guid=\"{ce5a036f-5c93-40e7-8adf-2613631008ab}\" incarnation=\"2\">\n    #     <Service name=\"MyVMRoleService\" guid=\"{00000000-0000-0000-0000-000000000000}\" />\n    #     <ServiceInstance name=\"db00a7755a5e4e8a8fe4b19bc3b330c3.1\" guid=\"{d113f4d7-9ead-4e73-b715-b724b5b7842c}\" />\n    #   </Deployment>\n    #   <Incarnation number=\"1\" instance=\"MachineRole_IN_0\" guid=\"{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}\" />\n    #   <Role guid=\"{73d95f1c-6472-e58e-7a1a-523554e11d46}\" name=\"MachineRole\" settleTimeSeconds=\"10\" />\n    #   <LoadBalancerSettings timeoutSeconds=\"0\" waitLoadBalancerProbeCount=\"8\">\n    #     <Probes>\n    #       <Probe name=\"MachineRole\" />\n    #       <Probe name=\"55B17C5E41A1E1E8FA991CF80FAC8E55\" />\n    #       <Probe name=\"3EA4DBC19418F0A766A4C19D431FA45F\" />\n    #     </Probes>\n    #   </LoadBalancerSettings>\n    #   <OutputEndpoints>\n    #     <Endpoint name=\"MachineRole:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" type=\"SFS\">\n    #       <Target instance=\"MachineRole_IN_0\" endpoint=\"Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" />\n    #     </Endpoint>\n    #   </OutputEndpoints>\n    #   <Instances>\n    #     <Instance id=\"MachineRole_IN_0\" address=\"10.115.153.75\">\n    #       <FaultDomains randomId=\"0\" 
updateId=\"0\" updateCount=\"0\" />\n    #       <InputEndpoints>\n    #         <Endpoint name=\"a\" address=\"10.115.153.75:80\" protocol=\"http\" isPublic=\"true\" loadBalancedPublicAddress=\"70.37.106.197:80\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"80\" to=\"80\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #         <Endpoint name=\"Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp\" address=\"10.115.153.75:3389\" protocol=\"tcp\" isPublic=\"false\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"3389\" to=\"3389\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #         <Endpoint name=\"Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput\" address=\"10.115.153.75:20000\" protocol=\"tcp\" isPublic=\"true\" loadBalancedPublicAddress=\"70.37.106.197:3389\" enableDirectServerReturn=\"false\" isDirectAddress=\"false\" disableStealthMode=\"false\">\n    #           <LocalPorts>\n    #             <LocalPortRange from=\"20000\" to=\"20000\" />\n    #           </LocalPorts>\n    #         </Endpoint>\n    #       </InputEndpoints>\n    #     </Instance>\n    #   </Instances>\n    # </SharedConfig>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.RdmaMacAddress = None\n        self.RdmaIPv4Address = None\n        self.xmlText = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse and write configuration to file SharedConfig.xml.\n        \"\"\"\n        LogIfVerbose(xmlText)\n        self.reinitialize()\n        self.xmlText = xmlText\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [\"SharedConfig\", \"Deployment\", \"Service\",\n                  \"ServiceInstance\", \"Incarnation\", \"Role\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"SharedConfig.Parse: Missing \" + a)\n\n        node = dom.childNodes[0]\n        if node.localName != \"SharedConfig\":\n            Error(\"SharedConfig.Parse: root not SharedConfig\")\n\n        nodes = dom.getElementsByTagName(\"Instance\")\n        if nodes is not None and len(nodes) != 0:\n            node = nodes[0]\n            if node.hasAttribute(\"rdmaMacAddress\"):\n                addr = node.getAttribute(\"rdmaMacAddress\")\n                self.RdmaMacAddress = addr[0:2]\n                for i in range(1, 6):\n                    self.RdmaMacAddress += \":\" + addr[2 * i: 2 * i + 2]\n            if node.hasAttribute(\"rdmaIPv4Address\"):\n                self.RdmaIPv4Address = node.getAttribute(\"rdmaIPv4Address\")\n        return self\n\n    def Save(self):\n        LogIfVerbose(\"Save SharedConfig.xml\")\n        SetFileContents(\"SharedConfig.xml\", self.xmlText)\n\n    def InvokeTopologyConsumer(self):\n        program = Config.get(\"Role.TopologyConsumer\")\n        if program != None:\n            try:\n                Children.append(subprocess.Popen([program, LibDir + \"/SharedConfig.xml\"]))\n            except OSError as e:\n                ErrorWithPrefix('Agent.Run', 'Exception: ' + str(e) + ' occured launching ' + program)\n\n    def Process(self):\n        global rdma_configured\n        if not rdma_configured and self.RdmaMacAddress is not None and self.RdmaIPv4Address is 
\n    def Process(self):\n        global rdma_configured\n        if not rdma_configured and self.RdmaMacAddress is not None and self.RdmaIPv4Address is not None:\n            handler = RdmaHandler(self.RdmaMacAddress, self.RdmaIPv4Address)\n            handler.start()\n            rdma_configured = True\n        self.InvokeTopologyConsumer()\n\n\nrdma_configured = False\n\n\nclass RdmaConfig(object):\n    \"\"\"\n    Package names and result codes used for RDMA configuration.\n    \"\"\"\n    wrapper_package_name = 'msft-rdma-drivers'\n    rmda_package_name = 'msft-lis-rdma-kmp-default'\n\n    # error code definitions\n    process_success = 0\n    common_failed = 1\n    check_install_hv_utils_failed = 2\n    nd_driver_detect_error = 3\n    driver_version_not_found = 4\n    unknown_error = 5\n    package_not_found = 6\n    package_install_failed = 7\n    hv_kvp_daemon_not_started = 8\n\n    # check_rdma result values\n    UpToDate = 0\n    OutOfDate = 1\n    DriverVersionNotFound = 3\n    Unknown = -1\n\n\nclass RdmaError(Exception):\n    def __init__(self, error_code=RdmaConfig.process_success):\n        # Forward the argument to Exception so that str(e) carries the\n        # message (or code) in the logs.\n        super(RdmaError, self).__init__(error_code)\n        self.error_code = error_code\n\n\nclass RdmaHandler(object):\n    \"\"\"\n    Handle rdma configuration.\n    \"\"\"\n\n    def __init__(self, mac, ip_addr, dev=\"/dev/hvnd_rdma\",\n                 dat_conf_files=['/etc/dat.conf', '/etc/rdma/dat.conf',\n                                 '/usr/local/etc/dat.conf']):\n        self.mac = mac\n        self.ip_addr = ip_addr\n        self.dev = dev\n        self.dat_conf_files = dat_conf_files\n        self.data = ('rdmaMacAddress=\"{0}\" rdmaIPv4Address=\"{1}\"'\n                     '').format(self.mac, self.ip_addr)\n\n    def start(self):\n        \"\"\"\n        Start a new thread to process rdma.\n        \"\"\"\n        threading.Thread(target=self.process).start()\n\n    def process(self):\n        try:\n            self.set_dat_conf()\n            self.set_rdma_dev()\n            self.set_rdma_ip()\n        except RdmaError as e:\n            Error(\"Failed to configure rdma device: {0}\".format(e))\n\n    def set_dat_conf(self):\n        \"\"\"\n        The agent needs to search all possible locations for dat.conf.\n        \"\"\"\n        Log(\"Set dat.conf\")\n        for dat_conf_file in self.dat_conf_files:\n            if not os.path.isfile(dat_conf_file):\n                continue\n            try:\n                self.write_dat_conf(dat_conf_file)\n            except IOError as e:\n                raise RdmaError(\"Failed to write to dat.conf: {0}\".format(e))\n\n    def write_dat_conf(self, dat_conf_file):\n        Log(\"Write config to {0}\".format(dat_conf_file))\n        old = (r\"ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 \"\n               r\"dapl.2.0 \\\"\\S+ 0\\\"\")\n        new = (\"ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 \"\n               \"dapl.2.0 \\\"{0} 0\\\"\").format(self.ip_addr)\n        lines = GetFileContents(dat_conf_file)\n        lines = re.sub(old, new, lines)\n        SetFileContents(dat_conf_file, lines)\n\n    def set_rdma_dev(self):\n        \"\"\"\n        Write the config string to /dev/hvnd_rdma.\n        \"\"\"\n        Log(\"Set /dev/hvnd_rdma\")\n        self.wait_rdma_dev()\n        self.write_rdma_dev_conf()\n\n    def write_rdma_dev_conf(self):\n        Log(\"Write rdma config to {0}: {1}\".format(self.dev, self.data))\n        try:\n            with open(self.dev, \"w\") as c:\n                c.write(self.data)\n        except IOError as e:\n            raise RdmaError(\"Error writing {0}, {1}\".format(self.dev, e))\n
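\n    # The string written to the device follows the format assembled in\n    # __init__, e.g. (illustrative values):\n    #\n    #   rdmaMacAddress=\"00:15:5D:34:00:01\" rdmaIPv4Address=\"172.16.1.10\"\n    #\n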
    def wait_rdma_dev(self):\n        Log(\"Wait for /dev/hvnd_rdma\")\n        retry = 0\n        while retry < 120:\n            if os.path.exists(self.dev):\n                return\n            time.sleep(1)\n            retry += 1\n        raise RdmaError(\"The device did not show up within 120 seconds\")\n\n    def set_rdma_ip(self):\n        Log(\"Set ip addr for rdma\")\n        try:\n            if_name = MyDistro.getInterfaceNameByMac(self.mac)\n            # Azure uses a 12-bit network mask for infiniband.\n            MyDistro.configIpV4(if_name, self.ip_addr, 12)\n        except Exception as e:\n            raise RdmaError(\"Failed to configure the rdma interface: {0}\".format(e))\n\n\nclass ExtensionsConfig(object):\n    \"\"\"\n    Parse ExtensionsConfig, downloading and unpacking extensions to /var/lib/waagent.\n    Install if <enabled>true</enabled>; remove if it is set to false.\n    \"\"\"\n\n    # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n    # <Extensions version=\"1.0.0.0\" goalStateIncarnation=\"6\"><Plugins>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\"\n    # location=\"http://previewusnorthcache.blob.core.test-cint.azure-test.net/d84b216d00bf4d96982be531539e1513/OSTCExtensions_ExampleHandlerLinux_usnorth_manifest.xml\"\n    # config=\"\" state=\"enabled\" autoUpgrade=\"false\" runAsStartupTask=\"false\" isJson=\"true\" />\n    # </Plugins>\n    # <PluginSettings>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\">\n    #    <RuntimeSettings seqNo=\"2\">{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\n    # \"protectedSettings\":\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR\n    # Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6\n    # tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X\n    # v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh\n    # kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}</RuntimeSettings>\n    #  </Plugin>\n    # </PluginSettings>\n    # <StatusUploadBlob>https://ostcextensions.blob.core.test-cint.azure-test.net/vhds/eg-plugin7-vm.eg-plugin7-vm.eg-plugin7-vm.status?sr=b&amp;sp=rw&amp;\n    # se=9999-01-01&amp;sk=key1&amp;sv=2012-02-12&amp;sig=wRUIDN1x2GC06FWaetBP9sjjifOWvRzS2y2XBB4qoBU%3D</StatusUploadBlob></Extensions>\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.Extensions = None\n        self.Plugins = None\n        self.Util = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Write configuration to file ExtensionsConfig.xml.\n        Log plugin-specific activity to /var/log/azure/<Publisher>.<PluginName>/<Version>/CommandExecution.log.\n        If state is enabled:\n            if the plugin is installed:\n                if the new plugin's version is higher and DisallowMajorVersionUpgrade\n                is false (or it is true but only the minor version changes), do the upgrade:\n                    download the new archive\n                    run the updateCommand\n                    disable the old plugin and remove it\n                    enable the new plugin\n                if the new plugin's version is the same or lower:\n                    create the new .settings file from the configuration received\n                    run the enableCommand\n            if the plugin is not installed:\n                download/unpack the archive and run the installCommand/enableCommand\n        If state is disabled:\n            run the disableCommand.\n        If state is uninstall:\n            run the uninstallCommand, then remove the old plugin directory.\n        \"\"\"\n        self.reinitialize()\n        self.Util = Util()\n        dom = xml.dom.minidom.parseString(xmlText)\n        LogIfVerbose(xmlText)\n        self.plugin_log_dir = '/var/log/azure'\n        if not os.path.exists(self.plugin_log_dir):\n            os.mkdir(self.plugin_log_dir)\n        try:\n            self.Extensions = dom.getElementsByTagName(\"Extensions\")\n            pg = dom.getElementsByTagName(\"Plugins\")\n            if len(pg) > 0:\n                self.Plugins = pg[0].getElementsByTagName(\"Plugin\")\n            else:\n                self.Plugins = []\n            incarnation = self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n            SetFileContents('ExtensionsConfig.' + incarnation + '.xml', xmlText)\n        except Exception as e:\n            Error('Error parsing ExtensionsConfig: {0}.'.format(e))\n            return None\n        for p in self.Plugins:\n            if len(p.getAttribute(\"location\")) < 1:  # this plugin's settings live inside PluginSettings\n                continue\n            p.setAttribute('restricted', 'false')\n            previous_version = None\n            version = p.getAttribute(\"version\")\n            name = p.getAttribute(\"name\")\n            plog_dir = self.plugin_log_dir + '/' + name + '/' + version\n            if not os.path.exists(plog_dir):\n                os.makedirs(plog_dir)\n            p.plugin_log = plog_dir + '/CommandExecution.log'\n            handler = name + '-' + version\n
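            # Each extension bundle carries a HandlerManifest.json naming its\n            # lifecycle commands. A minimal illustrative sketch (paths and\n            # values are examples, not a definitive schema):\n            #\n            #   [{\n            #     \"name\": \"ExampleHandlerLinux\",\n            #     \"version\": 1.0,\n            #     \"handlerManifest\": {\n            #       \"installCommand\": \"scripts/install.sh\",\n            #       \"uninstallCommand\": \"scripts/uninstall.sh\",\n            #       \"updateCommand\": \"scripts/update.sh\",\n            #       \"enableCommand\": \"scripts/enable.sh\",\n            #       \"disableCommand\": \"scripts/disable.sh\",\n            #       \"reportHeartbeat\": false\n            #     }\n            #   }]\n            #\n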
Skipping.\")\n                continue\n            Log(\"Found Plugin: \" + name + ' version: ' + version)\n            if p.getAttribute(\"state\") == 'disabled' or p.getAttribute(\"state\") == 'uninstall':\n                # disable\n                zip_dir = LibDir + \"/\" + name + '-' + version\n                mfile = None\n                for root, dirs, files in os.walk(zip_dir):\n                    for f in files:\n                        if f in ('HandlerManifest.json'):\n                            mfile = os.path.join(root, f)\n                    if mfile != None:\n                        break\n                if mfile == None:\n                    Error('HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata', manifest)\n                if self.launchCommand(p.plugin_log, name, version, 'disableCommand') == None:\n                    self.SetHandlerState(handler, 'Enabled')\n                    Error('Unable to disable ' + name)\n                    SimpleLog(p.plugin_log, 'ERROR: Unable to disable ' + name)\n                else:\n                    self.SetHandlerState(handler, 'Disabled')\n                    Log(name + ' is disabled')\n                    SimpleLog(p.plugin_log, name + ' is disabled')\n\n                # uninstall if needed\n                if p.getAttribute(\"state\") == 'uninstall':\n                    if self.launchCommand(p.plugin_log, name, version, 'uninstallCommand') == None:\n                        self.SetHandlerState(handler, 'Installed')\n                        Error('Unable to uninstall ' + name)\n                        SimpleLog(p.plugin_log, 'Unable to uninstall ' + name)\n                    else:\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Log(name + ' uninstallCommand completed .')\n                    # remove the plugin\n                    Run('rm -rf ' + LibDir + '/' + name + '-' + version + '*')\n                    Log(name + '-' + version + ' extension files deleted.')\n                    SimpleLog(p.plugin_log, name + '-' + version + ' extension files deleted.')\n\n                continue\n                # state is enabled\n            # if the same plugin exists and the version is newer or\n            # does not exist then download and unzip the new plugin\n            plg_dir = None\n\n            latest_version_installed = LooseVersion(\"0.0\")\n            for item in os.listdir(LibDir):\n                itemPath = os.path.join(LibDir, item)\n                if os.path.isdir(itemPath) and name in item:\n                    try:\n                        # Split plugin dir name with '-' to get intalled plugin name and version\n                        sperator = item.rfind('-')\n                        if sperator < 0:\n                            continue\n                        installed_plg_name = item[0:sperator]\n                        installed_plg_version = LooseVersion(item[sperator + 1:])\n\n                        # Check installed plugin name and compare installed version to get the latest version installed\n                        if installed_plg_name == name and installed_plg_version > latest_version_installed:\n                            plg_dir = itemPath\n                            previous_version = str(installed_plg_version)\n                            latest_version_installed = installed_plg_version\n                    except Exception 
            if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version):\n                location = p.getAttribute(\"location\")\n                Log(\"Downloading plugin manifest: \" + name + \" from \" + location)\n                SimpleLog(p.plugin_log, \"Downloading plugin manifest: \" + name + \" from \" + location)\n\n                self.Util.Endpoint = location.split('/')[2]\n                Log(\"Plugin server is: \" + self.Util.Endpoint)\n                SimpleLog(p.plugin_log, \"Plugin server is: \" + self.Util.Endpoint)\n\n                manifest = self.Util.HttpGetWithoutHeaders(location, chkProxy=True)\n                if manifest == None:\n                    Error(\n                        \"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    SimpleLog(p.plugin_log,\n                              \"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    failoverlocation = p.getAttribute(\"failoverlocation\")\n                    self.Util.Endpoint = failoverlocation.split('/')[2]\n                    Log(\"Plugin failover server is: \" + self.Util.Endpoint)\n                    SimpleLog(p.plugin_log, \"Plugin failover server is: \" + self.Util.Endpoint)\n\n                    manifest = self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True)\n                # if the failover location also fails, skip this plugin\n                if manifest == None:\n                    AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version,\n                                      \"Download manifest fail \" + failoverlocation)\n                    Log(\"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    SimpleLog(p.plugin_log, \"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    continue\n
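\n                # The downloaded manifest is XML listing the available versions\n                # and their bundle URIs. An illustrative sketch (only the\n                # elements read below are shown; the surrounding structure is\n                # an assumption):\n                #\n                #   <Plugins>\n                #     <Plugin>\n                #       <Version>1.5</Version>\n                #       <Uri>http://host/OSTCExtensions_ExampleHandlerLinux_1.5.zip</Uri>\n                #       <DisallowMajorVersionUpgrade>false</DisallowMajorVersionUpgrade>\n                #     </Plugin>\n                #   </Plugins>\n                #\n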
                filepath = LibDir + \"/\" + name + '.' + incarnation + '.manifest'\n                if os.path.splitext(location)[-1] == '.xml':  # if this is an xml file we may have a BOM\n                    if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128:\n                        manifest = manifest[3:]\n                SetFileContents(filepath, manifest)\n                # Get the bundle url from the manifest\n                p.setAttribute('manifestdata', manifest)\n                man_dom = xml.dom.minidom.parseString(manifest)\n                bundle_uri = \"\"\n                for mp in man_dom.getElementsByTagName(\"Plugin\"):\n                    if GetNodeTextData(mp.getElementsByTagName(\"Version\")[0]) == version:\n                        bundle_uri = GetNodeTextData(mp.getElementsByTagName(\"Uri\")[0])\n                        break\n                if len(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")):\n                    if GetNodeTextData(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")[\n                                           0]) == 'true' and previous_version != None and previous_version.split('.')[\n                        0] != version.split('.')[0]:\n                        Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        SimpleLog(p.plugin_log,\n                                  'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        p.setAttribute('restricted', 'true')\n                        continue\n                if len(bundle_uri) < 1:\n                    Error(\"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    SimpleLog(p.plugin_log, \"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    continue\n                Log(\"Bundle URI = \" + bundle_uri)\n                SimpleLog(p.plugin_log, \"Bundle URI = \" + bundle_uri)\n\n                # Download the zipfile archive and save as '.zip'\n                bundle = self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True)\n                if bundle == None:\n                    AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version,\n                                      \"Download zip fail \" + bundle_uri)\n                    Error(\"Unable to download plugin bundle \" + bundle_uri)\n                    SimpleLog(p.plugin_log, \"Unable to download plugin bundle \" + bundle_uri)\n                    continue\n                AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, \"Download Success\")\n                b = bytearray(bundle)\n                filepath = LibDir + \"/\" + os.path.basename(bundle_uri) + '.zip'\n                SetFileContents(filepath, b)\n                Log(\"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n                SimpleLog(p.plugin_log,\n                          \"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n\n                # unpack the archive\n                z = zipfile.ZipFile(filepath)\n                zip_dir = LibDir + \"/\" + name + '-' + version\n                z.extractall(zip_dir)\n                Log('Extracted ' + bundle_uri + ' to ' + zip_dir)\n                SimpleLog(p.plugin_log, 'Extracted ' + bundle_uri + ' to ' + zip_dir)\n\n                # .zip archives carry no file permissions, so mark all the scripts executable\n
Run(\"find \" + zip_dir + \" -type f | xargs chmod  u+x \")\n                # write out the base64 config data so the plugin can process it.\n                mfile = None\n                for root, dirs, files in os.walk(zip_dir):\n                    for f in files:\n                        if f in ('HandlerManifest.json'):\n                            mfile = os.path.join(root, f)\n                    if mfile != None:\n                        break\n                if mfile == None:\n                    Error('HandlerManifest.json not found.')\n                    SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata', manifest)\n                # create the status and config dirs\n                Run('mkdir -p ' + root + '/status')\n                Run('mkdir -p ' + root + '/config')\n                # write out the configuration data to goalStateIncarnation.settings file in the config path.\n                config = ''\n                seqNo = '0'\n                if len(dom.getElementsByTagName(\"PluginSettings\")) != 0:\n                    pslist = dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                    for ps in pslist:\n                        if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                            Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                            SimpleLog(p.plugin_log, \"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                            config = GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                            seqNo = ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\")\n                            break\n                if config == '':\n                    Log(\"No RuntimeSettings for \" + name + \" V \" + version)\n                    SimpleLog(p.plugin_log, \"No RuntimeSettings for \" + name + \" V \" + version)\n\n                SetFileContents(root + \"/config/\" + seqNo + \".settings\", config)\n                # create HandlerEnvironment.json\n                handler_env = '[{  \"name\": \"' + name + '\", \"seqNo\": \"' + seqNo + '\", \"version\": 1.0,  \"handlerEnvironment\": {    \"logFolder\": \"' + os.path.dirname(\n                    p.plugin_log) + '\",    \"configFolder\": \"' + root + '/config\",    \"statusFolder\": \"' + root + '/status\",    \"heartbeatFile\": \"' + root + '/heartbeat.log\"}}]'\n                SetFileContents(root + '/HandlerEnvironment.json', handler_env)\n                self.SetHandlerState(handler, 'NotInstalled')\n\n                cmd = ''\n                getcmd = 'installCommand'\n                if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(\n                        previous_version):\n                    previous_handler = name + '-' + previous_version\n                    if self.GetHandlerState(previous_handler) != 'NotInstalled':\n                        getcmd = 'updateCommand'\n                        # disable the old plugin if it exists\n                        if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None:\n                            self.SetHandlerState(previous_handler, 'Enabled')\n                            Error('Unable to disable old plugin ' + name + ' version ' + previous_version)\n             
                self.SetHandlerState(handler, 'NotInstalled')\n\n                cmd = ''\n                getcmd = 'installCommand'\n                if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(previous_version):\n                    previous_handler = name + '-' + previous_version\n                    if self.GetHandlerState(previous_handler) != 'NotInstalled':\n                        getcmd = 'updateCommand'\n                        # disable the old plugin if it exists\n                        if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None:\n                            self.SetHandlerState(previous_handler, 'Enabled')\n                            Error('Unable to disable old plugin ' + name + ' version ' + previous_version)\n                            SimpleLog(p.plugin_log, 'Unable to disable old plugin ' + name + ' version ' + previous_version)\n                        else:\n                            self.SetHandlerState(previous_handler, 'Disabled')\n                            Log(name + ' version ' + previous_version + ' is disabled')\n                            SimpleLog(p.plugin_log, name + ' version ' + previous_version + ' is disabled')\n\n                        try:\n                            Log(\"Copy status file from old plugin dir to new\")\n                            old_plg_dir = plg_dir\n                            new_plg_dir = os.path.join(LibDir, \"{0}-{1}\".format(name, version))\n                            old_ext_status_dir = os.path.join(old_plg_dir, \"status\")\n                            new_ext_status_dir = os.path.join(new_plg_dir, \"status\")\n                            if os.path.isdir(old_ext_status_dir):\n                                for status_file in os.listdir(old_ext_status_dir):\n                                    status_file_path = os.path.join(old_ext_status_dir, status_file)\n                                    if os.path.isfile(status_file_path):\n                                        shutil.copy2(status_file_path, new_ext_status_dir)\n                            mrseq_file = os.path.join(old_plg_dir, \"mrseq\")\n                            if os.path.isfile(mrseq_file):\n                                shutil.copy(mrseq_file, new_plg_dir)\n                        except Exception as e:\n                            Error(\"Failed to copy status file: {0}\".format(e))\n\n                isupgradeSuccess = True\n                if getcmd == 'updateCommand':\n                    if self.launchCommand(p.plugin_log, name, version, getcmd, previous_version) == None:\n                        Error('Update failed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Update failed for ' + name + '-' + version)\n                        isupgradeSuccess = False\n                    else:\n                        Log('Update complete for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Update complete for ' + name + '-' + version)\n\n                    # if we updated, call uninstall for the old plugin\n                    if self.launchCommand(p.plugin_log, name, previous_version, 'uninstallCommand') == None:\n                        self.SetHandlerState(previous_handler, 'Installed')\n                        Error('Uninstall failed for ' + name + '-' + previous_version)\n                        SimpleLog(p.plugin_log, 'Uninstall failed for ' + name + '-' + previous_version)\n                        isupgradeSuccess = False\n                    else:\n                        self.SetHandlerState(previous_handler, 'NotInstalled')\n                        Log('Uninstall complete for ' + previous_handler)\n                        SimpleLog(p.plugin_log, 'Uninstall complete for ' + name + '-' + previous_version)\n\n                    try:\n                        # remove the old plugin dir\n                        if os.path.isdir(plg_dir):\n                            shutil.rmtree(plg_dir)\n                            Log(name + '-' + previous_version + ' extension files deleted.')\n                            SimpleLog(p.plugin_log, name + '-' + previous_version + ' extension files deleted.')\n                    except Exception as e:\n                        Error(\"Failed to remove old plugin directory: {0}\".format(e))\n
\n                    AddExtensionEvent(name, WALAEventOperation.Upgrade, isupgradeSuccess, 0, previous_version)\n                else:  # run install\n                    if self.launchCommand(p.plugin_log, name, version, getcmd) == None:\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Error('Installation failed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)\n                    else:\n                        self.SetHandlerState(handler, 'Installed')\n                        Log('Installation completed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)\n\n            # end if plg_dir == None or version >= prev\n            # change the incarnation of the settings file so it knows how to name status...\n            zip_dir = LibDir + \"/\" + name + '-' + version\n            mfile = None\n            for root, dirs, files in os.walk(zip_dir):\n                for f in files:\n                    if f == 'HandlerManifest.json':\n                        mfile = os.path.join(root, f)\n                if mfile != None:\n                    break\n            if mfile == None:\n                Error('HandlerManifest.json not found.')\n                SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')\n                continue\n            manifest = GetFileContents(mfile)\n            p.setAttribute('manifestdata', manifest)\n            config = ''\n            seqNo = '0'\n            if len(dom.getElementsByTagName(\"PluginSettings\")) != 0:\n                try:\n                    pslist = dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                except:\n                    Error('Error parsing ExtensionsConfig.')\n                    SimpleLog(p.plugin_log, 'Error parsing ExtensionsConfig.')\n                    continue\n                for ps in pslist:\n                    if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                        Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                        SimpleLog(p.plugin_log, \"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                        config = GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                        seqNo = ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\")\n                        break\n            if config == '':\n                Error(\"No RuntimeSettings for \" + name + \" V \" + version)\n                SimpleLog(p.plugin_log, \"No RuntimeSettings for \" + name + \" V \" + version)\n\n            SetFileContents(root + \"/config/\" + seqNo + \".settings\", config)\n\n            # state is still enabled\n            if (self.GetHandlerState(handler) == 'NotInstalled'):  # run install first if needed\n                if self.launchCommand(p.plugin_log, name, version, 'installCommand') == None:\n                    self.SetHandlerState(handler, 'NotInstalled')\n                    Error('Installation failed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)\n                else:\n                    self.SetHandlerState(handler, 'Installed')\n                    Log('Installation completed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)\n
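\n            # Handler lifecycle as tracked in <plugin dir>/config/HandlerState:\n            # NotInstalled -> Installed (installCommand) -> Enabled (enableCommand)\n            # <-> Disabled (disableCommand); uninstallCommand returns the handler\n            # to NotInstalled.\n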
            if (self.GetHandlerState(handler) != 'NotInstalled'):\n                if self.launchCommand(p.plugin_log, name, version, 'enableCommand') == None:\n                    self.SetHandlerState(handler, 'Installed')\n                    Error('Enable failed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Enable failed for ' + name + '-' + version)\n                else:\n                    self.SetHandlerState(handler, 'Enabled')\n                    Log('Enable completed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Enable completed for ' + name + '-' + version)\n\n            # this plugin's processing is complete\n            Log('Processing completed for ' + name + '-' + version)\n            SimpleLog(p.plugin_log, 'Processing completed for ' + name + '-' + version)\n\n        # end plugin processing loop\n        Log('Finished processing ExtensionsConfig.xml')\n        try:\n            SimpleLog(p.plugin_log, 'Finished processing ExtensionsConfig.xml')\n        except:\n            pass\n\n        return self\n\n    def launchCommand(self, plugin_log, name, version, command, prev_version=None):\n        commandToEventOperation = {\n            \"installCommand\": WALAEventOperation.Install,\n            \"uninstallCommand\": WALAEventOperation.UnIsntall,\n            \"updateCommand\": WALAEventOperation.Upgrade,\n            \"enableCommand\": WALAEventOperation.Enable,\n            \"disableCommand\": WALAEventOperation.Disable,\n        }\n        isSuccess = True\n        start = datetime.datetime.now()\n        r = self.__launchCommandWithoutEventLog(plugin_log, name, version, command, prev_version)\n        if r == None:\n            isSuccess = False\n        Duration = int((datetime.datetime.now() - start).seconds)\n        if commandToEventOperation.get(command):\n            AddExtensionEvent(name, commandToEventOperation[command], isSuccess, Duration, version)\n        return r\n\n    def __launchCommandWithoutEventLog(self, plugin_log, name, version, command, prev_version=None):\n        # get the manifest and read the command\n        mfile = None\n        zip_dir = LibDir + \"/\" + name + '-' + version\n        for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile = os.path.join(root, f)\n            if mfile != None:\n                break\n        if mfile == None:\n            Error('HandlerManifest.json not found.')\n            SimpleLog(plugin_log, 'HandlerManifest.json not found.')\n            return None\n        manifest = GetFileContents(mfile)\n        try:\n            jsn = json.loads(manifest)\n        except:\n            Error('Error parsing HandlerManifest.json.')\n            SimpleLog(plugin_log, 'Error parsing HandlerManifest.json.')\n            return None\n        if type(jsn) == list:\n            jsn = jsn[0]\n        if jsn.has_key('handlerManifest'):\n            cmd = jsn['handlerManifest'][command]\n        else:\n            Error('Key handlerManifest not found.  Handler cannot be installed.')\n            SimpleLog(plugin_log, 'Key handlerManifest not found.  Handler cannot be installed.')\n
            # without a handlerManifest section there is no command to run\n            return None\n\n        if len(cmd) == 0:\n            Error('Unable to read ' + command)\n            SimpleLog(plugin_log, 'Unable to read ' + command)\n            return None\n\n        # for update we send the path of the old installation\n        arg = ''\n        if prev_version != None:\n            arg = ' ' + LibDir + '/' + name + '-' + prev_version\n        dirpath = os.path.dirname(mfile)\n        LogIfVerbose('Command is ' + dirpath + '/' + cmd)\n        # launch\n        pid = None\n        try:\n            child = subprocess.Popen(dirpath + '/' + cmd + arg, shell=True, cwd=dirpath, stdout=subprocess.PIPE)\n        except Exception as e:\n            Error('Exception launching ' + cmd + str(e))\n            SimpleLog(plugin_log, 'Exception launching ' + cmd + str(e))\n            return None\n\n        pid = child.pid\n        if pid == None or pid < 1:\n            ExtensionChildren.append((-1, root))\n            Error('Error launching ' + cmd + '.')\n            SimpleLog(plugin_log, 'Error launching ' + cmd + '.')\n        else:\n            ExtensionChildren.append((pid, root))\n            Log(\"Spawned \" + cmd + \" PID \" + str(pid))\n            SimpleLog(plugin_log, \"Spawned \" + cmd + \" PID \" + str(pid))\n\n        # wait until install/upgrade is finished\n        timeout = 300  # 5 minutes\n        retry = timeout / 5\n        while retry > 0 and child.poll() == None:\n            LogIfVerbose(cmd + ' still running with PID ' + str(pid))\n            time.sleep(5)\n            retry -= 1\n        if retry == 0:\n            Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))\n            SimpleLog(plugin_log, 'Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))\n
\n            os.kill(pid, 9)\n            return None\n        code = child.wait()\n        if code == None or code != 0:\n            Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n            SimpleLog(plugin_log, 'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n            return None\n        Log(command + ' completed.')\n        SimpleLog(plugin_log, command + ' completed.')\n        return 0\n\n    def ReportHandlerStatus(self):\n        \"\"\"\n        Collect all status reports.\n        \"\"\"\n        # { \"version\": \"1.0\", \"timestampUTC\": \"2014-03-31T21:28:58Z\",\n        # \"aggregateStatus\": {\n        # \"guestAgentStatus\": { \"version\": \"2.0.4PRE\", \"status\": \"Ready\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"GuestAgent is running and accepting new configurations.\" } },\n        # \"handlerAggregateStatus\": [{\n        # \"handlerName\": \"ExampleHandlerLinux\", \"handlerVersion\": \"1.0\", \"status\": \"Ready\", \"runtimeSettingsStatus\": {\n        # \"sequenceNumber\": \"2\", \"settingsStatus\": { \"timestampUTC\": \"2014-03-31T23:46:00Z\", \"status\": { \"name\": \"ExampleHandlerLinux\", \"operation\": \"Command Execution Finished\", \"configurationAppliedTime\": \"2014-03-31T23:46:00Z\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Finished executing command\" },\n        # \"substatus\": [\n        # { \"name\": \"StdOut\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Goodbye world!\" } },\n        # { \"name\": \"StdErr\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"\" } }\n        # ]\n        # } } } }\n        # ]\n        # }}\n\n        try:\n            incarnation = self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n        except:\n            Error('Error parsing ExtensionsConfig.  Unable to send status reports')\n            return -1\n        status = ''\n        statuses = ''\n        for p in self.Plugins:\n            if p.getAttribute(\"state\") == 'uninstall' or p.getAttribute(\"restricted\") == 'true':\n                continue\n            version = p.getAttribute(\"version\")\n            name = p.getAttribute(\"name\")\n            if p.getAttribute(\"isJson\") != 'true':\n                LogIfVerbose(\"Plugin \" + name + \" version: \" + version + \" is not a JSON Extension.  Skipping.\")\n
Skipping.\")\n                continue\n            reportHeartbeat = False\n            if len(p.getAttribute(\"manifestdata\")) < 1:\n                Error(\"Failed to get manifestdata.\")\n            else:\n                reportHeartbeat = json.loads(p.getAttribute(\"manifestdata\"))[0]['handlerManifest']['reportHeartbeat']\n            if len(statuses) > 0:\n                statuses += ','\n            statuses += self.GenerateAggStatus(name, version, reportHeartbeat)\n        tstamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n        # header\n        # agent state\n        if provisioned == False:\n            if provisionError == None:\n                agent_state = 'Provisioning'\n                agent_msg = 'Guest Agent is starting.'\n            else:\n                agent_state = 'Provisioning Error.'\n                agent_msg = provisionError\n        else:\n            agent_state = 'Ready'\n            agent_msg = 'GuestAgent is running and accepting new configurations.'\n\n        status = '{\"version\":\"1.0\",\"timestampUTC\":\"' + tstamp + '\",\"aggregateStatus\":{\"guestAgentStatus\":{\"version\":\"' + GuestAgentVersion + '\",\"status\":\"' + agent_state + '\",\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"' + agent_msg + '\"}},\"handlerAggregateStatus\":[' + statuses + ']}}'\n        try:\n            uri = GetNodeTextData(self.Extensions[0].getElementsByTagName(\"StatusUploadBlob\")[0]).replace('&amp;', '&')\n        except:\n            Error('Error parsing ExtensionsConfig.  Unable to send status reports')\n            return -1\n\n        LogIfVerbose('Status report ' + status + ' sent to ' + uri)\n        return UploadStatusBlob(uri, status.encode(\"utf-8\"))\n\n    def GetCurrentSequenceNumber(self, plugin_base_dir):\n        \"\"\"\n        Get the settings file with biggest file number in config folder\n        \"\"\"\n        config_dir = os.path.join(plugin_base_dir, 'config')\n        seq_no = 0\n        for subdir, dirs, files in os.walk(config_dir):\n            for file in files:\n                try:\n                    cur_seq_no = int(os.path.basename(file).split('.')[0])\n                    if cur_seq_no > seq_no:\n                        seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return str(seq_no)\n\n    def GenerateAggStatus(self, name, version, reportHeartbeat=False):\n        \"\"\"\n        Generate the status which Azure can understand by the status and heartbeat reported by extension\n        \"\"\"\n        plugin_base_dir = LibDir + '/' + name + '-' + version + '/'\n        current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir)\n        status_file = os.path.join(plugin_base_dir, 'status/', current_seq_no + '.status')\n        heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log')\n\n        handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState')\n        agg_state = 'NotReady'\n        handler_state = None\n        status_obj = None\n        status_code = None\n        formatted_message = None\n        localized_message = None\n\n        if os.path.exists(handler_state_file):\n            handler_state = GetFileContents(handler_state_file).lower()\n        if HandlerStatusToAggStatus.has_key(handler_state):\n            agg_state = HandlerStatusToAggStatus[handler_state]\n        if reportHeartbeat:\n            if os.path.exists(heartbeat_file):\n                d = int(time.time() - os.stat(heartbeat_file).st_mtime)\n        
        if reportHeartbeat:\n            if os.path.exists(heartbeat_file):\n                d = int(time.time() - os.stat(heartbeat_file).st_mtime)\n                if d > 600:  # not updated for more than 10 min\n                    agg_state = 'Unresponsive'\n                else:\n                    try:\n                        heartbeat = json.loads(GetFileContents(heartbeat_file))[0][\"heartbeat\"]\n                        agg_state = heartbeat.get(\"status\")\n                        status_code = heartbeat.get(\"code\")\n                        formatted_message = heartbeat.get(\"formattedMessage\")\n                        localized_message = heartbeat.get(\"message\")\n                    except:\n                        Error(\"Incorrect heartbeat file. Ignoring it.\")\n            else:\n                agg_state = 'Unresponsive'\n        # get the status file reported by the extension\n        if os.path.exists(status_file):\n            # the raw status generated by the extension is an array; take the first\n            # item and remove the unnecessary element\n            try:\n                status_obj = json.loads(GetFileContents(status_file))[0]\n                del status_obj[\"version\"]\n            except:\n                Error(\"Incorrect status file. Will NOT include settingsStatus in the report.\")\n        agg_status_obj = {\"handlerName\": name, \"handlerVersion\": version, \"status\": agg_state, \"runtimeSettingsStatus\":\n            {\"sequenceNumber\": current_seq_no}}\n        if status_obj:\n            agg_status_obj[\"runtimeSettingsStatus\"][\"settingsStatus\"] = status_obj\n        if status_code != None:\n            agg_status_obj[\"code\"] = status_code\n        if formatted_message:\n            agg_status_obj[\"formattedMessage\"] = formatted_message\n        if localized_message:\n            agg_status_obj[\"message\"] = localized_message\n        agg_status_string = json.dumps(agg_status_obj)\n        LogIfVerbose(\"Handler Aggregated Status:\" + agg_status_string)\n        return agg_status_string\n\n    def SetHandlerState(self, handler, state=''):\n        zip_dir = LibDir + \"/\" + handler\n        mfile = None\n        for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile = os.path.join(root, f)\n            if mfile != None:\n                break\n        if mfile == None:\n            Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.')\n            return None\n        Log(\"SetHandlerState: \" + handler + \", \" + state)\n        return SetFileContents(os.path.dirname(mfile) + '/config/HandlerState', state)\n\n    def GetHandlerState(self, handler):\n        handlerState = GetFileContents(handler + '/config/HandlerState')\n        if (handlerState):\n            return handlerState.rstrip('\\r\\n')\n        else:\n            return 'NotInstalled'\n\n\nclass HostingEnvironmentConfig(object):\n    \"\"\"\n    Parse the hosting environment config and store it in\n    HostingEnvironmentConfig.xml.\n    \"\"\"\n\n    #\n    # <HostingEnvironmentConfig version=\"1.0.0.0\" goalStateIncarnation=\"1\">\n    #   <StoredCertificates>\n    #     <StoredCertificate name=\"Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption\" certificateId=\"sha1:C093FA5CD3AAE057CB7C4E04532B2E16E07C26CA\" storeName=\"My\" configurationLevel=\"System\" />\n    #   </StoredCertificates>\n    #   <Deployment name=\"db00a7755a5e4e8a8fe4b19bc3b330c3\" guid=\"{ce5a036f-5c93-40e7-8adf-2613631008ab}\" incarnation=\"2\">\n    #     <Service name=\"MyVMRoleService\" guid=\"{00000000-0000-0000-0000-000000000000}\" />\n    #     
<ServiceInstance name=\"db00a7755a5e4e8a8fe4b19bc3b330c3.1\" guid=\"{d113f4d7-9ead-4e73-b715-b724b5b7842c}\" />\n    #   </Deployment>\n    #   <Incarnation number=\"1\" instance=\"MachineRole_IN_0\" guid=\"{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}\" />\n    #   <Role guid=\"{73d95f1c-6472-e58e-7a1a-523554e11d46}\" name=\"MachineRole\" hostingEnvironmentVersion=\"1\" software=\"\" softwareType=\"ApplicationPackage\" entryPoint=\"\" parameters=\"\" settleTimeSeconds=\"10\" />\n    #   <HostingEnvironmentSettings name=\"full\" Runtime=\"rd_fabric_stable.110217-1402.RuntimePackage_1.0.0.8.zip\">\n    #     <CAS mode=\"full\" />\n    #     <PrivilegeLevel mode=\"max\" />\n    #     <AdditionalProperties><CgiHandlers></CgiHandlers></AdditionalProperties>\n    #   </HostingEnvironmentSettings>\n    #   <ApplicationSettings>\n    #     <Setting name=\"__ModelData\" value=\"&lt;m role=&quot;MachineRole&quot; xmlns=&quot;urn:azure:m:v1&quot;>&lt;r name=&quot;MachineRole&quot;>&lt;e name=&quot;a&quot; />&lt;e name=&quot;b&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput&quot; />&lt;/r>&lt;/m>\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString\" value=\"DefaultEndpointsProtocol=http;AccountName=osimages;AccountKey=DNZQ...\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled\" value=\"true\" />\n    #   </ApplicationSettings>\n    #   <ResourceReferences>\n    #     <Resource name=\"DiagnosticStore\" type=\"directory\" request=\"Microsoft.Cis.Fabric.Controller.Descriptions.ServiceDescription.Data.Policy\" sticky=\"true\" size=\"1\" path=\"db00a7755a5e4e8a8fe4b19bc3b330c3.MachineRole.DiagnosticStore\\\" disableQuota=\"false\" />\n    #   </ResourceReferences>\n    # </HostingEnvironmentConfig>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset Members.\n        \"\"\"\n        self.StoredCertificates = None\n        self.Deployment = None\n        self.Incarnation = None\n        self.Role = None\n        self.HostingEnvironmentSettings = None\n        self.ApplicationSettings = None\n        self.Certificates = None\n        self.ResourceReferences = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse and create HostingEnvironmentConfig.xml.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"HostingEnvironmentConfig.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [\"HostingEnvironmentConfig\", \"Deployment\", \"Service\",\n                  \"ServiceInstance\", \"Incarnation\", \"Role\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"HostingEnvironmentConfig.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"HostingEnvironmentConfig\":\n            Error(\"HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig\")\n            return None\n        self.ApplicationSettings = dom.getElementsByTagName(\"Setting\")\n        self.Certificates = dom.getElementsByTagName(\"StoredCertificate\")\n        return self\n\n    def DecryptPassword(self, e):\n        \"\"\"\n        Return decrypted password.\n        \"\"\"\n        SetFileContents(\"password.p7m\",\n                        \"MIME-Version: 1.0\\n\"\n                        + \"Content-Disposition: attachment; 
filename=\\\"password.p7m\\\"\\n\"\n                        + \"Content-Type: application/x-pkcs7-mime; name=\\\"password.p7m\\\"\\n\"\n                        + \"Content-Transfer-Encoding: base64\\n\\n\"\n                        + textwrap.fill(e, 64))\n        return RunGetOutput(Openssl + \" cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem\")[\n            1]\n\n    def ActivateResourceDisk(self):\n        return MyDistro.ActivateResourceDisk()\n\n    def Process(self):\n        \"\"\"\n        Execute ActivateResourceDisk in separate thread.\n        Create the user account.\n        Launch ConfigurationConsumer if specified in the config.\n        \"\"\"\n        no_thread = False\n        if DiskActivated == False:\n            for m in inspect.getmembers(MyDistro):\n                if 'ActivateResourceDiskNoThread' in m:\n                    no_thread = True\n                    break\n            if no_thread == True:\n                MyDistro.ActivateResourceDiskNoThread()\n            else:\n                diskThread = threading.Thread(target=self.ActivateResourceDisk)\n                diskThread.start()\n        User = None\n        Pass = None\n        Expiration = None\n        Thumbprint = None\n        for b in self.ApplicationSettings:\n            sname = b.getAttribute(\"name\")\n            svalue = b.getAttribute(\"value\")\n        if User != None and Pass != None:\n            if User != \"root\" and User != \"\" and Pass != \"\":\n                CreateAccount(User, Pass, Expiration, Thumbprint)\n            else:\n                Error(\"Not creating user account: \" + User)\n        for c in self.Certificates:\n            csha1 = c.getAttribute(\"certificateId\").split(':')[1].upper()\n            if os.path.isfile(csha1 + \".prv\"):\n                Log(\"Private key with thumbprint: \" + csha1 + \" was retrieved.\")\n            if os.path.isfile(csha1 + \".crt\"):\n                Log(\"Public cert with thumbprint: \" + csha1 + \" was retrieved.\")\n        program = Config.get(\"Role.ConfigurationConsumer\")\n        if program != None:\n            try:\n                Children.append(subprocess.Popen([program, LibDir + \"/HostingEnvironmentConfig.xml\"]))\n            except OSError as e:\n                ErrorWithPrefix('HostingEnvironmentConfig.Process',\n                                'Exception: ' + str(e) + ' occured launching ' + program)\n\n\nclass GoalState(Util):\n    \"\"\"\n    Primary container for all configuration except OvfXml.\n    Encapsulates http communication with endpoint server.\n    Initializes and populates:\n    self.HostingEnvironmentConfig\n    self.SharedConfig\n    self.ExtensionsConfig\n    self.Certificates\n    \"\"\"\n\n    #\n    # <GoalState xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"goalstate10.xsd\">\n    #   <Version>2010-12-15</Version>\n    #   <Incarnation>1</Incarnation>\n    #   <Machine>\n    #     <ExpectedState>Started</ExpectedState>\n    #     <LBProbePorts>\n    #       <Port>16001</Port>\n    #     </LBProbePorts>\n    #   </Machine>\n    #   <Container>\n    #     <ContainerId>c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2</ContainerId>\n    #     <RoleInstanceList>\n    #       <RoleInstance>\n    #         <InstanceId>MachineRole_IN_0</InstanceId>\n    #         <State>Started</State>\n    #         <Configuration>\n    #           
<HostingEnvironmentConfig>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>\n    #           <SharedConfig>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=config&amp;type=sharedConfig&amp;incarnation=1</SharedConfig>\n    #           <Certificates>http://10.115.153.40:80/machine/c6d5526c-5ac2-4200-b6e2-56f2b70c5ab2/MachineRole%5FIN%5F0?comp=certificates&amp;incarnation=1</Certificates>\n    #          <ExtensionsConfig>http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&amp;type=extensionsConfig&amp;incarnation=2</ExtensionsConfig>\n    #         <FullConfig>http://100.67.238.230:80/machine/9c87aa94-3bda-45e3-b2b7-0eb0fca7baff/1552dd64dc254e6884f8d5b8b68aa18f.eg%2Dplug%2Dvm?comp=config&amp;type=fullConfig&amp;incarnation=2</FullConfig>\n\n    #         </Configuration>\n    #       </RoleInstance>\n    #     </RoleInstanceList>\n    #   </Container>\n    # </GoalState>\n    #\n    # There is only one Role for VM images.\n    #\n    # Of primary interest is:\n    #  LBProbePorts -- an http server needs to run here\n    #  We also note Container/ContainerID and RoleInstance/InstanceId to form the health report.\n    #  And of course, Incarnation\n    #\n    def __init__(self, Agent):\n        self.Agent = Agent\n        self.Endpoint = Agent.Endpoint\n        self.TransportCert = Agent.TransportCert\n        self.reinitialize()\n\n    def reinitialize(self):\n        self.Incarnation = None  # integer\n        self.ExpectedState = None  # \"Started\"\n        self.HostingEnvironmentConfigUrl = None\n        self.HostingEnvironmentConfigXml = None\n        self.HostingEnvironmentConfig = None\n        self.SharedConfigUrl = None\n        self.SharedConfigXml = None\n        self.SharedConfig = None\n        self.CertificatesUrl = None\n        self.CertificatesXml = None\n        self.Certificates = None\n        self.ExtensionsConfigUrl = None\n        self.ExtensionsConfigXml = None\n        self.ExtensionsConfig = None\n        self.RoleInstanceId = None\n        self.ContainerId = None\n        self.LoadBalancerProbePort = None  # integer, ?list of integers\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Request configuration data from endpoint server.\n        Parse and populate contained configuration objects.\n        Calls Certificates().Parse()\n        Calls SharedConfig().Parse\n        Calls ExtensionsConfig().Parse\n        Calls HostingEnvironmentConfig().Parse\n        \"\"\"\n        self.reinitialize()\n        LogIfVerbose(xmlText)\n        node = xml.dom.minidom.parseString(xmlText).childNodes[0]\n        if node.localName != \"GoalState\":\n            Error(\"GoalState.Parse: root not GoalState\")\n            return None\n        for a in node.childNodes:\n            if a.nodeType == node.ELEMENT_NODE:\n                if a.localName == \"Incarnation\":\n                    self.Incarnation = GetNodeTextData(a)\n                elif a.localName == \"Machine\":\n                    for b in a.childNodes:\n                        if b.nodeType == node.ELEMENT_NODE:\n                            if b.localName == \"ExpectedState\":\n                                self.ExpectedState = GetNodeTextData(b)\n                                Log(\"ExpectedState: \" + self.ExpectedState)\n                            
elif b.localName == \"LBProbePorts\":\n                                for c in b.childNodes:\n                                    if c.nodeType == node.ELEMENT_NODE and c.localName == \"Port\":\n                                        self.LoadBalancerProbePort = int(GetNodeTextData(c))\n                elif a.localName == \"Container\":\n                    for b in a.childNodes:\n                        if b.nodeType == node.ELEMENT_NODE:\n                            if b.localName == \"ContainerId\":\n                                self.ContainerId = GetNodeTextData(b)\n                                Log(\"ContainerId: \" + self.ContainerId)\n                            elif b.localName == \"RoleInstanceList\":\n                                for c in b.childNodes:\n                                    if c.localName == \"RoleInstance\":\n                                        for d in c.childNodes:\n                                            if d.nodeType == node.ELEMENT_NODE:\n                                                if d.localName == \"InstanceId\":\n                                                    self.RoleInstanceId = GetNodeTextData(d)\n                                                    Log(\"RoleInstanceId: \" + self.RoleInstanceId)\n                                                elif d.localName == \"State\":\n                                                    pass\n                                                elif d.localName == \"Configuration\":\n                                                    for e in d.childNodes:\n                                                        if e.nodeType == node.ELEMENT_NODE:\n                                                            LogIfVerbose(e.localName)\n                                                            if e.localName == \"HostingEnvironmentConfig\":\n                                                                self.HostingEnvironmentConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\n                                                                    \"HostingEnvironmentConfigUrl:\" + self.HostingEnvironmentConfigUrl)\n                                                                self.HostingEnvironmentConfigXml = self.HttpGetWithHeaders(\n                                                                    self.HostingEnvironmentConfigUrl)\n                                                                self.HostingEnvironmentConfig = HostingEnvironmentConfig().Parse(\n                                                                    self.HostingEnvironmentConfigXml)\n                                                            elif e.localName == \"SharedConfig\":\n                                                                self.SharedConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"SharedConfigUrl:\" + self.SharedConfigUrl)\n                                                                self.SharedConfigXml = self.HttpGetWithHeaders(\n                                                                    self.SharedConfigUrl)\n                                                                self.SharedConfig = SharedConfig().Parse(\n                                                                    self.SharedConfigXml)\n                                                                self.SharedConfig.Save()\n                                                            elif 
e.localName == \"ExtensionsConfig\":\n                                                                self.ExtensionsConfigUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\n                                                                    \"ExtensionsConfigUrl:\" + self.ExtensionsConfigUrl)\n                                                                self.ExtensionsConfigXml = self.HttpGetWithHeaders(\n                                                                    self.ExtensionsConfigUrl)\n                                                            elif e.localName == \"Certificates\":\n                                                                self.CertificatesUrl = GetNodeTextData(e)\n                                                                LogIfVerbose(\"CertificatesUrl:\" + self.CertificatesUrl)\n                                                                self.CertificatesXml = self.HttpSecureGetWithHeaders(\n                                                                    self.CertificatesUrl, self.TransportCert)\n                                                                self.Certificates = Certificates().Parse(\n                                                                    self.CertificatesXml)\n        if self.Incarnation == None:\n            Error(\"GoalState.Parse: Incarnation missing\")\n            return None\n        if self.ExpectedState == None:\n            Error(\"GoalState.Parse: ExpectedState missing\")\n            return None\n        if self.RoleInstanceId == None:\n            Error(\"GoalState.Parse: RoleInstanceId missing\")\n            return None\n        if self.ContainerId == None:\n            Error(\"GoalState.Parse: ContainerId missing\")\n            return None\n        SetFileContents(\"GoalState.\" + self.Incarnation + \".xml\", xmlText)\n        return self\n\n    def Process(self):\n        \"\"\"\n        Calls HostingEnvironmentConfig.Process() and SharedConfig.Process()\n        \"\"\"\n        LogIfVerbose(\"Process goalstate\")\n        self.HostingEnvironmentConfig.Process()\n        self.SharedConfig.Process()\n\n\nclass OvfEnv(object):\n    \"\"\"\n    Read and process provisioning info from the provisioning file OvfEnv.xml\n    \"\"\"\n\n    #\n    # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n    # <Environment xmlns=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:oe=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:wa=\"http://schemas.microsoft.com/windowsazure\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #    <wa:ProvisioningSection>\n    #      <wa:Version>1.0</wa:Version>\n    #      <LinuxProvisioningConfigurationSet xmlns=\"http://schemas.microsoft.com/windowsazure\" xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #        <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>\n    #        <HostName>HostName</HostName>\n    #        <UserName>UserName</UserName>\n    #        <UserPassword>UserPassword</UserPassword>\n    #        <DisableSshPasswordAuthentication>false</DisableSshPasswordAuthentication>\n    #        <SSH>\n    #          <PublicKeys>\n    #            <PublicKey>\n    #              <Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/authorized_keys</Path>\n    #            </PublicKey>\n    #          </PublicKeys>\n    #          <KeyPairs>\n    #            <KeyPair>\n    #              
<Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/id_rsa</Path>\n    #            </KeyPair>\n    #          </KeyPairs>\n    #        </SSH>\n    #      </LinuxProvisioningConfigurationSet>\n    #    </wa:ProvisioningSection>\n    # </Environment>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.WaNs = \"http://schemas.microsoft.com/windowsazure\"\n        self.OvfNs = \"http://schemas.dmtf.org/ovf/environment/1\"\n        self.MajorVersion = 1\n        self.MinorVersion = 0\n        self.ComputerName = None\n        self.AdminPassword = None\n        self.UserName = None\n        self.UserPassword = None\n        self.CustomData = None\n        self.DisableSshPasswordAuthentication = True\n        self.SshPublicKeys = []\n        self.SshKeyPairs = []\n\n    def Parse(self, xmlText, isDeprovision=False):\n        \"\"\"\n        Parse the XML tree, retrieving user and SSH key information.\n        Return self.\n        \"\"\"\n        self.reinitialize()\n        LogIfVerbose(re.sub(\"<UserPassword>.*?<\", \"<UserPassword>*<\", xmlText))\n        dom = xml.dom.minidom.parseString(xmlText)\n        if len(dom.getElementsByTagNameNS(self.OvfNs, \"Environment\")) != 1:\n            Error(\"Unable to parse OVF XML.\")\n        section = None\n        newer = False\n        for p in dom.getElementsByTagNameNS(self.WaNs, \"ProvisioningSection\"):\n            for n in p.childNodes:\n                if n.localName == \"Version\":\n                    verparts = GetNodeTextData(n).split('.')\n                    major = int(verparts[0])\n                    minor = int(verparts[1])\n                    if major > self.MajorVersion:\n                        newer = True\n                    if major != self.MajorVersion:\n                        break\n                    if minor > self.MinorVersion:\n                        newer = True\n                    section = p\n        if newer == True:\n            Warn(\"Newer provisioning configuration detected. 
Please consider updating waagent.\")\n        if section == None:\n            Error(\"Could not find ProvisioningSection with major version=\" + str(self.MajorVersion))\n            return None\n        self.ComputerName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"HostName\")[0])\n        self.UserName = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"UserName\")[0])\n        if isDeprovision == True:\n            return self\n        try:\n            self.UserPassword = GetNodeTextData(section.getElementsByTagNameNS(self.WaNs, \"UserPassword\")[0])\n        except:\n            pass\n        CDSection = None\n        try:\n            CDSection = section.getElementsByTagNameNS(self.WaNs, \"CustomData\")\n            if len(CDSection) > 0:\n                self.CustomData = GetNodeTextData(CDSection[0])\n                if len(self.CustomData) > 0:\n                    SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData)))\n                    Log('Wrote ' + LibDir + '/CustomData')\n                else:\n                    Error('<CustomData> contains no data!')\n        except Exception as e:\n            Error(str(e) + ' occurred creating ' + LibDir + '/CustomData')\n        disableSshPass = section.getElementsByTagNameNS(self.WaNs, \"DisableSshPasswordAuthentication\")\n        if len(disableSshPass) != 0:\n            self.DisableSshPasswordAuthentication = (GetNodeTextData(disableSshPass[0]).lower() == \"true\")\n        for pkey in section.getElementsByTagNameNS(self.WaNs, \"PublicKey\"):\n            LogIfVerbose(repr(pkey))\n            fp = None\n            path = None\n            for c in pkey.childNodes:\n                if c.localName == \"Fingerprint\":\n                    fp = GetNodeTextData(c).upper()\n                    LogIfVerbose(fp)\n                if c.localName == \"Path\":\n                    path = GetNodeTextData(c)\n                    LogIfVerbose(path)\n            self.SshPublicKeys += [[fp, path]]\n        for keyp in section.getElementsByTagNameNS(self.WaNs, \"KeyPair\"):\n            fp = None\n            path = None\n            LogIfVerbose(repr(keyp))\n            for c in keyp.childNodes:\n                if c.localName == \"Fingerprint\":\n                    fp = GetNodeTextData(c).upper()\n                    LogIfVerbose(fp)\n                if c.localName == \"Path\":\n                    path = GetNodeTextData(c)\n                    LogIfVerbose(path)\n            self.SshKeyPairs += [[fp, path]]\n        return self\n\n    def PrepareDir(self, filepath):\n        \"\"\"\n        Create the parent directory of filepath, expanding $HOME.\n        Change owner if it is under the user's home, and return the\n        normalized path, or None if filepath is not an absolute file path.\n        \"\"\"\n        home = MyDistro.GetHome()\n        # Expand HOME variable if present in path\n        path = os.path.normpath(filepath.replace(\"$HOME\", home))\n        if (path.startswith(\"/\") == False) or (path.endswith(\"/\") == True):\n            return None\n        dir = path.rsplit('/', 1)[0]\n        if dir != \"\":\n            CreateDir(dir, \"root\", 0o700)\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(dir, self.UserName)\n        return path\n\n    def NumberToBytes(self, i):\n        \"\"\"\n        Pack number into bytes.  
Return as string.\n        \"\"\"\n        result = []\n        while i:\n            result.append(chr(i & 0xFF))\n            i >>= 8\n        result.reverse()\n        return ''.join(result)\n\n    def BitsToString(self, a):\n        \"\"\"\n        Return string representation of bits in a.\n        \"\"\"\n        index = 7\n        s = \"\"\n        c = 0\n        for bit in a:\n            c = c | (bit << index)\n            index = index - 1\n            if index == -1:\n                s = s + struct.pack('>B', c)\n                c = 0\n                index = 7\n        return s\n\n    def OpensslToSsh(self, file):\n        \"\"\"\n        Return base-64 encoded key appropriate for ssh.\n        \"\"\"\n        from pyasn1.codec.der import decoder as der_decoder\n        try:\n            f = open(file).read().replace('\\n', '').split(\"KEY-----\")[1].split('-')[0]\n            k = der_decoder.decode(self.BitsToString(der_decoder.decode(base64.b64decode(f))[0][1]))[0]\n            n = k[0]\n            e = k[1]\n            keydata = \"\"\n            keydata += struct.pack('>I', len(\"ssh-rsa\"))\n            keydata += \"ssh-rsa\"\n            keydata += struct.pack('>I', len(self.NumberToBytes(e)))\n            keydata += self.NumberToBytes(e)\n            keydata += struct.pack('>I', len(self.NumberToBytes(n)) + 1)\n            keydata += \"\\0\"\n            keydata += self.NumberToBytes(n)\n        except Exception as e:\n            print(\"OpensslToSsh: Exception \" + str(e))\n            return None\n        return \"ssh-rsa \" + base64.b64encode(keydata) + \"\\n\"\n\n    def Process(self):\n        \"\"\"\n        Process all certificate and key info.\n        DisableSshPasswordAuthentication if configured.\n        CreateAccount(user)\n        Wait for WaAgent.EnvMonitor.IsHostnamePublished().\n        Restart ssh service.\n        \"\"\"\n        error = None\n        if self.ComputerName == None:\n            return \"Error: Hostname missing\"\n        error = WaAgent.EnvMonitor.SetHostName(self.ComputerName)\n        if error: return error\n        if self.DisableSshPasswordAuthentication:\n            filepath = \"/etc/ssh/sshd_config\"\n            # Disable RFC 4252 and RFC 4256 authentication schemes.\n            ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n            (a.startswith(\"PasswordAuthentication\") or a.startswith(\"ChallengeResponseAuthentication\")),\n                                                                 GetFileContents(filepath).split(\n                                                                     '\\n'))) + \"\\nPasswordAuthentication no\\nChallengeResponseAuthentication no\\n\")\n            Log(\"Disabled SSH password-based authentication methods.\")\n        if self.AdminPassword != None:\n            MyDistro.changePass('root', self.AdminPassword)\n        if self.UserName != None:\n            error = MyDistro.CreateAccount(self.UserName, self.UserPassword, None, None)\n        sel = MyDistro.isSelinuxRunning()\n        if sel:\n            MyDistro.setSelinuxEnforce(0)\n        home = MyDistro.GetHome()\n        for pkey in self.SshPublicKeys:\n            Log(\"Deploy public key:{0}\".format(pkey[0]))\n            if not os.path.isfile(pkey[0] + \".crt\"):\n                Error(\"PublicKey not found: \" + pkey[0])\n                error = \"Failed to deploy public key (0x09).\"\n                continue\n            path = self.PrepareDir(pkey[1])\n            if path == None:\n                
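# PrepareDir returns None when the path is not absolute or names a directory (trailing '/').\n                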
Error(\"Invalid path: \" + pkey[1] + \" for PublicKey: \" + pkey[0])\n                error = \"Invalid path for public key (0x03).\"\n                continue\n            Run(Openssl + \" x509 -in \" + pkey[0] + \".crt -noout -pubkey > \" + pkey[0] + \".pub\")\n            MyDistro.setSelinuxContext(pkey[0] + '.pub', 'unconfined_u:object_r:ssh_home_t:s0')\n            MyDistro.sshDeployPublicKey(pkey[0] + '.pub', path)\n            MyDistro.setSelinuxContext(path, 'unconfined_u:object_r:ssh_home_t:s0')\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(path, self.UserName)\n        for keyp in self.SshKeyPairs:\n            Log(\"Deploy key pair:{0}\".format(keyp[0]))\n            if not os.path.isfile(keyp[0] + \".prv\"):\n                Error(\"KeyPair not found: \" + keyp[0])\n                error = \"Failed to deploy key pair (0x0A).\"\n                continue\n            path = self.PrepareDir(keyp[1])\n            if path == None:\n                Error(\"Invalid path: \" + keyp[1] + \" for KeyPair: \" + keyp[0])\n                error = \"Invalid path for key pair (0x05).\"\n                continue\n            SetFileContents(path, GetFileContents(keyp[0] + \".prv\"))\n            os.chmod(path, 0o600)\n            Run(\"ssh-keygen -y -f \" + keyp[0] + \".prv > \" + path + \".pub\")\n            MyDistro.setSelinuxContext(path, 'unconfined_u:object_r:ssh_home_t:s0')\n            MyDistro.setSelinuxContext(path + '.pub', 'unconfined_u:object_r:ssh_home_t:s0')\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ChangeOwner(path, self.UserName)\n                ChangeOwner(path + \".pub\", self.UserName)\n        if sel:\n            MyDistro.setSelinuxEnforce(1)\n        while not WaAgent.EnvMonitor.IsHostnamePublished():\n            time.sleep(1)\n        MyDistro.restartSshService()\n        return error\n\n\nclass WALAEvent(object):\n    def __init__(self):\n        self.providerId = \"\"\n        self.eventId = 1\n        self.OpcodeName = \"\"\n        self.KeywordName = \"\"\n        self.TaskName = \"\"\n        self.TenantName = \"\"\n        self.RoleName = \"\"\n        self.RoleInstanceName = \"\"\n        self.ContainerId = \"\"\n        self.ExecutionMode = \"IAAS\"\n        self.OSVersion = \"\"\n        self.GAVersion = \"\"\n        self.RAM = 0\n        self.Processors = 0\n\n    def ToXml(self):\n        strEventid = u'<Event id=\"{0}\"/>'.format(self.eventId)\n        strProviderid = u'<Provider id=\"{0}\"/>'.format(self.providerId)\n        strRecordFormat = u'<Param Name=\"{0}\" Value=\"{1}\" T=\"{2}\" />'\n        strRecordNoQuoteFormat = u'<Param Name=\"{0}\" Value={1} T=\"{2}\" />'\n        strMtStr = u'mt:wstr'\n        strMtUInt64 = u'mt:uint64'\n        strMtBool = u'mt:bool'\n        strMtFloat = u'mt:float64'\n        strEventsData = u\"\"\n\n        for attName in self.__dict__:\n            if attName in [\"eventId\", \"filedCount\", \"providerId\"]:\n                continue\n\n            attValue = self.__dict__[attName]\n            if type(attValue) is int:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtUInt64)\n                continue\n            if type(attValue) is str:\n                attValue = xml.sax.saxutils.quoteattr(attValue)\n                strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr)\n                continue\n            if 
str(type(attValue)).count(\"'unicode'\") > 0:\n                attValue = xml.sax.saxutils.quoteattr(attValue)\n                strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr)\n                continue\n            if type(attValue) is bool:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtBool)\n                continue\n            if type(attValue) is float:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtFloat)\n                continue\n\n            Log(\"Warning: property \" + attName + \" of type \" + str(type(attValue)) + \" can't be converted to event data: type not supported\")\n\n        return u\"<Data>{0}{1}{2}</Data>\".format(strProviderid, strEventid, strEventsData)\n\n    def Save(self):\n        eventfolder = LibDir + \"/events\"\n        if not os.path.exists(eventfolder):\n            os.mkdir(eventfolder)\n            os.chmod(eventfolder, 0o700)\n        if len(os.listdir(eventfolder)) > 1000:\n            raise Exception(\"WriteToFolder:Too many files under \" + eventfolder + \" exit\")\n\n        filename = os.path.join(eventfolder, str(int(time.time() * 1000000)))\n        with open(filename + \".tmp\", 'wb+') as hfile:\n            hfile.write(self.ToXml().encode(\"utf-8\"))\n        os.rename(filename + \".tmp\", filename + \".tld\")\n\n\nclass WALAEventOperation:\n    HeartBeat = \"HeartBeat\"\n    Provision = \"Provision\"\n    Install = \"Install\"\n    UnIsntall = \"UnInstall\"\n    Disable = \"Disable\"\n    Enable = \"Enable\"\n    Download = \"Download\"\n    Upgrade = \"Upgrade\"\n    Update = \"Update\"\n\n\ndef AddExtensionEvent(name, op, isSuccess, duration=0, version=\"1.0\", message=\"\", type=\"\", isInternal=False):\n    event = ExtensionEvent()\n    event.Name = name\n    event.Version = version\n    event.IsInternal = isInternal\n    event.Operation = op\n    event.OperationSuccess = isSuccess\n    event.Message = message\n    event.Duration = duration\n    event.ExtensionType = type\n    try:\n        event.Save()\n    except:\n        Error(\"Error \" + traceback.format_exc())\n\n\nclass ExtensionEvent(WALAEvent):\n    def __init__(self):\n        WALAEvent.__init__(self)\n        self.eventId = 1\n        self.providerId = \"69B669B9-4AF8-4C50-BDC4-6006FA76E975\"\n        self.Name = \"\"\n        self.Version = \"\"\n        self.IsInternal = False\n        self.Operation = \"\"\n        self.OperationSuccess = True\n        self.ExtensionType = \"\"\n        self.Message = \"\"\n        self.Duration = 0\n\n\nclass WALAEventMonitor(WALAEvent):\n    def __init__(self, postMethod):\n        WALAEvent.__init__(self)\n        self.post = postMethod\n        self.sysInfo = {}\n        self.eventdir = LibDir + \"/events\"\n        self.isSystemInfoInitialized = False\n\n    def StartEventsLoop(self):\n        eventThread = threading.Thread(target=self.EventsLoop)\n        eventThread.setDaemon(True)\n        eventThread.start()\n\n    def EventsLoop(self):\n        LastReportHeartBeatTime = datetime.datetime.min\n        try:\n            while (True):\n                if (datetime.datetime.now() - LastReportHeartBeatTime) > datetime.timedelta(hours=12):\n                    LastReportHeartBeatTime = datetime.datetime.now()\n                    AddExtensionEvent(op=WALAEventOperation.HeartBeat, name=\"WALA\", isSuccess=True)\n                self.postNumbersInOneLoop = 0\n                
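# Each pass drains LibDir/events and posts any batches; the heartbeat above fires at most once every 12 hours.\n                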
self.CollectAndSendWALAEvents()\n                time.sleep(60)\n        except:\n            Error(\"Exception in events loop:\" + traceback.format_exc())\n\n    def SendEvent(self, providerid, events):\n        dataFormat = u'<?xml version=\"1.0\"?><TelemetryData version=\"1.0\"><Provider id=\"{0}\">{1}' \\\n                     '</Provider></TelemetryData>'\n        data = dataFormat.format(providerid, events)\n        self.post(\"/machine/?comp=telemetrydata\", data)\n\n    def CollectAndSendWALAEvents(self):\n        if not os.path.exists(self.eventdir):\n            return\n        # Throttling: send no more than 3 batches per 15 seconds\n        eventSendNumber = 0\n        eventFiles = os.listdir(self.eventdir)\n        events = {}\n        for file in eventFiles:\n            if not file.endswith(\".tld\"):\n                continue\n            with open(os.path.join(self.eventdir, file), \"rb\") as hfile:\n                # if the file cannot be opened or deleted, the exception propagates\n                xmlStr = hfile.read().decode(\"utf-8\", 'ignore')\n            os.remove(os.path.join(self.eventdir, file))\n            params = \"\"\n            eventid = \"\"\n            providerid = \"\"\n            # if an exception happens while processing an event, catch it and continue\n            try:\n                xmlStr = self.AddSystemInfo(xmlStr)\n                for node in xml.dom.minidom.parseString(xmlStr.encode(\"utf-8\")).childNodes[0].childNodes:\n                    if node.tagName == \"Param\":\n                        params += node.toxml()\n                    if node.tagName == \"Event\":\n                        eventid = node.getAttribute(\"id\")\n                    if node.tagName == \"Provider\":\n                        providerid = node.getAttribute(\"id\")\n            except:\n                Error(traceback.format_exc())\n                continue\n            if len(params) == 0 or len(eventid) == 0 or len(providerid) == 0:\n                Error(\"Empty field in params:\" + params + \" event id:\" + eventid + \" provider id:\" + providerid)\n                continue\n\n            eventstr = u'<Event id=\"{0}\"><![CDATA[{1}]]></Event>'.format(eventid, params)\n            if not events.get(providerid):\n                events[providerid] = \"\"\n            if len(events[providerid]) > 0 and len(events.get(providerid) + eventstr) >= 63 * 1024:\n                eventSendNumber += 1\n                self.SendEvent(providerid, events.get(providerid))\n                if eventSendNumber % 3 == 0:\n                    time.sleep(15)\n                events[providerid] = \"\"\n            if len(eventstr) >= 63 * 1024:\n                Error(\"Single event too large, abort \" + eventstr[:300])\n                continue\n\n            events[providerid] = events.get(providerid) + eventstr\n\n        for key in events.keys():\n            if len(events[key]) > 0:\n                eventSendNumber += 1\n                self.SendEvent(key, events[key])\n                if eventSendNumber % 3 == 0:\n                    time.sleep(15)\n\n    def AddSystemInfo(self, eventData):\n        if not self.isSystemInfoInitialized:\n            self.isSystemInfoInitialized = True\n            try:\n                self.sysInfo[\"OSVersion\"] = platform.system() + \":\" + \"-\".join(DistInfo(1)) + \":\" + platform.release()\n                self.sysInfo[\"GAVersion\"] = GuestAgentVersion\n                self.sysInfo[\"RAM\"] = MyDistro.getTotalMemory()\n                
self.sysInfo[\"Processors\"] = MyDistro.getProcessorCores()\n                sharedConfig = xml.dom.minidom.parse(\"/var/lib/waagent/SharedConfig.xml\").childNodes[0]\n                hostEnvConfig = xml.dom.minidom.parse(\"/var/lib/waagent/HostingEnvironmentConfig.xml\").childNodes[0]\n                gfiles = RunGetOutput(\"ls -t /var/lib/waagent/GoalState.*.xml\")[1]\n                goalStateConfig = xml.dom.minidom.parse(gfiles.split(\"\\n\")[0]).childNodes[0]\n                self.sysInfo[\"TenantName\"] = hostEnvConfig.getElementsByTagName(\"Deployment\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleName\"] = hostEnvConfig.getElementsByTagName(\"Role\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleInstanceName\"] = sharedConfig.getElementsByTagName(\"Instance\")[0].getAttribute(\"id\")\n                self.sysInfo[\"ContainerId\"] = goalStateConfig.getElementsByTagName(\"ContainerId\")[0].childNodes[\n                    0].nodeValue\n            except:\n                Error(traceback.format_exc())\n\n        eventObject = xml.dom.minidom.parseString(eventData.encode(\"utf-8\")).childNodes[0]\n        for node in eventObject.childNodes:\n            if node.tagName == \"Param\":\n                name = node.getAttribute(\"Name\")\n                if self.sysInfo.get(name):\n                    node.setAttribute(\"Value\", xml.sax.saxutils.escape(str(self.sysInfo[name])))\n\n        return eventObject.toxml()\n\n\nclass Agent(Util):\n    \"\"\"\n    Primary object container for the provisioning process.\n    \"\"\"\n\n    def __init__(self):\n        self.GoalState = None\n        self.Endpoint = None\n        self.LoadBalancerProbeServer = None\n        self.HealthReportCounter = 0\n        self.TransportCert = \"\"\n        self.EnvMonitor = None\n        self.SendData = None\n        self.DhcpResponse = None\n\n    def CheckVersions(self):\n        \"\"\"\n        Query endpoint server for wire protocol version.\n        Fail if our desired protocol version is not seen.\n        \"\"\"\n        # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n        # <Versions>\n        #  <Preferred>\n        #    <Version>2010-12-15</Version>\n        #  </Preferred>\n        #  <Supported>\n        #    <Version>2010-12-15</Version>\n        #    <Version>2010-28-10</Version>\n        #  </Supported>\n        # </Versions>\n        global ProtocolVersion\n        protocolVersionSeen = False\n        node = xml.dom.minidom.parseString(self.HttpGetWithoutHeaders(\"/?comp=versions\")).childNodes[0]\n        if node.localName != \"Versions\":\n            Error(\"CheckVersions: root not Versions\")\n            return False\n        for a in node.childNodes:\n            if a.nodeType == node.ELEMENT_NODE and a.localName == \"Supported\":\n                for b in a.childNodes:\n                    if b.nodeType == node.ELEMENT_NODE and b.localName == \"Version\":\n                        v = GetNodeTextData(b)\n                        LogIfVerbose(\"Fabric supported wire protocol version: \" + v)\n                        if v == ProtocolVersion:\n                            protocolVersionSeen = True\n            if a.nodeType == node.ELEMENT_NODE and a.localName == \"Preferred\":\n                v = GetNodeTextData(a.getElementsByTagName(\"Version\")[0])\n                Log(\"Fabric preferred wire protocol version: \" + v)\n        if not protocolVersionSeen:\n            Warn(\"Agent supported wire protocol version: \" + ProtocolVersion + \" was not 
advertised by Fabric.\")\n        else:\n            Log(\"Negotiated wire protocol version: \" + ProtocolVersion)\n        return True\n\n    def Unpack(self, buffer, offset, range):\n        \"\"\"\n        Unpack bytes into python values.\n        \"\"\"\n        result = 0\n        for i in range:\n            result = (result << 8) | Ord(buffer[offset + i])\n        return result\n\n    def UnpackLittleEndian(self, buffer, offset, length):\n        \"\"\"\n        Unpack little endian bytes into python values.\n        \"\"\"\n        return self.Unpack(buffer, offset, list(range(length - 1, -1, -1)))\n\n    def UnpackBigEndian(self, buffer, offset, length):\n        \"\"\"\n        Unpack big endian bytes into python values.\n        \"\"\"\n        return self.Unpack(buffer, offset, list(range(0, length)))\n\n    def HexDump3(self, buffer, offset, length):\n        \"\"\"\n        Dump range of buffer in formatted hex.\n        \"\"\"\n        return ''.join(['%02X' % Ord(char) for char in buffer[offset:offset + length]])\n\n    def HexDump2(self, buffer):\n        \"\"\"\n        Dump buffer in formatted hex.\n        \"\"\"\n        return self.HexDump3(buffer, 0, len(buffer))\n\n    def BuildDhcpRequest(self):\n        \"\"\"\n        Build DHCP request string.\n        \"\"\"\n        #\n        # typedef struct _DHCP {\n        #     UINT8   Opcode;                     /* op:     BOOTREQUEST or BOOTREPLY */\n        #     UINT8   HardwareAddressType;        /* htype:  ethernet */\n        #     UINT8   HardwareAddressLength;      /* hlen:   6 (48 bit mac address) */\n        #     UINT8   Hops;                       /* hops:   0 */\n        #     UINT8   TransactionID[4];           /* xid:    random */\n        #     UINT8   Seconds[2];                 /* secs:   0 */\n        #     UINT8   Flags[2];                   /* flags:  0 or 0x8000 for broadcast */\n        #     UINT8   ClientIpAddress[4];         /* ciaddr: 0 */\n        #     UINT8   YourIpAddress[4];           /* yiaddr: 0 */\n        #     UINT8   ServerIpAddress[4];         /* siaddr: 0 */\n        #     UINT8   RelayAgentIpAddress[4];     /* giaddr: 0 */\n        #     UINT8   ClientHardwareAddress[16];  /* chaddr: 6 byte ethernet MAC address */\n        #     UINT8   ServerName[64];             /* sname:  0 */\n        #     UINT8   BootFileName[128];          /* file:   0  */\n        #     UINT8   MagicCookie[4];             /*   99  130   83   99 */\n        #                                         /* 0x63 0x82 0x53 0x63 */\n        #     /* options -- hard code ours */\n        #\n        #     UINT8 MessageTypeCode;              /* 53 */\n        #     UINT8 MessageTypeLength;            /* 1 */\n        #     UINT8 MessageType;                  /* 1 for DISCOVER */\n        #     UINT8 End;                          /* 255 */\n        # } DHCP;\n        #\n\n        # tuple of 244 zeros\n        # (struct.pack_into would be good here, but requires Python 2.5)\n        sendData = [0] * 244\n\n        transactionID = os.urandom(4)\n        macAddress = MyDistro.GetMacAddress()\n\n        # Opcode = 1\n        # HardwareAddressType = 1 (ethernet/MAC)\n        # HardwareAddressLength = 6 (ethernet/MAC/48 bits)\n        for a in range(0, 3):\n            sendData[a] = [1, 1, 6][a]\n\n        # fill in transaction id (random number to ensure response matches request)\n        for a in range(0, 4):\n            sendData[4 + a] = Ord(transactionID[a])\n\n        LogIfVerbose(\"BuildDhcpRequest: 
transactionId:%s,%04X\" % (\n        self.HexDump2(transactionID), self.UnpackBigEndian(sendData, 4, 4)))\n\n        # fill in ClientHardwareAddress\n        for a in range(0, 6):\n            sendData[0x1C + a] = Ord(macAddress[a])\n\n        # DHCP Magic Cookie: 99, 130, 83, 99\n        # MessageTypeCode = 53 DHCP Message Type\n        # MessageTypeLength = 1\n        # MessageType = DHCPDISCOVER\n        # End = 255 DHCP_END\n        for a in range(0, 8):\n            sendData[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a]\n        return array.array(\"B\", sendData)\n\n    def IntegerToIpAddressV4String(self, a):\n        \"\"\"\n        Convert a 32-bit integer to a dotted-quad IPv4 address string.\n        \"\"\"\n        return \"%u.%u.%u.%u\" % ((a >> 24) & 0xFF, (a >> 16) & 0xFF, (a >> 8) & 0xFF, a & 0xFF)\n\n    def RouteAdd(self, net, mask, gateway):\n        \"\"\"\n        Add specified route using /sbin/route add -net.\n        \"\"\"\n        net = self.IntegerToIpAddressV4String(net)\n        mask = self.IntegerToIpAddressV4String(mask)\n        gateway = self.IntegerToIpAddressV4String(gateway)\n        Log(\"Route add: net={0}, mask={1}, gateway={2}\".format(net, mask, gateway))\n        MyDistro.routeAdd(net, mask, gateway)\n\n    def SetDefaultGateway(self, gateway):\n        \"\"\"\n        Set default gateway\n        \"\"\"\n        gateway = self.IntegerToIpAddressV4String(gateway)\n        Log(\"Set default gateway: {0}\".format(gateway))\n        MyDistro.setDefaultGateway(gateway)\n\n    def HandleDhcpResponse(self, sendData, receiveBuffer):\n        \"\"\"\n        Parse DHCP response:\n        Set default gateway.\n        Set default routes.\n        Retrieve endpoint server.\n        Returns endpoint server or None on error.\n        \"\"\"\n        LogIfVerbose(\"HandleDhcpResponse\")\n        bytesReceived = len(receiveBuffer)\n        if bytesReceived < 0xF6:\n            Error(\"HandleDhcpResponse: Too few bytes received \" + str(bytesReceived))\n            return None\n\n        LogIfVerbose(\"BytesReceived: \" + hex(bytesReceived))\n        LogWithPrefixIfVerbose(\"DHCP response:\", HexDump(receiveBuffer, bytesReceived))\n\n        # check transactionId, cookie, MAC address\n        # cookie should never mismatch\n        # transactionId and MAC address may mismatch if we see a response meant for another machine\n\n        for offsets in [list(range(4, 4 + 4)), list(range(0x1C, 0x1C + 6)), list(range(0xEC, 0xEC + 4))]:\n            for offset in offsets:\n                sentByte = Ord(sendData[offset])\n                receivedByte = Ord(receiveBuffer[offset])\n                if sentByte != receivedByte:\n                    LogIfVerbose(\"HandleDhcpResponse: sent cookie:\" + self.HexDump3(sendData, 0xEC, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: rcvd cookie:\" + self.HexDump3(receiveBuffer, 0xEC, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: sent transactionID:\" + self.HexDump3(sendData, 4, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: rcvd transactionID:\" + self.HexDump3(receiveBuffer, 4, 4))\n                    LogIfVerbose(\"HandleDhcpResponse: sent ClientHardwareAddress:\" + self.HexDump3(sendData, 0x1C, 6))\n                    LogIfVerbose(\n                        \"HandleDhcpResponse: rcvd ClientHardwareAddress:\" + self.HexDump3(receiveBuffer, 0x1C, 6))\n                    LogIfVerbose(\"HandleDhcpResponse: transactionId, cookie, or MAC address mismatch\")\n                    return None\n        endpoint = None\n\n        
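# DHCP options are TLV-encoded: one byte of option code, one byte of length, then that many bytes of value.  A hypothetical option-245 entry, for illustration only: F5 04 0A 01 02 03 decodes as option 245, length 4, wire server 10.1.2.3.\n        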
#\n        # Walk all the returned options, parsing out what we need and ignoring the others.\n        # We need the custom option 245 to find the endpoint we talk to and, to work\n        # around some Linux DHCP client incompatibilities, option 3 for the default\n        # gateway and option 249 for routes.  Option 255 marks the end.\n        #\n\n        i = 0xF0  # offset to first option\n        while i < bytesReceived:\n            option = Ord(receiveBuffer[i])\n            length = 0\n            if (i + 1) < bytesReceived:\n                length = Ord(receiveBuffer[i + 1])\n            LogIfVerbose(\"DHCP option \" + hex(option) + \" at offset:\" + hex(i) + \" with length:\" + hex(length))\n            if option == 255:\n                LogIfVerbose(\"DHCP packet ended at offset \" + hex(i))\n                break\n            elif option == 249:\n                # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n                LogIfVerbose(\"Routes at offset:\" + hex(i) + \" with length:\" + hex(length))\n                if length < 5:\n                    Error(\"Data too small for option \" + str(option))\n                j = i + 2\n                while j < (i + length + 2):\n                    maskLengthBits = Ord(receiveBuffer[j])\n                    maskLengthBytes = (((maskLengthBits + 7) & ~7) >> 3)\n                    mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - maskLengthBits))\n                    j += 1\n                    net = self.UnpackBigEndian(receiveBuffer, j, maskLengthBytes)\n                    net <<= (32 - maskLengthBytes * 8)\n                    net &= mask\n                    j += maskLengthBytes\n                    gateway = self.UnpackBigEndian(receiveBuffer, j, 4)\n                    j += 4\n                    self.RouteAdd(net, mask, gateway)\n                if j != (i + length + 2):\n                    Error(\"HandleDhcpResponse: Unable to parse routes\")\n            elif option == 3 or option == 245:\n                if i + 5 < bytesReceived:\n                    if length != 4:\n                        Error(\"HandleDhcpResponse: Endpoint or Default Gateway not 4 bytes\")\n                        return None\n                    gateway = self.UnpackBigEndian(receiveBuffer, i + 2, 4)\n                    IpAddress = self.IntegerToIpAddressV4String(gateway)\n                    if option == 3:\n                        self.SetDefaultGateway(gateway)\n                        name = \"DefaultGateway\"\n                    else:\n                        endpoint = IpAddress\n                        name = \"Azure wire protocol endpoint\"\n                    LogIfVerbose(name + \": \" + IpAddress + \" at \" + hex(i))\n                else:\n                    Error(\"HandleDhcpResponse: Data too small for option \" + str(option))\n            else:\n                LogIfVerbose(\"Skipping DHCP option \" + hex(option) + \" at \" + hex(i) + \" with length \" + hex(length))\n            i += length + 2\n        return endpoint\n\n    def DoDhcpWork(self):\n        \"\"\"\n        Discover the wire server via DHCP option 245.\n        Work around incompatibilities with Azure DHCP servers.\n        \"\"\"\n        ShortSleep = False  # Sleep 1 second before retrying DHCP queries.\n        ifname = None\n\n        sleepDurations = [0, 10, 30, 60, 60]\n        maxRetry = len(sleepDurations)\n        lastTry = (maxRetry - 1)\n        for retry in range(0, maxRetry):\n            # Ensure names referenced in the finally block are always bound.\n            sock = None\n            missingDefaultRoute = False\n            try:\n                # Open DHCP port if iptables is enabled.\n    
            Run(\"iptables -D INPUT -p udp --dport 68 -j ACCEPT\",\n                    chk_err=False)  # We suppress error logging on error.\n                Run(\"iptables -I INPUT -p udp --dport 68 -j ACCEPT\",\n                    chk_err=False)  # We suppress error logging on error.\n                strRetry = str(retry)\n                prefix = \"DoDhcpWork: try=\" + strRetry\n                LogIfVerbose(prefix)\n                sendData = self.BuildDhcpRequest()\n                LogWithPrefixIfVerbose(\"DHCP request:\", HexDump(sendData, len(sendData)))\n                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n                sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n                missingDefaultRoute = True\n                try:\n                    # On FreeBSD skip the check (missingDefaultRoute stays True); elsewhere inspect the routing table.\n                    if DistInfo()[0] != 'FreeBSD':\n                        routes = RunGetOutput(\"route -n\")[1]\n                        for line in routes.split('\\n'):\n                            if line.startswith(\"0.0.0.0 \") or line.startswith(\"default \"):\n                                missingDefaultRoute = False\n                except:\n                    pass\n                if missingDefaultRoute:\n                    # This is required because sending after binding to 0.0.0.0 fails with\n                    # network unreachable when the default gateway is not set up.\n                    ifname = MyDistro.GetInterfaceName()\n                    Log(\"DoDhcpWork: Missing default route - adding broadcast route for DHCP.\")\n                    if DistInfo()[0] == 'FreeBSD':\n                        Run(\"route add -net 255.255.255.255 -iface \" + ifname, chk_err=False)\n                    else:\n                        Run(\"route add 255.255.255.255 dev \" + ifname, chk_err=False)\n                if MyDistro.isDHCPEnabled():\n                    MyDistro.stopDHCP()\n                sock.bind((\"0.0.0.0\", 68))\n                sock.sendto(sendData, (\"<broadcast>\", 67))\n                sock.settimeout(10)\n                Log(\"DoDhcpWork: Setting socket.timeout=10, entering recv\")\n                receiveBuffer = sock.recv(1024)\n                endpoint = self.HandleDhcpResponse(sendData, receiveBuffer)\n                if endpoint == None:\n                    LogIfVerbose(\"DoDhcpWork: No endpoint found\")\n                if endpoint != None or retry == lastTry:\n                    if endpoint != None:\n                        self.SendData = sendData\n                        self.DhcpResponse = receiveBuffer\n                    if retry == lastTry:\n                        LogIfVerbose(\"DoDhcpWork: try=\" + strRetry)\n                    return endpoint\n                sleepDuration = [sleepDurations[retry % len(sleepDurations)], 1][ShortSleep]\n                LogIfVerbose(\"DoDhcpWork: sleep=\" + str(sleepDuration))\n                time.sleep(sleepDuration)\n            except Exception as e:\n                ErrorWithPrefix(prefix, str(e))\n                ErrorWithPrefix(prefix, traceback.format_exc())\n            finally:\n                if sock is not None:\n                    sock.close()\n                if missingDefaultRoute:\n                    # We added this route - delete it\n                    Log(\"DoDhcpWork: Removing broadcast route for DHCP.\")\n                    if DistInfo()[0] == 'FreeBSD':\n                        Run(\"route del -net 
255.255.255.255 -iface \" + ifname, chk_err=False)\n                    else:\n                        Run(\"route del 255.255.255.255 dev \" + ifname,\n                            chk_err=False)  # We suppress error logging on error.\n                if MyDistro.isDHCPEnabled():\n                    MyDistro.startDHCP()\n        return None\n\n    def UpdateAndPublishHostName(self, name):\n        \"\"\"\n        Set hostname locally and publish to iDNS\n        \"\"\"\n        Log(\"Setting host name: \" + name)\n        MyDistro.publishHostname(name)\n        ethernetInterface = MyDistro.GetInterfaceName()\n        MyDistro.RestartInterface(ethernetInterface)\n        self.RestoreRoutes()\n\n    def RestoreRoutes(self):\n        \"\"\"\n        If there is a DHCP response, then call HandleDhcpResponse.\n        \"\"\"\n        if self.SendData != None and self.DhcpResponse != None:\n            self.HandleDhcpResponse(self.SendData, self.DhcpResponse)\n\n    def UpdateGoalState(self):\n        \"\"\"\n        Retrieve goal state information from endpoint server.\n        Parse the XML and initialize Agent.GoalState object.\n        Return object or None on error.\n        \"\"\"\n        goalStateXml = None\n        maxRetry = 9\n        log = NoLog\n        for retry in range(1, maxRetry + 1):\n            strRetry = str(retry)\n            log(\"retry UpdateGoalState,retry=\" + strRetry)\n            goalStateXml = self.HttpGetWithHeaders(\"/machine/?comp=goalstate\")\n            if goalStateXml != None:\n                break\n            log = Log\n            time.sleep(retry)\n        if not goalStateXml:\n            Error(\"UpdateGoalState failed.\")\n            return\n        Log(\"Retrieved GoalState from Azure Fabric.\")\n        self.GoalState = GoalState(self).Parse(goalStateXml)\n        return self.GoalState\n\n    def ReportReady(self):\n        \"\"\"\n        Send health report 'Ready' to server.\n        This signals the fabric that our provisioning is completed,\n        and the host is ready for operation.\n        \"\"\"\n        counter = (self.HealthReportCounter + 1) % 1000000\n        self.HealthReportCounter = counter\n        healthReport = (\n                    \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><Health xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\"><GoalStateIncarnation>\"\n                    + self.GoalState.Incarnation\n                    + \"</GoalStateIncarnation><Container><ContainerId>\"\n                    + self.GoalState.ContainerId\n                    + \"</ContainerId><RoleInstanceList><Role><InstanceId>\"\n                    + self.GoalState.RoleInstanceId\n                    + \"</InstanceId><Health><State>Ready</State></Health></Role></RoleInstanceList></Container></Health>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=health\", healthReport)\n        if a != None:\n            return a.getheader(\"x-ms-latest-goal-state-incarnation-number\")\n        return None\n\n    def ReportNotReady(self, status, desc):\n        \"\"\"\n        Send health report 'Provisioning' to server.\n        This signals the fabric that our provisioning is starting.\n        \"\"\"\n        healthReport = (\n                    \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><Health xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\"><GoalStateIncarnation>\"\n                    + self.GoalState.Incarnation\n      
              + \"</GoalStateIncarnation><Container><ContainerId>\"\n                    + self.GoalState.ContainerId\n                    + \"</ContainerId><RoleInstanceList><Role><InstanceId>\"\n                    + self.GoalState.RoleInstanceId\n                    + \"</InstanceId><Health><State>NotReady</State>\"\n                    + \"<Details><SubStatus>\" + status + \"</SubStatus><Description>\" + desc + \"</Description></Details>\"\n                    + \"</Health></Role></RoleInstanceList></Container></Health>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=health\", healthReport)\n        if a != None:\n            return a.getheader(\"x-ms-latest-goal-state-incarnation-number\")\n        return None\n\n    def ReportRoleProperties(self, thumbprint):\n        \"\"\"\n        Send roleProperties and thumbprint to server.\n        \"\"\"\n        roleProperties = (\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><RoleProperties><Container>\"\n                          + \"<ContainerId>\" + self.GoalState.ContainerId + \"</ContainerId>\"\n                          + \"<RoleInstances><RoleInstance>\"\n                          + \"<Id>\" + self.GoalState.RoleInstanceId + \"</Id>\"\n                          + \"<Properties><Property name=\\\"CertificateThumbprint\\\" value=\\\"\" + thumbprint + \"\\\" /></Properties>\"\n                          + \"</RoleInstance></RoleInstances></Container></RoleProperties>\")\n        a = self.HttpPostWithHeaders(\"/machine?comp=roleProperties\",\n                                     roleProperties)\n        Log(\"Posted Role Properties. CertificateThumbprint=\" + thumbprint)\n        return a\n\n    def LoadBalancerProbeServer_Shutdown(self):\n        \"\"\"\n        Shutdown the LoadBalancerProbeServer.\n        \"\"\"\n        if self.LoadBalancerProbeServer != None:\n            self.LoadBalancerProbeServer.shutdown()\n            self.LoadBalancerProbeServer = None\n\n    def GenerateTransportCert(self):\n        \"\"\"\n        Create ssl certificate for https communication with endpoint server.\n        \"\"\"\n        Run(\n            Openssl + \" req -x509 -nodes -subj /CN=LinuxTransport -days 32768 -newkey rsa:2048 -keyout TransportPrivate.pem -out TransportCert.pem\")\n        cert = \"\"\n        for line in GetFileContents(\"TransportCert.pem\").split('\\n'):\n            if not \"CERTIFICATE\" in line:\n                cert += line.rstrip()\n        return cert\n\n    def DoVmmStartup(self):\n        \"\"\"\n        Spawn the VMM startup script.\n        \"\"\"\n        Log(\"Starting Microsoft System Center VMM Initialization Process\")\n        pid = subprocess.Popen(\n            [\"/bin/bash\", \"/mnt/cdrom/secure/\" + VMM_STARTUP_SCRIPT_NAME, \"-p /mnt/cdrom/secure/ \"]).pid\n        time.sleep(5)\n        sys.exit(0)\n\n    def TryUnloadAtapiix(self):\n        \"\"\"\n        If global modloaded is True (we loaded the ata_piix kernel module), unload it.\n        \"\"\"\n        if modloaded:\n            Run(\"rmmod ata_piix.ko\", chk_err=False)\n            Log(\"Unloaded ata_piix.ko driver for ATAPI CD-ROM\")\n\n    def TryLoadAtapiix(self):\n        \"\"\"\n        Load the ata_piix kernel module if it exists.\n        If successful, set global modloaded to True.\n        If unable to load the module, leave modloaded False.\n        \"\"\"\n        global modloaded\n        modloaded = False\n        retcode, krn = RunGetOutput('uname -r')\n        krn_pth = '/lib/modules/' + krn.strip('\\n') + 
'/kernel/drivers/ata/ata_piix.ko'\n        if Run(\"lsmod | grep ata_piix\", chk_err=False) == 0:\n            Log(\"Module \" + krn_pth + \" driver for ATAPI CD-ROM is already present.\")\n            return 0\n        if retcode:\n            Error(\"Unable to provision: Failed to call uname -r\")\n            return \"Unable to provision: Failed to call uname\"\n        if os.path.isfile(krn_pth):\n            retcode, output = RunGetOutput(\"insmod \" + krn_pth, chk_err=False)\n        else:\n            Log(\"Module \" + krn_pth + \" driver for ATAPI CD-ROM does not exist.\")\n            return 1\n        if retcode != 0:\n            Error('Error calling insmod for ' + krn_pth + ' driver for ATAPI CD-ROM')\n            return retcode\n        time.sleep(1)\n        # check 3 times if the mod is loaded\n        for i in range(3):\n            if Run('lsmod | grep ata_piix'):\n                continue\n            else:\n                modloaded = True\n                break\n        if not modloaded:\n            Error('Unable to load ' + krn_pth + ' driver for ATAPI CD-ROM')\n            return 1\n\n        Log(\"Loaded \" + krn_pth + \" driver for ATAPI CD-ROM\")\n\n        # we have succeeded loading the ata_piix mod if it can be done.\n\n    def SearchForVMMStartup(self):\n        \"\"\"\n        Search for a DVD/CDROM containing VMM's VMM_CONFIG_FILE_NAME.\n        Call TryLoadAtapiix in case we must load the ata_piix module first.\n\n        If VMM_CONFIG_FILE_NAME is found, call DoVmmStartup.\n        Else, return to Azure Provisioning process.\n        \"\"\"\n        self.TryLoadAtapiix()\n        if os.path.exists('/mnt/cdrom/secure') == False:\n            CreateDir(\"/mnt/cdrom/secure\", \"root\", 0o700)\n        mounted = False\n        for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)', x) for x in os.listdir('/dev/')]:\n            if dvds == None:\n                continue\n            dvd = '/dev/' + dvds.group(0)\n            if Run(\"LC_ALL=C fdisk -l \" + dvd + \" | grep Disk\", chk_err=False):\n                continue  # Not mountable\n            else:\n                for retry in range(1, 6):\n                    retcode, output = RunGetOutput(\"mount -v \" + dvd + \" /mnt/cdrom/secure\")\n                    Log(output[:-1])\n                    if retcode == 0:\n                        Log(\"mount succeeded on attempt #\" + str(retry))\n                        mounted = True\n                        break\n                    if 'is already mounted on /mnt/cdrom/secure' in output:\n                        Log(\"Device \" + dvd + \" is already mounted on /mnt/cdrom/secure.\" + str(retry))\n                        mounted = True\n                        break\n                    Log(\"mount failed on attempt #\" + str(retry))\n                    Log(\"mount loop sleeping 5...\")\n                    time.sleep(5)\n                if not mounted:\n                    # unable to mount\n                    continue\n                if not os.path.isfile(\"/mnt/cdrom/secure/\" + VMM_CONFIG_FILE_NAME):\n                    # nope - mount the next drive\n                    if mounted:\n                        Run(\"umount \" + dvd, chk_err=False)\n                        mounted = False\n                        continue\n                else:  # it is the vmm startup\n                    self.DoVmmStartup()\n\n        Log(\"VMM Init script not found.  
Provisioning for Azure\")\n        return\n\n    def Provision(self):\n        \"\"\"\n        Responsible for:\n        Regenerate ssh keys,\n        Mount, read, and parse ovf-env.xml from the provisioning DVD-ROM\n        Process the ovf-env.xml info\n        Call ReportRoleProperties\n        If configured, delete root password.\n        Return None on success, error string on error.\n        \"\"\"\n        enabled = Config.get(\"Provisioning.Enabled\")\n        if enabled != None and enabled.lower().startswith(\"n\"):\n            return\n        Log(\"Provisioning image started.\")\n        type = Config.get(\"Provisioning.SshHostKeyPairType\")\n        if type == None:\n            type = \"rsa\"\n        regenerateKeys = Config.get(\"Provisioning.RegenerateSshHostKeyPair\")\n        if regenerateKeys == None or regenerateKeys.lower().startswith(\"y\"):\n            Run(\"rm -f /etc/ssh/ssh_host_*key*\")\n            Run(\"ssh-keygen -N '' -t \" + type + \" -f /etc/ssh/ssh_host_\" + type + \"_key\")\n            MyDistro.restartSshService()\n        # SetFileContents(LibDir + \"/provisioned\", \"\")\n        dvd = None\n        for dvds in [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9]?)', x) for x in os.listdir('/dev/')]:\n            if dvds == None:\n                continue\n            dvd = '/dev/' + dvds.group(0)\n        if dvd == None:\n            # No DVD device detected\n            Error(\"No DVD device detected, unable to provision.\")\n            return \"No DVD device detected, unable to provision.\"\n        if MyDistro.mediaHasFilesystem(dvd) is False:\n            out = MyDistro.load_ata_piix()\n            if out:\n                return out\n            for i in range(10):  # we may have to wait\n                if os.path.exists(dvd):\n                    break\n                Log(\"Waiting for DVD - sleeping 1 - \" + str(i + 1) + \" try...\")\n                time.sleep(1)\n        if os.path.exists('/mnt/cdrom/secure') == False:\n            CreateDir(\"/mnt/cdrom/secure\", \"root\", 0o700)\n        # begin mount loop - 5 tries - 5 sec wait between\n        for retry in range(1, 6):\n            location = '/mnt/cdrom/secure'\n            retcode, output = MyDistro.mountDVD(dvd, location)\n            Log(output[:-1])\n            if retcode == 0:\n                Log(\"mount succeeded on attempt #\" + str(retry))\n                break\n            if 'is already mounted on /mnt/cdrom/secure' in output:\n                Log(\"Device \" + dvd + \" is already mounted on /mnt/cdrom/secure.\" + str(retry))\n                break\n            Log(\"mount failed on attempt #\" + str(retry))\n            Log(\"mount loop sleeping 5...\")\n            time.sleep(5)\n        if not os.path.isfile(\"/mnt/cdrom/secure/ovf-env.xml\"):\n            Error(\"Unable to provision: Missing ovf-env.xml on DVD.\")\n            return \"Failed to retrieve provisioning data (0x02).\"\n        ovfxml = (GetFileContents(u\"/mnt/cdrom/secure/ovf-env.xml\",\n                                  asbin=False))  # use unicode here to ensure correct codec gets used.\n        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128:\n            ovfxml = ovfxml[\n                     3:]  # BOM is not stripped.  
First three bytes are > 128 and not unicode chars, so we ignore them.\n        ovfxml = ovfxml.strip(chr(0x00))  # we may have NULLs.\n        ovfxml = ovfxml[ovfxml.find('<?'):]  # chop leading text if present\n        SetFileContents(\"ovf-env.xml\", re.sub(\"<UserPassword>.*?<\", \"<UserPassword>*<\", ovfxml))\n        Run(\"umount \" + dvd, chk_err=False)\n        MyDistro.unload_ata_piix()\n        error = None\n        if ovfxml != None:\n            Log(\"Provisioning image using OVF settings in the DVD.\")\n            ovfobj = OvfEnv().Parse(ovfxml)\n            if ovfobj != None:\n                error = ovfobj.Process()\n                if error:\n                    Error(\"Provisioning image FAILED \" + error)\n                    return (\"Provisioning image FAILED \" + error)\n            Log(\"Ovf XML process finished\")\n        # This is done here because regenerated SSH host key pairs may be overwritten when processing the ovfxml\n        fingerprint = RunGetOutput(\"ssh-keygen -lf /etc/ssh/ssh_host_\" + type + \"_key.pub\")[1].rstrip().split()[\n            1].replace(':', '')\n        self.ReportRoleProperties(fingerprint)\n        delRootPass = Config.get(\"Provisioning.DeleteRootPassword\")\n        if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n            MyDistro.deleteRootPassword()\n        Log(\"Provisioning image completed.\")\n        return error\n\n    def Run(self):\n        \"\"\"\n        Called by 'waagent -daemon'.\n        Main loop to process the goal state.  State is posted every 25 seconds\n        when provisioning has been completed.\n        \n        Search for VMM environment, start VMM script if found.\n        Perform DHCP and endpoint server discovery by calling DoDhcpWork().\n        Check wire protocol versions.\n        Set SCSI timeout on root device.\n        Call GenerateTransportCert() to create ssl certs for server communication.\n        Call UpdateGoalState().\n        If not provisioned, call ReportNotReady(\"Provisioning\", \"Starting\")\n        Call Provision(), set global provisioned = True if successful.\n        Call goalState.Process()\n        Start LBProbeServer if indicated in waagent.conf.\n        Start the StateConsumer if indicated in waagent.conf.\n        ReportReady if provisioning is complete.\n        If provisioning failed, call ReportNotReady(\"ProvisioningFailed\", provisionError)\n        \"\"\"\n        SetFileContents(\"/var/run/waagent.pid\", str(os.getpid()) + \"\\n\")\n\n        reportHandlerStatusCount = 0\n
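        # reportHandlerStatusCount throttles the 'Successfully reported handler status' log line\n        # below: status is reported every 25 seconds, but only every 1000th success is logged.\n\n        # Determine if we are in VMM.  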
Spawn VMM_STARTUP_SCRIPT_NAME if found.\n        self.SearchForVMMStartup()\n        ipv4 = ''\n        while ipv4 == '' or ipv4 == '0.0.0.0':\n            ipv4 = MyDistro.GetIpv4Address()\n            if ipv4 == '' or ipv4 == '0.0.0.0':\n                Log(\"Waiting for network.\")\n                time.sleep(10)\n\n        Log(\"IPv4 address: \" + ipv4)\n        mac = ''\n        mac = MyDistro.GetMacAddress()\n        if len(mac) > 0:\n            Log(\"MAC  address: \" + \":\".join([\"%02X\" % Ord(a) for a in mac]))\n\n        # Consume Entropy in ACPI table provided by Hyper-V\n        try:\n            SetFileContents(\"/dev/random\", GetFileContents(\"/sys/firmware/acpi/tables/OEM0\"))\n        except:\n            pass\n\n        Log(\"Probing for Azure environment.\")\n        self.Endpoint = self.DoDhcpWork()\n\n        while self.Endpoint == None:\n            Log(\"Retry environment detection in 60 seconds\")\n            time.sleep(60)\n            self.Endpoint = self.DoDhcpWork()\n\n        Log(\"Discovered Azure endpoint: \" + self.Endpoint)\n        if not self.CheckVersions():\n            Error(\"Agent.CheckVersions failed\")\n            sys.exit(1)\n\n        self.EnvMonitor = EnvMonitor()\n\n        # Set SCSI timeout on SCSI disks\n        MyDistro.initScsiDiskTimeout()\n        global provisioned\n        global provisionError\n\n        global Openssl\n        Openssl = Config.get(\"OS.OpensslPath\")\n        if Openssl == None:\n            Openssl = \"openssl\"\n\n        self.TransportCert = self.GenerateTransportCert()\n\n        eventMonitor = None\n        incarnation = None  # goalStateIncarnationFromHealthReport\n        currentPort = None  # loadBalancerProbePort\n        goalState = None  # self.GoalState, instance of GoalState\n        provisioned = os.path.exists(LibDir + \"/provisioned\")\n        program = Config.get(\"Role.StateConsumer\")\n        provisionError = None\n        lbProbeResponder = True\n\n        lbProbeResponderNo = Config.no(\"LBProbeResponder\")\n        if lbProbeResponderNo:\n            lbProbeResponder = False\n\n        try:\n            updateRdmaDriverConfigured = Config.yes(\"OS.UpdateRdmaDriver\")\n            updateRdmaRepository = Config.get(\"OS.RdmaRepository\")\n            if (updateRdmaDriverConfigured):\n                MyDistro.rdmaUpdate(updateRdmaRepository)\n            else:\n                Log(\"OS.UpdateRdmaDriver configured to \" + str(\n                    updateRdmaDriverConfigured) + \" so skip the rdma update.\")\n            checkRdmaDriverConfigured = Config.yes(\"OS.CheckRdmaDriver\")\n            if (checkRdmaDriverConfigured):\n                checkRdmaResult = MyDistro.checkRDMA()\n                Log(\"Rdma check result is \" + str(checkRdmaResult))\n            else:\n                Log(\"OS.CheckRdmaDriver configured to \" + str(checkRdmaDriverConfigured) + \" so skip the rdma check.\")\n        except Exception as e:\n            errMsg = 'check or update Rdma driver failed with error: %s, stack trace: %s' % (\n            str(e), traceback.format_exc())\n            Error(errMsg)\n\n        while True:\n            if (goalState == None) or (incarnation == None) or (goalState.Incarnation != incarnation):\n                try:\n                    goalState = self.UpdateGoalState()\n                except HttpResourceGoneError as e:\n                    Warn(\"Incarnation is out of date:{0}\".format(e))\n                    incarnation = None\n                    continue\n\n               
 if goalState == None:\n                    Warn(\"Failed to fetch goalstate\")\n                    continue\n\n                if provisioned == False:\n                    self.ReportNotReady(\"Provisioning\", \"Starting\")\n\n                goalState.Process()\n\n                if provisioned == False:\n                    provisionError = self.Provision()\n                    if provisionError == None:\n                        provisioned = True\n                        SetFileContents(LibDir + \"/provisioned\", \"\")\n                        lastCtime = \"NOTFIND\"\n                        try:\n                            walaConfigFile = MyDistro.getConfigurationPath()\n                            lastCtime = time.ctime(os.path.getctime(walaConfigFile))\n                        except:\n                            pass\n                        # Get Ctime of wala config, can help identify the base image of this VM\n                        AddExtensionEvent(name=\"WALA\", op=WALAEventOperation.Provision, isSuccess=True,\n                                          message=\"WALA Config Ctime:\" + lastCtime)\n\n                        executeCustomData = Config.get(\"Provisioning.ExecuteCustomData\")\n                        if executeCustomData != None and executeCustomData.lower().startswith(\"y\"):\n                            if os.path.exists(LibDir + '/CustomData'):\n                                Run('chmod +x ' + LibDir + '/CustomData')\n                                Run(LibDir + '/CustomData')\n                            else:\n                                Error(LibDir + '/CustomData does not exist.')\n\n                #\n                # only one port supported\n                # restart server if new port is different than old port\n                # stop server if no longer a port\n                #\n                goalPort = goalState.LoadBalancerProbePort\n                if currentPort != goalPort:\n                    try:\n                        self.LoadBalancerProbeServer_Shutdown()\n                        currentPort = goalPort\n                        if currentPort != None and lbProbeResponder == True:\n                            self.LoadBalancerProbeServer = LoadBalancerProbeServer(currentPort)\n                            if self.LoadBalancerProbeServer == None:\n                                lbProbeResponder = False\n                                Log(\"Unable to create LBProbeResponder.\")\n                    except Exception as e:\n                        Error(\"Failed to launch LBProbeResponder: {0}\".format(e))\n                        currentPort = None\n\n                # Report SSH key fingerprint\n                type = Config.get(\"Provisioning.SshHostKeyPairType\")\n                if type == None:\n                    type = \"rsa\"\n\n                host_key_path = \"/etc/ssh/ssh_host_\" + type + \"_key.pub\"\n                if (MyDistro.waitForSshHostKey(host_key_path)):\n                    fingerprint = \\\n                    RunGetOutput(\"ssh-keygen -lf /etc/ssh/ssh_host_\" + type + \"_key.pub\")[1].rstrip().split()[\n                        1].replace(':', '')\n                    self.ReportRoleProperties(fingerprint)\n\n            if program != None and DiskActivated == True:\n                try:\n                    Children.append(subprocess.Popen([program, \"Ready\"]))\n                except OSError as e:\n                    ErrorWithPrefix('SharedConfig.Parse', 'Exception: ' + str(e) + ' occurred launching ' + 
program)\n                program = None\n\n            sleepToReduceAccessDenied = 3\n            time.sleep(sleepToReduceAccessDenied)\n            if provisionError != None:\n                incarnation = self.ReportNotReady(\"ProvisioningFailed\", provisionError)\n            else:\n                incarnation = self.ReportReady()\n            # Process our extensions.\n            if goalState.ExtensionsConfig == None and goalState.ExtensionsConfigXml != None:\n                reportHandlerStatusCount = 0  # Reset count when new goal state comes\n                goalState.ExtensionsConfig = ExtensionsConfig().Parse(goalState.ExtensionsConfigXml)\n\n            # report the status/heartbeat results of extension processing\n            if goalState.ExtensionsConfig != None:\n                ret = goalState.ExtensionsConfig.ReportHandlerStatus()\n                if ret != 0:\n                    Error(\"Failed to report handler status\")\n                elif reportHandlerStatusCount % 1000 == 0:\n                    # The agent reports handler status every 25 seconds; use a counter to reduce the log entries\n                    Log(\"Successfully reported handler status\")\n                reportHandlerStatusCount += 1\n            global LinuxDistro\n            if LinuxDistro == \"redhat\":\n                DoInstallRHUIRPM()\n\n            if not eventMonitor:\n                eventMonitor = WALAEventMonitor(self.HttpPostWithHeaders)\n                eventMonitor.StartEventsLoop()\n\n            time.sleep(25 - sleepToReduceAccessDenied)\n\n\nWaagentLogrotate = \"\"\"\\\n/var/log/waagent.log {\n    monthly\n    rotate 6\n    notifempty\n    missingok\n}\n\"\"\"\n\n\ndef GetMountPoint(mountlist, device):\n    \"\"\"\n    Example of mountlist:\n        /dev/sda1 on / type ext4 (rw)\n        proc on /proc type proc (rw)\n        sysfs on /sys type sysfs (rw)\n        devpts on /dev/pts type devpts (rw,gid=5,mode=620)\n        tmpfs on /dev/shm type tmpfs (rw,rootcontext=\"system_u:object_r:tmpfs_t:s0\")\n        none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n        /dev/sdb1 on /mnt/resource type ext4 (rw)\n    \"\"\"\n    if (mountlist and device):\n        for entry in mountlist.split('\\n'):\n            if (re.search(device, entry)):\n                tokens = entry.split()\n                # Return the 3rd column of this line\n                return tokens[2] if len(tokens) > 2 else None\n    return None\n\n\ndef FindInLinuxKernelCmdline(option):\n    \"\"\"\n    Return match object if 'option' is present in the kernel boot options\n    of the grub configuration.\n    \"\"\"\n    m = None\n    matchs = r'^.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?' + option + r'.*$'\n    try:\n        m = FindStringInFile(MyDistro.grubKernelBootOptionsFile, matchs)\n    except IOError as e:\n        Error(\n            'FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(e))\n\n    return m\n\n\ndef AppendToLinuxKernelCmdline(option):\n    \"\"\"\n    Add 'option' to the kernel boot options of the grub configuration.\n    \"\"\"\n    if not FindInLinuxKernelCmdline(option):\n
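        # Capture groups: (everything through the boot-options key)(existing options)(optional closing\n        # quote); the replacement re-emits all three with 'option' appended before the closing quote.\n        src = r'^(.*?' 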
+ MyDistro.grubKernelBootOptionsLine + r')(.*?)(\"?)$'\n        rep = r'\\1\\2 ' + option + r'\\3'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep)\n        except IOError as e:\n            Error(\n                'AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(\n                    e))\n            return 1\n        Run(\"update-grub\", chk_err=False)\n    return 0\n\n\ndef RemoveFromLinuxKernelCmdline(option):\n    \"\"\"\n    Remove 'option' from the kernel boot options of the grub configuration.\n    \"\"\"\n    if FindInLinuxKernelCmdline(option):\n        src = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?)(' + option + r')(.*?)(\"?)$'\n        rep = r'\\1\\3\\4'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep)\n        except IOError as e:\n            Error(\n                'RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(\n                    e))\n            return 1\n        Run(\"update-grub\", chk_err=False)\n    return 0\n\n\ndef FindStringInFile(fname, matchs):\n    \"\"\"\n    Return match object if found in file.\n    \"\"\"\n    try:\n        ms = re.compile(matchs)\n        for l in (open(fname, 'r')).readlines():\n            m = re.search(ms, l)\n            if m:\n                return m\n    except:\n        raise\n\n    return None\n\n\ndef ReplaceStringInFile(fname, src, repl):\n    \"\"\"\n    Replace 'src' with 'repl' in file.\n    \"\"\"\n    try:\n        sr = re.compile(src)\n        if FindStringInFile(fname, src):\n            updated = ''\n            for l in (open(fname, 'r')).readlines():\n                n = re.sub(sr, repl, l)\n                updated += n\n            ReplaceFileContentsAtomic(fname, updated)\n    except:\n        raise\n    return\n\n\ndef ApplyVNUMAWorkaround():\n    \"\"\"\n    If kernel version has NUMA bug, add 'numa=off' to\n    kernel boot options.\n    \"\"\"\n    VersionParts = platform.release().replace('-', '.').split('.')\n    if int(VersionParts[0]) > 2:\n        return\n    if int(VersionParts[1]) > 6:\n        return\n    if int(VersionParts[2]) > 37:\n        return\n    if AppendToLinuxKernelCmdline(\"numa=off\") == 0:\n        Log(\"Your kernel version \" + platform.release() + \" has a NUMA-related bug: NUMA has been disabled.\")\n    else:\n        Error(\"Error adding 'numa=off'.  NUMA has not been disabled.\")\n\n\ndef RevertVNUMAWorkaround():\n    \"\"\"\n    Remove 'numa=off' from kernel boot options.\n    \"\"\"\n    if RemoveFromLinuxKernelCmdline(\"numa=off\") == 0:\n        Log('NUMA has been re-enabled')\n    else:\n        Log('NUMA has not been re-enabled')\n\n\ndef Install():\n    \"\"\"\n    Install the agent service.\n    Check dependencies.\n    Create /etc/waagent.conf and move old version to\n    /etc/waagent.conf.old\n    Copy RulesFiles to /var/lib/waagent\n    Create /etc/logrotate.d/waagent\n    Set /etc/ssh/sshd_config ClientAliveInterval to 180\n    Call ApplyVNUMAWorkaround()\n    \"\"\"\n    if MyDistro.checkDependencies():\n        return 1\n    os.chmod(sys.argv[0], 0o755)\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(a):\n            if os.path.isfile(GetLastPathElement(a)):\n                os.remove(GetLastPathElement(a))\n            shutil.move(a, \".\")\n            Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a))\n    MyDistro.registerAgentService()\n    if os.path.isfile(\"/etc/waagent.conf\"):\n        try:\n            os.remove(\"/etc/waagent.conf.old\")\n        except:\n            pass\n        try:\n            os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n            Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n        except:\n            pass\n    SetFileContents(\"/etc/waagent.conf\", MyDistro.waagent_conf_file)\n    SetFileContents(\"/etc/logrotate.d/waagent\", WaagentLogrotate)\n    filepath = \"/etc/ssh/sshd_config\"\n    ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n    a.startswith(\"ClientAliveInterval\"),\n                                                         GetFileContents(filepath).split(\n                                                             '\\n'))) + \"\\nClientAliveInterval 180\\n\")\n    Log(\"Configured SSH client probing to keep connections alive.\")\n    ApplyVNUMAWorkaround()\n    return 0\n\n\ndef GetMyDistro(dist_class_name=''):\n    \"\"\"\n    Return MyDistro object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    if dist_class_name == '':\n        if 'Linux' in platform.system():\n            Distro = DistInfo()[0]\n        else:  # I know this is not Linux!\n            if 'FreeBSD' in platform.system():\n                Distro = platform.system()\n        Distro = Distro.strip('\"')\n        Distro = Distro.strip(' ')\n        dist_class_name = Distro + 'Distro'\n    else:\n        Distro = dist_class_name\n    if dist_class_name not in globals():\n        msg = Distro + ' is not a supported distribution. Reverting to DefaultDistro to support scenarios in ' \\\n                       'unknown/unsupported distributions.'\n        print(msg)\n        Log(msg)\n        return DefaultDistro()\n\n    # the distro class inside this module. 
Check the implementations of AbstractDistro\n    return globals()[dist_class_name]()\n\n\ndef DistInfo(fullname=0):\n    if 'FreeBSD' in platform.system():\n        release = re.sub(r'\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['FreeBSD', release]\n        return distinfo\n\n    if 'linux_distribution' in dir(platform):\n        distinfo = list(platform.linux_distribution(full_distribution_name=fullname))\n        distinfo[0] = distinfo[0].strip()  # remove trailing whitespace in distro name\n        if not distinfo[0]:\n            distinfo = dist_info_SLES15()\n        if not distinfo[0]:\n            distinfo = dist_info_opensuse15()\n        return distinfo\n    else:\n        return platform.dist()\n\n\ndef dist_info_SLES15():\n    os_release_filepath = \"/etc/os-release\"\n    if not os.path.isfile(os_release_filepath):\n        return [\"\",\"\",\"\"]\n    info = open(os_release_filepath).readlines()\n    found_name_sles = False\n    found_id_sles = False\n    version_id = \"\"\n    for line in info:\n        if \"NAME=\\\"SLES\\\"\" in line:\n            found_name_sles = True\n        if \"ID=\\\"sles\\\"\" in line:\n            found_id_sles = True\n        if \"VERSION_ID\" in line:\n            match = re.match(r'VERSION_ID=\"([.0-9]+)\"', line)\n            if match:\n                version_id = match.group(1)\n    if found_name_sles and found_id_sles and version_id:\n        return \"SuSE\", version_id, \"suse\"\n    return [\"\",\"\",\"\"]\n\n\ndef dist_info_opensuse15():\n    os_release_filepath = \"/etc/os-release\"\n    if not os.path.isfile(os_release_filepath):\n        return [\"\",\"\",\"\"]\n    info = open(os_release_filepath).readlines()\n    found_name_opensuse_leap = False\n    found_id_opensuse_leap = False\n    version_id = \"\"\n    for line in info:\n        if \"NAME=\\\"openSUSE\" in line and \"Leap\" in line:\n            found_name_opensuse_leap = True\n        if \"ID=\\\"opensuse-leap\\\"\" in line:\n            found_id_opensuse_leap = True\n        if \"VERSION_ID\" in line:\n            match = re.match(r'VERSION_ID=\"([.0-9]+)\"', line)\n            if match:\n                version_id = match.group(1)\n    if found_name_opensuse_leap and found_id_opensuse_leap and version_id:\n        return \"SuSE\", version_id, \"suse\"\n    return [\"\",\"\",\"\"]\n\n\ndef PackagedInstall(buildroot):\n    \"\"\"\n    Called from setup.py for use by RPM.\n    Generic implementation Creates directories and\n    files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent,\n    /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot.\n    Copies generated files waagent.conf, into place and exits.\n    \"\"\"\n    MyDistro = GetMyDistro()\n    if MyDistro == None:\n        sys.exit(1)\n    MyDistro.packagedInstall(buildroot)\n\n\ndef LibraryInstall(buildroot):\n    pass\n\n\ndef Uninstall():\n    \"\"\"\n    Uninstall the agent service.\n    Copy RulesFiles back to original locations.\n    Delete agent-related files.\n    Call RevertVNUMAWorkaround().\n    \"\"\"\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(GetLastPathElement(a)):\n            try:\n                shutil.move(GetLastPathElement(a), a)\n                Warn(\"Moved \" + LibDir + \"/\" + GetLastPathElement(a) + \" -> \" + a)\n            except:\n                pass\n    MyDistro.unregisterAgentService()\n    MyDistro.uninstallDeleteFiles()\n    RevertVNUMAWorkaround()\n    return 0\n\n\ndef Deprovision(force, deluser):\n    \"\"\"\n    Remove 
user accounts created by provisioning.\n    Disables root password if Provisioning.DeleteRootPassword = 'y'\n    Stop agent service.\n    Remove SSH host keys if they were generated by the provision.\n    Set hostname to 'localhost.localdomain'.\n    Delete cached system configuration files in /var/lib and /var/lib/waagent.\n    \"\"\"\n\n    # Append blank line at the end of file, so the ctime of this file is changed every time\n    Run(\"echo ''>>\" + MyDistro.getConfigurationPath())\n\n    SwitchCwd()\n    ovfxml = GetFileContents(LibDir + \"/ovf-env.xml\")\n    ovfobj = None\n    if ovfxml != None:\n        ovfobj = OvfEnv().Parse(ovfxml, True)\n\n    print(\"WARNING! The waagent service will be stopped.\")\n    print(\"WARNING! All SSH host key pairs will be deleted.\")\n    print(\"WARNING! Cached DHCP leases will be deleted.\")\n    MyDistro.deprovisionWarnUser()\n    delRootPass = Config.get(\"Provisioning.DeleteRootPassword\")\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        print(\"WARNING! root password will be disabled. You will not be able to login as root.\")\n\n    if ovfobj != None and deluser == True:\n        print(\"WARNING! \" + ovfobj.UserName + \" account and entire home directory will be deleted.\")\n\n    if force == False and not raw_input('Do you want to proceed (y/n)? ').startswith('y'):\n        return 1\n\n    MyDistro.stopAgentService()\n\n    # Remove SSH host keys\n    regenerateKeys = Config.get(\"Provisioning.RegenerateSshHostKeyPair\")\n    if regenerateKeys == None or regenerateKeys.lower().startswith(\"y\"):\n        Run(\"rm -f /etc/ssh/ssh_host_*key*\")\n\n    # Remove root password\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        MyDistro.deleteRootPassword()\n    # Remove distribution specific networking configuration\n\n    MyDistro.publishHostname('localhost.localdomain')\n    MyDistro.deprovisionDeleteFiles()\n    if deluser == True:\n        MyDistro.DeleteAccount(ovfobj.UserName)\n    return 0\n\n\ndef SwitchCwd():\n    \"\"\"\n    Switch to cwd to /var/lib/waagent.\n    Create if not present.\n    \"\"\"\n    CreateDir(LibDir, \"root\", 0o700)\n    os.chdir(LibDir)\n\n\ndef Usage():\n    \"\"\"\n    Print the arguments to waagent.\n    \"\"\"\n    print(\"usage: \" + sys.argv[\n        0] + \" [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]\")\n    return 0\n\n\ndef main():\n    \"\"\"\n    Instantiate MyDistro, exit if distro class is not defined.\n    Parse command-line arguments, exit with usage() on error.\n    Instantiate ConfigurationProvider.\n    Call appropriate non-daemon methods and exit.\n    If daemon mode, enter Agent.Run() loop.\n    \"\"\"\n    if GuestAgentVersion == \"\":\n        print(\"WARNING! 
This is a non-standard agent that does not include a valid version string.\")\n\n    if len(sys.argv) == 1:\n        sys.exit(Usage())\n\n    LoggerInit('/var/log/waagent.log', '/dev/console')\n    global LinuxDistro\n    LinuxDistro = DistInfo()[0]\n\n    global MyDistro\n    MyDistro = GetMyDistro()\n    if MyDistro == None:\n        sys.exit(1)\n    args = []\n    conf_file = None\n    global force\n    force = False\n    for a in sys.argv[1:]:\n        if re.match(r\"^([-/]*)(help|usage|\\?)\", a):\n            sys.exit(Usage())\n        elif re.match(\"^([-/]*)version\", a):\n            print(GuestAgentVersion + \" running on \" + LinuxDistro)\n            sys.exit(0)\n        elif re.match(\"^([-/]*)verbose\", a):\n            myLogger.verbose = True\n        elif re.match(\"^([-/]*)force\", a):\n            force = True\n        elif re.match(\"^(?:[-/]*)conf=.+\", a):\n            conf_file = re.match(\"^(?:[-/]*)conf=(.+)\", a).groups()[0]\n        elif re.match(\"^([-/]*)(setup|install)\", a):\n            sys.exit(MyDistro.Install())\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            sys.exit(Uninstall())\n        else:\n            args.append(a)\n    global Config\n    Config = ConfigurationProvider(conf_file)\n\n    logfile = Config.get(\"Logs.File\")\n    if logfile is not None:\n        myLogger.file_path = logfile\n    logconsole = Config.get(\"Logs.Console\")\n    if logconsole is not None and logconsole.lower().startswith(\"n\"):\n        myLogger.con_path = None\n    verbose = Config.get(\"Logs.Verbose\")\n    if verbose != None and verbose.lower().startswith(\"y\"):\n        myLogger.verbose = True\n    global daemon\n    daemon = False\n    for a in args:\n        if re.match(r\"^([-/]*)deprovision\\+user\", a):\n            sys.exit(Deprovision(force, True))\n        elif re.match(\"^([-/]*)deprovision\", a):\n            sys.exit(Deprovision(force, False))\n        elif re.match(\"^([-/]*)daemon\", a):\n            daemon = True\n        elif re.match(\"^([-/]*)serialconsole\", a):\n            AppendToLinuxKernelCmdline(\"console=ttyS0 earlyprintk=ttyS0\")\n            Log(\"Configured kernel to use ttyS0 as the boot console.\")\n            sys.exit(0)\n        else:\n            print(\"Invalid command line parameter: \" + a)\n            sys.exit(1)\n\n    if daemon == False:\n        sys.exit(Usage())\n    global modloaded\n    modloaded = False\n\n    while True:\n        try:\n            SwitchCwd()\n            Log(GuestAgentLongName + \" Version: \" + GuestAgentVersion)\n            if IsLinux():\n                Log(\"Linux Distribution Detected      : \" + LinuxDistro)\n            global WaAgent\n            WaAgent = Agent()\n            WaAgent.Run()\n        except Exception as e:\n            Error(traceback.format_exc())\n            Error(\"Exception: \" + str(e))\n            Log(\"Restarting agent in 15 seconds\")\n            time.sleep(15)\n\n
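\n# Illustrative invocations (editor's sketch, derived from the flag parsing in main() above):\n#   waagent -version                          print the agent version and detected distro\n#   waagent -verbose -daemon                  run the goal-state loop with verbose logging\n#   waagent -conf=/etc/waagent.conf -daemon   run against an explicit configuration file\n#   waagent -force -deprovision+user          deprovision without prompting, deleting the user\nif __name__ == '__main__':\n    main()\n"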
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/__init__.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"psutil is a cross-platform library for retrieving information on\nrunning processes and system utilization (CPU, memory, disks, network)\nin Python.\n\"\"\"\n\nfrom __future__ import division\n\n__author__ = \"Giampaolo Rodola'\"\n__version__ = \"2.2.1\"\nversion_info = tuple([int(num) for num in __version__.split('.')])\n\n__all__ = [\n    # exceptions\n    \"Error\", \"NoSuchProcess\", \"AccessDenied\", \"TimeoutExpired\",\n    # constants\n    \"version_info\", \"__version__\",\n    \"STATUS_RUNNING\", \"STATUS_IDLE\", \"STATUS_SLEEPING\", \"STATUS_DISK_SLEEP\",\n    \"STATUS_STOPPED\", \"STATUS_TRACING_STOP\", \"STATUS_ZOMBIE\", \"STATUS_DEAD\",\n    \"STATUS_WAKING\", \"STATUS_LOCKED\", \"STATUS_WAITING\", \"STATUS_LOCKED\",\n    \"CONN_ESTABLISHED\", \"CONN_SYN_SENT\", \"CONN_SYN_RECV\", \"CONN_FIN_WAIT1\",\n    \"CONN_FIN_WAIT2\", \"CONN_TIME_WAIT\", \"CONN_CLOSE\", \"CONN_CLOSE_WAIT\",\n    \"CONN_LAST_ACK\", \"CONN_LISTEN\", \"CONN_CLOSING\", \"CONN_NONE\",\n    # classes\n    \"Process\", \"Popen\",\n    # functions\n    \"pid_exists\", \"pids\", \"process_iter\", \"wait_procs\",             # proc\n    \"virtual_memory\", \"swap_memory\",                                # memory\n    \"cpu_times\", \"cpu_percent\", \"cpu_times_percent\", \"cpu_count\",   # cpu\n    \"net_io_counters\", \"net_connections\",                           # network\n    \"disk_io_counters\", \"disk_partitions\", \"disk_usage\",            # disk\n    \"users\", \"boot_time\",                                           # others\n]\n\nimport collections\nimport errno\nimport functools\nimport os\nimport signal\nimport subprocess\nimport sys\nimport time\nimport warnings\ntry:\n    import pwd\nexcept ImportError:\n    pwd = None\n\nfrom psutil._common import memoize\nfrom psutil._compat import callable, long\nfrom psutil._compat import PY3 as _PY3\nfrom psutil._common import (deprecated_method as _deprecated_method,\n                            deprecated as _deprecated,\n                            sdiskio as _nt_sys_diskio,\n                            snetio as _nt_sys_netio)\n\nfrom psutil._common import (STATUS_RUNNING,  # NOQA\n                            STATUS_SLEEPING,\n                            STATUS_DISK_SLEEP,\n                            STATUS_STOPPED,\n                            STATUS_TRACING_STOP,\n                            STATUS_ZOMBIE,\n                            STATUS_DEAD,\n                            STATUS_WAKING,\n                            STATUS_LOCKED,\n                            STATUS_IDLE,  # bsd\n                            STATUS_WAITING,  # bsd\n                            STATUS_LOCKED)  # bsd\n\nfrom psutil._common import (CONN_ESTABLISHED,\n                            CONN_SYN_SENT,\n                            CONN_SYN_RECV,\n                            CONN_FIN_WAIT1,\n                            CONN_FIN_WAIT2,\n                            CONN_TIME_WAIT,\n                            CONN_CLOSE,\n                            CONN_CLOSE_WAIT,\n                            CONN_LAST_ACK,\n                            CONN_LISTEN,\n                            CONN_CLOSING,\n                            CONN_NONE)\n\nif sys.platform.startswith(\"linux\"):\n    import psutil._pslinux as _psplatform\n    from psutil._pslinux import 
(phymem_buffers,  # NOQA\n                                 cached_phymem)\n\n    from psutil._pslinux import (IOPRIO_CLASS_NONE,  # NOQA\n                                 IOPRIO_CLASS_RT,\n                                 IOPRIO_CLASS_BE,\n                                 IOPRIO_CLASS_IDLE)\n    # Linux >= 2.6.36\n    if _psplatform.HAS_PRLIMIT:\n        from _psutil_linux import (RLIM_INFINITY,  # NOQA\n                                   RLIMIT_AS,\n                                   RLIMIT_CORE,\n                                   RLIMIT_CPU,\n                                   RLIMIT_DATA,\n                                   RLIMIT_FSIZE,\n                                   RLIMIT_LOCKS,\n                                   RLIMIT_MEMLOCK,\n                                   RLIMIT_NOFILE,\n                                   RLIMIT_NPROC,\n                                   RLIMIT_RSS,\n                                   RLIMIT_STACK)\n        # Kinda ugly but considerably faster than using hasattr() and\n        # setattr() against the module object (we are at import time:\n        # speed matters).\n        import _psutil_linux\n        try:\n            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING\n        except AttributeError:\n            pass\n        del _psutil_linux\n\nelif sys.platform.startswith(\"win32\"):\n    import psutil._pswindows as _psplatform\n    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,  # NOQA\n                                 BELOW_NORMAL_PRIORITY_CLASS,\n                                 HIGH_PRIORITY_CLASS,\n                                 IDLE_PRIORITY_CLASS,\n                                 NORMAL_PRIORITY_CLASS,\n                                 REALTIME_PRIORITY_CLASS)\n    from psutil._pswindows import CONN_DELETE_TCB  # NOQA\n\nelif sys.platform.startswith(\"darwin\"):\n    import psutil._psosx as _psplatform\n\nelif sys.platform.startswith(\"freebsd\"):\n    import psutil._psbsd as _psplatform\n\nelif sys.platform.startswith(\"sunos\"):\n    import psutil._pssunos as _psplatform\n    from psutil._pssunos import (CONN_IDLE,  # NOQA\n                                 CONN_BOUND)\n\nelse:\n    raise NotImplementedError('platform %s is not supported' % sys.platform)\n\n__all__.extend(_psplatform.__extra__all__)\n\n\n_TOTAL_PHYMEM = None\n_POSIX = os.name == 'posix'\n_WINDOWS = os.name == 'nt'\n_timer = getattr(time, 'monotonic', time.time)\n\n\n# Sanity check in case the user messed up with psutil installation\n# or did something weird with sys.path. 
In this case we might end\n# up importing a python module using a C extension module which\n# was compiled for a different version of psutil.\n# We want to prevent that by failing sooner rather than later.\n# See: https://github.com/giampaolo/psutil/issues/564\nif (int(__version__.replace('.', '')) !=\n        getattr(_psplatform.cext, 'version', None)):\n    msg = \"version conflict: %r C extension module was built for another \" \\\n          \"version of psutil (different than %s)\" % (_psplatform.cext.__file__,\n                                                     __version__)\n    raise ImportError(msg)\n\n\n# =====================================================================\n# --- exceptions\n# =====================================================================\n\nclass Error(Exception):\n    \"\"\"Base exception class. All other psutil exceptions inherit\n    from this one.\n    \"\"\"\n\n\nclass NoSuchProcess(Error):\n    \"\"\"Exception raised when a process with a certain PID doesn't\n    or no longer exists (zombie).\n    \"\"\"\n\n    def __init__(self, pid, name=None, msg=None):\n        Error.__init__(self)\n        self.pid = pid\n        self.name = name\n        self.msg = msg\n        if msg is None:\n            if name:\n                details = \"(pid=%s, name=%s)\" % (self.pid, repr(self.name))\n            else:\n                details = \"(pid=%s)\" % self.pid\n            self.msg = \"process no longer exists \" + details\n\n    def __str__(self):\n        return self.msg\n\n\nclass AccessDenied(Error):\n    \"\"\"Exception raised when permission to perform an action is denied.\"\"\"\n\n    def __init__(self, pid=None, name=None, msg=None):\n        Error.__init__(self)\n        self.pid = pid\n        self.name = name\n        self.msg = msg\n        if msg is None:\n            if (pid is not None) and (name is not None):\n                self.msg = \"(pid=%s, name=%s)\" % (pid, repr(name))\n            elif (pid is not None):\n                self.msg = \"(pid=%s)\" % self.pid\n            else:\n                self.msg = \"\"\n\n    def __str__(self):\n        return self.msg\n\n\nclass TimeoutExpired(Error):\n    \"\"\"Raised on Process.wait(timeout) if timeout expires and process\n    is still alive.\n    \"\"\"\n\n    def __init__(self, seconds, pid=None, name=None):\n        Error.__init__(self)\n        self.seconds = seconds\n        self.pid = pid\n        self.name = name\n        self.msg = \"timeout after %s seconds\" % seconds\n        if (pid is not None) and (name is not None):\n            self.msg += \" (pid=%s, name=%s)\" % (pid, repr(name))\n        elif (pid is not None):\n            self.msg += \" (pid=%s)\" % self.pid\n\n    def __str__(self):\n        return self.msg\n\n# push exception classes into platform specific module namespace\n_psplatform.NoSuchProcess = NoSuchProcess\n_psplatform.AccessDenied = AccessDenied\n_psplatform.TimeoutExpired = TimeoutExpired\n\n\n# =====================================================================\n# --- Process class\n# =====================================================================\n\ndef _assert_pid_not_reused(fun):\n    \"\"\"Decorator which raises NoSuchProcess in case a process is no\n    longer running or its PID has been reused.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        if not self.is_running():\n            raise NoSuchProcess(self.pid, self._name)\n        return fun(self, *args, **kwargs)\n    return wrapper\n\n\nclass 
Process(object):\n    \"\"\"Represents an OS process with the given PID.\n    If PID is omitted current process PID (os.getpid()) is used.\n    Raise NoSuchProcess if PID does not exist.\n\n    Note that most of the methods of this class do not make sure\n    the PID of the process being queried has been reused over time.\n    That means you might end up retrieving an information referring\n    to another process in case the original one this instance\n    refers to is gone in the meantime.\n\n    The only exceptions for which process identity is pre-emptively\n    checked and guaranteed are:\n\n     - parent()\n     - children()\n     - nice() (set)\n     - ionice() (set)\n     - rlimit() (set)\n     - cpu_affinity (set)\n     - suspend()\n     - resume()\n     - send_signal()\n     - terminate()\n     - kill()\n\n    To prevent this problem for all other methods you can:\n      - use is_running() before querying the process\n      - if you're continuously iterating over a set of Process\n        instances use process_iter() which pre-emptively checks\n        process identity for every yielded instance\n    \"\"\"\n\n    def __init__(self, pid=None):\n        self._init(pid)\n\n    def _init(self, pid, _ignore_nsp=False):\n        if pid is None:\n            pid = os.getpid()\n        else:\n            if not _PY3 and not isinstance(pid, (int, long)):\n                raise TypeError('pid must be an integer (got %r)' % pid)\n            if pid < 0:\n                raise ValueError('pid must be a positive integer (got %s)'\n                                 % pid)\n        self._pid = pid\n        self._name = None\n        self._exe = None\n        self._create_time = None\n        self._gone = False\n        self._hash = None\n        # used for caching on Windows only (on POSIX ppid may change)\n        self._ppid = None\n        # platform-specific modules define an _psplatform.Process\n        # implementation class\n        self._proc = _psplatform.Process(pid)\n        self._last_sys_cpu_times = None\n        self._last_proc_cpu_times = None\n        # cache creation time for later use in is_running() method\n        try:\n            self.create_time()\n        except AccessDenied:\n            # we should never get here as AFAIK we're able to get\n            # process creation time on all platforms even as a\n            # limited user\n            pass\n        except NoSuchProcess:\n            if not _ignore_nsp:\n                msg = 'no process found with pid %s' % pid\n                raise NoSuchProcess(pid, None, msg)\n            else:\n                self._gone = True\n        # This pair is supposed to indentify a Process instance\n        # univocally over time (the PID alone is not enough as\n        # it might refer to a process whose PID has been reused).\n        # This will be used later in __eq__() and is_running().\n        self._ident = (self.pid, self._create_time)\n\n    def __str__(self):\n        try:\n            pid = self.pid\n            name = repr(self.name())\n        except NoSuchProcess:\n            details = \"(pid=%s (terminated))\" % self.pid\n        except AccessDenied:\n            details = \"(pid=%s)\" % (self.pid)\n        else:\n            details = \"(pid=%s, name=%s)\" % (pid, name)\n        return \"%s.%s%s\" % (self.__class__.__module__,\n                            self.__class__.__name__, details)\n\n    def __repr__(self):\n        return \"<%s at %s>\" % (self.__str__(), id(self))\n\n    def __eq__(self, other):\n        # 
Test for equality with another Process object based\n        # on PID and creation time.\n        if not isinstance(other, Process):\n            return NotImplemented\n        return self._ident == other._ident\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __hash__(self):\n        if self._hash is None:\n            self._hash = hash(self._ident)\n        return self._hash\n\n    # --- utility methods\n\n    def as_dict(self, attrs=None, ad_value=None):\n        \"\"\"Utility method returning process information as a\n        hashable dictionary.\n\n        If 'attrs' is specified it must be a list of strings\n        reflecting available Process class' attribute names\n        (e.g. ['cpu_times', 'name']) else all public (read\n        only) attributes are assumed.\n\n        'ad_value' is the value which gets assigned in case\n        AccessDenied  exception is raised when retrieving that\n        particular process information.\n        \"\"\"\n        excluded_names = set(\n            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',\n             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])\n        retdict = dict()\n        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])\n        for name in ls:\n            if name.startswith('_'):\n                continue\n            if name.startswith('set_'):\n                continue\n            if name.startswith('get_'):\n                msg = \"%s() is deprecated; use %s() instead\" % (name, name[4:])\n                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n                name = name[4:]\n                if name in ls:\n                    continue\n            if name == 'getcwd':\n                msg = \"getcwd() is deprecated; use cwd() instead\"\n                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n                name = 'cwd'\n                if name in ls:\n                    continue\n\n            if name in excluded_names:\n                continue\n            try:\n                attr = getattr(self, name)\n                if callable(attr):\n                    ret = attr()\n                else:\n                    ret = attr\n            except AccessDenied:\n                ret = ad_value\n            except NotImplementedError:\n                # in case of not implemented functionality (may happen\n                # on old or exotic systems) we want to crash only if\n                # the user explicitly asked for that particular attr\n                if attrs:\n                    raise\n                continue\n            retdict[name] = ret\n        return retdict\n\n    def parent(self):\n        \"\"\"Return the parent process as a Process object pre-emptively\n        checking whether PID has been reused.\n        If no parent is known return None.\n        \"\"\"\n        ppid = self.ppid()\n        if ppid is not None:\n            try:\n                parent = Process(ppid)\n                if parent.create_time() <= self.create_time():\n                    return parent\n                # ...else ppid has been reused by another process\n            except NoSuchProcess:\n                pass\n\n    def is_running(self):\n        \"\"\"Return whether this process is running.\n        It also checks if PID has been reused by another process in\n        which case return False.\n        \"\"\"\n        if self._gone:\n            return False\n        try:\n            # Checking 
if PID is alive is not enough as the PID might\n            # have been reused by another process: we also want to\n            # check process identity.\n            # Process identity / uniqueness over time is greanted by\n            # (PID + creation time) and that is verified in __eq__.\n            return self == Process(self.pid)\n        except NoSuchProcess:\n            self._gone = True\n            return False\n\n    # --- actual API\n\n    @property\n    def pid(self):\n        \"\"\"The process PID.\"\"\"\n        return self._pid\n\n    def ppid(self):\n        \"\"\"The process parent PID.\n        On Windows the return value is cached after first call.\n        \"\"\"\n        # On POSIX we don't want to cache the ppid as it may unexpectedly\n        # change to 1 (init) in case this process turns into a zombie:\n        # https://github.com/giampaolo/psutil/issues/321\n        # http://stackoverflow.com/questions/356722/\n\n        # XXX should we check creation time here rather than in\n        # Process.parent()?\n        if _POSIX:\n            return self._proc.ppid()\n        else:\n            if self._ppid is None:\n                self._ppid = self._proc.ppid()\n            return self._ppid\n\n    def name(self):\n        \"\"\"The process name. The return value is cached after first call.\"\"\"\n        if self._name is None:\n            name = self._proc.name()\n            if _POSIX and len(name) >= 15:\n                # On UNIX the name gets truncated to the first 15 characters.\n                # If it matches the first part of the cmdline we return that\n                # one instead because it's usually more explicative.\n                # Examples are \"gnome-keyring-d\" vs. \"gnome-keyring-daemon\".\n                try:\n                    cmdline = self.cmdline()\n                except AccessDenied:\n                    pass\n                else:\n                    if cmdline:\n                        extended_name = os.path.basename(cmdline[0])\n                        if extended_name.startswith(name):\n                            name = extended_name\n            self._proc._name = name\n            self._name = name\n        return self._name\n\n    def exe(self):\n        \"\"\"The process executable as an absolute path.\n        May also be an empty string.\n        The return value is cached after first call.\n        \"\"\"\n        def guess_it(fallback):\n            # try to guess exe from cmdline[0] in absence of a native\n            # exe representation\n            cmdline = self.cmdline()\n            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):\n                exe = cmdline[0]  # the possible exe\n                # Attempt to guess only in case of an absolute path.\n                # It is not safe otherwise as the process might have\n                # changed cwd.\n                if (os.path.isabs(exe)\n                        and os.path.isfile(exe)\n                        and os.access(exe, os.X_OK)):\n                    return exe\n            if isinstance(fallback, AccessDenied):\n                raise fallback\n            return fallback\n\n        if self._exe is None:\n            try:\n                exe = self._proc.exe()\n            except AccessDenied as err:\n                return guess_it(fallback=err)\n            else:\n                if not exe:\n                    # underlying implementation can legitimately return an\n                    # empty string; if that's the case we don't 
want to\n                    # raise AD while guessing from the cmdline\n                    try:\n                        exe = guess_it(fallback=exe)\n                    except AccessDenied:\n                        pass\n                self._exe = exe\n        return self._exe\n\n    def cmdline(self):\n        \"\"\"The command line this process has been called with.\"\"\"\n        return self._proc.cmdline()\n\n    def status(self):\n        \"\"\"The process current status as a STATUS_* constant.\"\"\"\n        return self._proc.status()\n\n    def username(self):\n        \"\"\"The name of the user that owns the process.\n        On UNIX this is calculated by using *real* process uid.\n        \"\"\"\n        if _POSIX:\n            if pwd is None:\n                # might happen if python was installed from sources\n                raise ImportError(\n                    \"requires pwd module shipped with standard python\")\n            real_uid = self.uids().real\n            try:\n                return pwd.getpwuid(real_uid).pw_name\n            except KeyError:\n                # the uid can't be resolved by the system\n                return str(real_uid)\n        else:\n            return self._proc.username()\n\n    def create_time(self):\n        \"\"\"The process creation time as a floating point number\n        expressed in seconds since the epoch, in UTC.\n        The return value is cached after first call.\n        \"\"\"\n        if self._create_time is None:\n            self._create_time = self._proc.create_time()\n        return self._create_time\n\n    def cwd(self):\n        \"\"\"Process current working directory as an absolute path.\"\"\"\n        return self._proc.cwd()\n\n    def nice(self, value=None):\n        \"\"\"Get or set process niceness (priority).\"\"\"\n        if value is None:\n            return self._proc.nice_get()\n        else:\n            if not self.is_running():\n                raise NoSuchProcess(self.pid, self._name)\n            self._proc.nice_set(value)\n\n    if _POSIX:\n\n        def uids(self):\n            \"\"\"Return process UIDs as a (real, effective, saved)\n            namedtuple.\n            \"\"\"\n            return self._proc.uids()\n\n        def gids(self):\n            \"\"\"Return process GIDs as a (real, effective, saved)\n            namedtuple.\n            \"\"\"\n            return self._proc.gids()\n\n        def terminal(self):\n            \"\"\"The terminal associated with this process, if any,\n            else None.\n            \"\"\"\n            return self._proc.terminal()\n\n        def num_fds(self):\n            \"\"\"Return the number of file descriptors opened by this\n            process (POSIX only).\n            \"\"\"\n            return self._proc.num_fds()\n\n    # Linux, BSD and Windows only\n    if hasattr(_psplatform.Process, \"io_counters\"):\n\n        def io_counters(self):\n            \"\"\"Return process I/O statistics as a\n            (read_count, write_count, read_bytes, write_bytes)\n            namedtuple.\n            Those are the number of read/write calls performed and the\n            amount of bytes read and written by the process.\n            \"\"\"\n            return self._proc.io_counters()\n\n    # Linux and Windows >= Vista only\n    if hasattr(_psplatform.Process, \"ionice_get\"):\n\n        def ionice(self, ioclass=None, value=None):\n            \"\"\"Get or set process I/O niceness (priority).\n\n            On Linux 'ioclass' is one of the IOPRIO_CLASS_* 
constants.\n            'value' is a number which goes from 0 to 7. The higher the\n            value, the lower the I/O priority of the process.\n\n            On Windows only 'ioclass' is used and it can be set to 2\n            (normal), 1 (low) or 0 (very low).\n\n            Available on Linux and Windows > Vista only.\n            \"\"\"\n            if ioclass is None:\n                if value is not None:\n                    raise ValueError(\"'ioclass' must be specified\")\n                return self._proc.ionice_get()\n            else:\n                return self._proc.ionice_set(ioclass, value)\n\n    # Linux only\n    if hasattr(_psplatform.Process, \"rlimit\"):\n\n        def rlimit(self, resource, limits=None):\n            \"\"\"Get or set process resource limits as a (soft, hard)\n            tuple.\n\n            'resource' is one of the RLIMIT_* constants.\n            'limits' is supposed to be a (soft, hard)  tuple.\n\n            See \"man prlimit\" for further info.\n            Available on Linux only.\n            \"\"\"\n            if limits is None:\n                return self._proc.rlimit(resource)\n            else:\n                return self._proc.rlimit(resource, limits)\n\n    # Windows, Linux and BSD only\n    if hasattr(_psplatform.Process, \"cpu_affinity_get\"):\n\n        def cpu_affinity(self, cpus=None):\n            \"\"\"Get or set process CPU affinity.\n            If specified 'cpus' must be a list of CPUs for which you\n            want to set the affinity (e.g. [0, 1]).\n            (Windows, Linux and BSD only).\n            \"\"\"\n            if cpus is None:\n                return self._proc.cpu_affinity_get()\n            else:\n                self._proc.cpu_affinity_set(cpus)\n\n    if _WINDOWS:\n\n        def num_handles(self):\n            \"\"\"Return the number of handles opened by this process\n            (Windows only).\n            \"\"\"\n            return self._proc.num_handles()\n\n    def num_ctx_switches(self):\n        \"\"\"Return the number of voluntary and involuntary context\n        switches performed by this process.\n        \"\"\"\n        return self._proc.num_ctx_switches()\n\n    def num_threads(self):\n        \"\"\"Return the number of threads used by this process.\"\"\"\n        return self._proc.num_threads()\n\n    def threads(self):\n        \"\"\"Return threads opened by process as a list of\n        (id, user_time, system_time) namedtuples representing\n        thread id and thread CPU times (user/system).\n        \"\"\"\n        return self._proc.threads()\n\n    @_assert_pid_not_reused\n    def children(self, recursive=False):\n        \"\"\"Return the children of this process as a list of Process\n        instances, pre-emptively checking whether PID has been reused.\n        If recursive is True return all the parent descendants.\n\n        Example (A == this process):\n\n         A ─┐\n            │\n            ├─ B (child) ─┐\n            │             └─ X (grandchild) ─┐\n            │                                └─ Y (great grandchild)\n            ├─ C (child)\n            └─ D (child)\n\n        >>> import psutil\n        >>> p = psutil.Process()\n        >>> p.children()\n        B, C, D\n        >>> p.children(recursive=True)\n        B, X, Y, C, D\n\n        Note that in the example above if process X disappears\n        process Y won't be listed as the reference to process A\n        is lost.\n        \"\"\"\n        if hasattr(_psplatform, 'ppid_map'):\n            # Windows 
only: obtain a {pid:ppid, ...} dict for all running\n            # processes in one shot (faster).\n            ppid_map = _psplatform.ppid_map()\n        else:\n            ppid_map = None\n\n        ret = []\n        if not recursive:\n            if ppid_map is None:\n                # 'slow' version, common to all platforms except Windows\n                for p in process_iter():\n                    try:\n                        if p.ppid() == self.pid:\n                            # if child happens to be older than its parent\n                            # (self) it means child's PID has been reused\n                            if self.create_time() <= p.create_time():\n                                ret.append(p)\n                    except NoSuchProcess:\n                        pass\n            else:\n                # Windows only (faster)\n                for pid, ppid in ppid_map.items():\n                    if ppid == self.pid:\n                        try:\n                            child = Process(pid)\n                            # if child happens to be older than its parent\n                            # (self) it means child's PID has been reused\n                            if self.create_time() <= child.create_time():\n                                ret.append(child)\n                        except NoSuchProcess:\n                            pass\n        else:\n            # construct a dict where 'values' are all the processes\n            # having 'key' as their parent\n            table = collections.defaultdict(list)\n            if ppid_map is None:\n                for p in process_iter():\n                    try:\n                        table[p.ppid()].append(p)\n                    except NoSuchProcess:\n                        pass\n            else:\n                for pid, ppid in ppid_map.items():\n                    try:\n                        p = Process(pid)\n                        table[ppid].append(p)\n                    except NoSuchProcess:\n                        pass\n            # At this point we have a mapping table where table[self.pid]\n            # are the current process' children.\n            # Below, we look for all descendants recursively, similarly\n            # to a recursive function call.\n            checkpids = [self.pid]\n            for pid in checkpids:\n                for child in table[pid]:\n                    try:\n                        # if child happens to be older than its parent\n                        # (self) it means child's PID has been reused\n                        intime = self.create_time() <= child.create_time()\n                    except NoSuchProcess:\n                        pass\n                    else:\n                        if intime:\n                            ret.append(child)\n                            if child.pid not in checkpids:\n                                checkpids.append(child.pid)\n        return ret\n\n    def cpu_percent(self, interval=None):\n        \"\"\"Return a float representing the current process CPU\n        utilization as a percentage.\n\n        When interval is 0.0 or None (default) compares process times\n        to system CPU times elapsed since last call, returning\n        immediately (non-blocking). 
That means that the first time\n        this is called it will return a meaningless 0.0 value\n        which you should ignore.\n\n        When interval is > 0.0 compares process times to system CPU\n        times elapsed before and after the interval (blocking).\n\n        In this case it is recommended for accuracy that this function\n        be called with at least 0.1 seconds between calls.\n\n        Examples:\n\n          >>> import psutil\n          >>> p = psutil.Process(os.getpid())\n          >>> # blocking\n          >>> p.cpu_percent(interval=1)\n          2.0\n          >>> # non-blocking (percentage since last call)\n          >>> p.cpu_percent(interval=None)\n          2.9\n          >>>\n        \"\"\"\n        blocking = interval is not None and interval > 0.0\n        num_cpus = cpu_count()\n        if _POSIX:\n            timer = lambda: _timer() * num_cpus\n        else:\n            timer = lambda: sum(cpu_times())\n        if blocking:\n            st1 = timer()\n            pt1 = self._proc.cpu_times()\n            time.sleep(interval)\n            st2 = timer()\n            pt2 = self._proc.cpu_times()\n        else:\n            st1 = self._last_sys_cpu_times\n            pt1 = self._last_proc_cpu_times\n            st2 = timer()\n            pt2 = self._proc.cpu_times()\n            if st1 is None or pt1 is None:\n                self._last_sys_cpu_times = st2\n                self._last_proc_cpu_times = pt2\n                return 0.0\n\n        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)\n        delta_time = st2 - st1\n        # reset values for next call in case of interval == None\n        self._last_sys_cpu_times = st2\n        self._last_proc_cpu_times = pt2\n\n        try:\n            # The utilization split between all CPUs.\n            # Note: a percentage > 100 is legitimate as it can result\n            # from a process with multiple threads running on different\n            # CPU cores, see:\n            # http://stackoverflow.com/questions/1032357\n            # https://github.com/giampaolo/psutil/issues/474\n            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus\n        except ZeroDivisionError:\n            # interval was too low\n            return 0.0\n        else:\n            return round(overall_percent, 1)\n\n    def cpu_times(self):\n        \"\"\"Return a (user, system) namedtuple representing the\n        accumulated process time, in seconds.\n        This is the same as os.times() but per-process.\n        \"\"\"\n        return self._proc.cpu_times()\n\n    def memory_info(self):\n        \"\"\"Return a tuple representing RSS (Resident Set Size) and VMS\n        (Virtual Memory Size) in bytes.\n\n        On UNIX RSS and VMS are the same values shown by 'ps'.\n\n        On Windows RSS and VMS refer to \"Mem Usage\" and \"VM Size\"\n        columns of taskmgr.exe.\n        \"\"\"\n        return self._proc.memory_info()\n\n    def memory_info_ex(self):\n        \"\"\"Return a namedtuple with variable fields depending on the\n        platform representing extended memory information about\n        this process. All numbers are expressed in bytes.\n        \"\"\"\n        return self._proc.memory_info_ex()\n\n    def memory_percent(self):\n        \"\"\"Compare physical system memory to process resident memory\n        (RSS) and calculate process memory utilization as a percentage.\n
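\n        Example (illustrative value):\n\n        >>> import psutil\n        >>> psutil.Process().memory_percent()\n        0.7823\n        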
\"\"\"\n        rss = self._proc.memory_info()[0]\n        # use cached value if available\n        total_phymem = _TOTAL_PHYMEM or virtual_memory().total\n        try:\n            return (rss / float(total_phymem)) * 100\n        except ZeroDivisionError:\n            return 0.0\n\n    def memory_maps(self, grouped=True):\n        \"\"\"Return process' mapped memory regions as a list of namedtuples\n        whose fields are variable depending on the platform.\n\n        If 'grouped' is True the mapped regions with the same 'path'\n        are grouped together and the different memory fields are summed.\n\n        If 'grouped' is False every mapped region is shown as a single\n        entity and the namedtuple will also include the mapped region's\n        address space ('addr') and permission set ('perms').\n        \"\"\"\n        it = self._proc.memory_maps()\n        if grouped:\n            d = {}\n            for tupl in it:\n                path = tupl[2]\n                nums = tupl[3:]\n                try:\n                    d[path] = map(lambda x, y: x + y, d[path], nums)\n                except KeyError:\n                    d[path] = nums\n            nt = _psplatform.pmmap_grouped\n            return [nt(path, *d[path]) for path in d]  # NOQA\n        else:\n            nt = _psplatform.pmmap_ext\n            return [nt(*x) for x in it]\n\n    def open_files(self):\n        \"\"\"Return files opened by process as a list of\n        (path, fd) namedtuples including the absolute file name\n        and file descriptor number.\n        \"\"\"\n        return self._proc.open_files()\n\n    def connections(self, kind='inet'):\n        \"\"\"Return connections opened by process as a list of\n        (fd, family, type, laddr, raddr, status) namedtuples.\n        The 'kind' parameter filters for connections that match the\n        following criteria:\n\n        Kind Value      Connections using\n        inet            IPv4 and IPv6\n        inet4           IPv4\n        inet6           IPv6\n        tcp             TCP\n        tcp4            TCP over IPv4\n        tcp6            TCP over IPv6\n        udp             UDP\n        udp4            UDP over IPv4\n        udp6            UDP over IPv6\n        unix            UNIX socket (both UDP and TCP protocols)\n        all             the sum of all the possible families and protocols\n
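\n        Example (illustrative output):\n\n        >>> import psutil\n        >>> p = psutil.Process()\n        >>> p.connections(kind='tcp')\n        [pconn(fd=10, family=2, type=1, laddr=('127.0.0.1', 48776),\n               raddr=('127.0.0.1', 6379), status='ESTABLISHED')]\n        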
\"\"\"\n        return self._proc.connections(kind)\n\n    if _POSIX:\n        def _send_signal(self, sig):\n            # XXX: according to \"man 2 kill\" PID 0 has a special\n            # meaning as it refers to <<every process in the process\n            # group of the calling process>>, so should we prevent\n            # it here?\n            try:\n                os.kill(self.pid, sig)\n            except OSError as err:\n                if err.errno == errno.ESRCH:\n                    self._gone = True\n                    raise NoSuchProcess(self.pid, self._name)\n                if err.errno == errno.EPERM:\n                    raise AccessDenied(self.pid, self._name)\n                raise\n\n    @_assert_pid_not_reused\n    def send_signal(self, sig):\n        \"\"\"Send a signal to process pre-emptively checking whether\n        PID has been reused (see signal module constants).\n        On Windows only SIGTERM is valid and is treated as an alias\n        for kill().\n        \"\"\"\n        if _POSIX:\n            self._send_signal(sig)\n        else:\n            if sig == signal.SIGTERM:\n                self._proc.kill()\n            else:\n                raise ValueError(\"only SIGTERM is supported on Windows\")\n\n    @_assert_pid_not_reused\n    def suspend(self):\n        \"\"\"Suspend process execution with SIGSTOP pre-emptively checking\n        whether PID has been reused.\n        On Windows this has the effect of suspending all process threads.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGSTOP)\n        else:\n            self._proc.suspend()\n\n    @_assert_pid_not_reused\n    def resume(self):\n        \"\"\"Resume process execution with SIGCONT pre-emptively checking\n        whether PID has been reused.\n        On Windows this has the effect of resuming all process threads.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGCONT)\n        else:\n            self._proc.resume()\n\n    @_assert_pid_not_reused\n    def terminate(self):\n        \"\"\"Terminate the process with SIGTERM pre-emptively checking\n        whether PID has been reused.\n        On Windows this is an alias for kill().\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGTERM)\n        else:\n            self._proc.kill()\n\n    @_assert_pid_not_reused\n    def kill(self):\n        \"\"\"Kill the current process with SIGKILL pre-emptively checking\n        whether PID has been reused.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGKILL)\n        else:\n            self._proc.kill()\n\n    def wait(self, timeout=None):\n        \"\"\"Wait for process to terminate and, if process is a child\n        of os.getpid(), also return its exit code, else None.\n\n        If the process is already terminated immediately return None\n        instead of raising NoSuchProcess.\n\n        If timeout (in seconds) is specified and process is still alive\n        raise TimeoutExpired.\n\n        To wait for multiple Process(es) use psutil.wait_procs().\n
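\n        Example (POSIX, illustrative):\n\n        >>> import subprocess, psutil\n        >>> sub = subprocess.Popen([\"sleep\", \"1\"])\n        >>> p = psutil.Process(sub.pid)\n        >>> p.wait(timeout=5)\n        0\n        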
\"\"\"\n        if timeout is not None and not timeout >= 0:\n            raise ValueError(\"timeout must be a positive integer\")\n        return self._proc.wait(timeout)\n\n    # --- deprecated APIs\n\n    _locals = set(locals())\n\n    @_deprecated_method(replacement='children')\n    def get_children(self):\n        pass\n\n    @_deprecated_method(replacement='connections')\n    def get_connections(self):\n        pass\n\n    if \"cpu_affinity\" in _locals:\n        @_deprecated_method(replacement='cpu_affinity')\n        def get_cpu_affinity(self):\n            pass\n\n        @_deprecated_method(replacement='cpu_affinity')\n        def set_cpu_affinity(self, cpus):\n            pass\n\n    @_deprecated_method(replacement='cpu_percent')\n    def get_cpu_percent(self):\n        pass\n\n    @_deprecated_method(replacement='cpu_times')\n    def get_cpu_times(self):\n        pass\n\n    @_deprecated_method(replacement='cwd')\n    def getcwd(self):\n        pass\n\n    @_deprecated_method(replacement='memory_info_ex')\n    def get_ext_memory_info(self):\n        pass\n\n    if \"io_counters\" in _locals:\n        @_deprecated_method(replacement='io_counters')\n        def get_io_counters(self):\n            pass\n\n    if \"ionice\" in _locals:\n        @_deprecated_method(replacement='ionice')\n        def get_ionice(self):\n            pass\n\n        @_deprecated_method(replacement='ionice')\n        def set_ionice(self, ioclass, value=None):\n            pass\n\n    @_deprecated_method(replacement='memory_info')\n    def get_memory_info(self):\n        pass\n\n    @_deprecated_method(replacement='memory_maps')\n    def get_memory_maps(self):\n        pass\n\n    @_deprecated_method(replacement='memory_percent')\n    def get_memory_percent(self):\n        pass\n\n    @_deprecated_method(replacement='nice')\n    def get_nice(self):\n        pass\n\n    @_deprecated_method(replacement='num_ctx_switches')\n    def get_num_ctx_switches(self):\n        pass\n\n    if 'num_fds' in _locals:\n        @_deprecated_method(replacement='num_fds')\n        def get_num_fds(self):\n            pass\n\n    if 'num_handles' in _locals:\n        @_deprecated_method(replacement='num_handles')\n        def get_num_handles(self):\n            pass\n\n    @_deprecated_method(replacement='num_threads')\n    def get_num_threads(self):\n        pass\n\n    @_deprecated_method(replacement='open_files')\n    def get_open_files(self):\n        pass\n\n    if \"rlimit\" in _locals:\n        @_deprecated_method(replacement='rlimit')\n        def get_rlimit(self):\n            pass\n\n        @_deprecated_method(replacement='rlimit')\n        def set_rlimit(self, resource, limits):\n            pass\n\n    @_deprecated_method(replacement='threads')\n    def get_threads(self):\n        pass\n\n    @_deprecated_method(replacement='nice')\n    def set_nice(self, value):\n        pass\n\n    del _locals\n\n\n# =====================================================================\n# --- Popen class\n# =====================================================================\n\nclass Popen(Process):\n    \"\"\"A more convenient interface to stdlib subprocess module.\n    It starts a subprocess and deals with it exactly as when using\n    subprocess.Popen class but in addition also provides all the\n    properties and methods of psutil.Process class as a unified\n    interface:\n\n      >>> import psutil\n      >>> from subprocess import PIPE\n      >>> p = psutil.Popen([\"python\", \"-c\", \"print 'hi'\"], stdout=PIPE)\n      >>> p.name()\n      'python'\n      >>> p.uids()\n      user(real=1000, effective=1000, saved=1000)\n      >>> p.username()\n      'giampaolo'\n      >>> p.communicate()\n      ('hi\\n', None)\n      >>> p.terminate()\n      >>> p.wait(timeout=2)\n      0\n      >>>\n\n    For method names common to both classes such as kill(), terminate()\n    and wait(), psutil.Process implementation takes precedence.\n\n    Unlike subprocess.Popen this class pre-emptively checks whether PID\n    has been reused on send_signal(), terminate() and kill() so that\n    you don't accidentally terminate another process, fixing\n    http://bugs.python.org/issue6973.\n\n    For complete documentation refer to:\n    http://docs.python.org/library/subprocess.html\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        # Explicitly avoid raising NoSuchProcess in case the process\n        # spawned by subprocess.Popen terminates too quickly, see:\n        # https://github.com/giampaolo/psutil/issues/193\n        self.__subproc = subprocess.Popen(*args, **kwargs)\n        self._init(self.__subproc.pid, _ignore_nsp=True)\n\n    def __dir__(self):\n        return sorted(set(dir(Popen) + dir(subprocess.Popen)))\n\n    def __getattribute__(self, name):\n        try:\n            return object.__getattribute__(self, name)\n        except AttributeError:\n            try:\n                return object.__getattribute__(self.__subproc, name)\n            except AttributeError:\n                raise AttributeError(\"%s instance has no attribute '%s'\"\n                                     % (self.__class__.__name__, name))\n\n    def wait(self, timeout=None):\n        if self.__subproc.returncode is not None:\n            return self.__subproc.returncode\n        ret = super(Popen, self).wait(timeout)\n        self.__subproc.returncode = ret\n        return ret\n\n\n# =====================================================================\n# --- system processes related functions\n# =====================================================================\n\ndef pids():\n    \"\"\"Return a list of currently running PIDs.\"\"\"\n    return _psplatform.pids()\n\n\ndef pid_exists(pid):\n    \"\"\"Return True if given PID exists in the current process list.\n    This is faster than doing \"pid in psutil.pids()\" and\n    should be preferred.\n
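\n    Example:\n\n    >>> import os, psutil\n    >>> psutil.pid_exists(os.getpid())\n    True\n    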
\"\"\"\n    if pid < 0:\n        return False\n    elif pid == 0 and _POSIX:\n        # On POSIX we use os.kill() to determine PID existence.\n        # According to \"man 2 kill\" PID 0 has a special meaning\n        # though: it refers to <<every process in the process\n        # group of the calling process>> and that is not what we want\n        # to do here.\n        return pid in pids()\n    else:\n        return _psplatform.pid_exists(pid)\n\n\n_pmap = {}\n\n\ndef process_iter():\n    \"\"\"Return a generator yielding a Process instance for all\n    running processes.\n\n    Every new Process instance is only created once and then cached\n    into an internal table which is updated every time this is used.\n\n    Cached Process instances are checked for identity so that you're\n    safe in case a PID has been reused by another process, in which\n    case the cached instance is updated.\n\n    The sorting order in which processes are yielded is based on\n    their PIDs.\n
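\n    Example:\n\n    >>> import psutil\n    >>> for proc in psutil.process_iter():\n    ...     try:\n    ...         print(\"%s %s\" % (proc.pid, proc.name()))\n    ...     except psutil.NoSuchProcess:\n    ...         pass\n    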
\"\"\"\n    def add(pid):\n        proc = Process(pid)\n        _pmap[proc.pid] = proc\n        return proc\n\n    def remove(pid):\n        _pmap.pop(pid, None)\n\n    a = set(pids())\n    b = set(_pmap.keys())\n    new_pids = a - b\n    gone_pids = b - a\n\n    for pid in gone_pids:\n        remove(pid)\n    for pid, proc in sorted(list(_pmap.items()) +\n                            list(dict.fromkeys(new_pids).items())):\n        try:\n            if proc is None:  # new process\n                yield add(pid)\n            else:\n                # use is_running() to check whether PID has been reused by\n                # another process in which case yield a new Process instance\n                if proc.is_running():\n                    yield proc\n                else:\n                    yield add(pid)\n        except NoSuchProcess:\n            remove(pid)\n        except AccessDenied:\n            # Process creation time can't be determined hence there's\n            # no way to tell whether the pid of the cached process\n            # has been reused. Just return the cached version.\n            yield proc\n\n\ndef wait_procs(procs, timeout=None, callback=None):\n    \"\"\"Convenience function which waits for a list of processes to\n    terminate.\n\n    Return a (gone, alive) tuple indicating which processes\n    are gone and which ones are still alive.\n\n    The gone ones will have a new 'returncode' attribute indicating\n    process exit status (may be None).\n\n    'callback' is a function which gets called every time a process\n    terminates (a Process instance is passed as callback argument).\n\n    Function will return as soon as all processes terminate or when\n    timeout occurs.\n\n    Typical use case is:\n\n     - send SIGTERM to a list of processes\n     - give them some time to terminate\n     - send SIGKILL to those ones which are still alive\n\n    Example:\n\n    >>> def on_terminate(proc):\n    ...     print(\"process {} terminated\".format(proc))\n    ...\n    >>> for p in procs:\n    ...    p.terminate()\n    ...\n    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)\n    >>> for p in alive:\n    ...     p.kill()\n    \"\"\"\n    def check_gone(proc, timeout):\n        try:\n            returncode = proc.wait(timeout=timeout)\n        except TimeoutExpired:\n            pass\n        else:\n            if returncode is not None or not proc.is_running():\n                proc.returncode = returncode\n                gone.add(proc)\n                if callback is not None:\n                    callback(proc)\n\n    if timeout is not None and not timeout >= 0:\n        msg = \"timeout must be a positive integer, got %s\" % timeout\n        raise ValueError(msg)\n    gone = set()\n    alive = set(procs)\n    if callback is not None and not callable(callback):\n        raise TypeError(\"callback %r is not a callable\" % callback)\n    if timeout is not None:\n        deadline = _timer() + timeout\n\n    while alive:\n        if timeout is not None and timeout <= 0:\n            break\n        for proc in alive:\n            # Make sure that every complete iteration (all processes)\n            # will last max 1 sec.\n            # We do this because we don't want to wait too long on a\n            # single process: in case it terminates too late other\n            # processes may disappear in the meantime and their PID\n            # reused.\n            max_timeout = 1.0 / len(alive)\n            if timeout is not None:\n                timeout = min((deadline - _timer()), max_timeout)\n                if timeout <= 0:\n                    break\n                check_gone(proc, timeout)\n            else:\n                check_gone(proc, max_timeout)\n        alive = alive - gone\n\n    if alive:\n        # Last attempt over processes survived so far.\n        # timeout == 0 won't make this function wait any further.\n        for proc in alive:\n            check_gone(proc, 0)\n        alive = alive - gone\n\n    return (list(gone), list(alive))\n\n\n# =====================================================================\n# --- CPU related functions\n# =====================================================================\n\n@memoize\ndef cpu_count(logical=True):\n    \"\"\"Return the number of logical CPUs in the system (same as\n    os.cpu_count() in Python 3.4).\n\n    If logical is False return the number of physical cores only\n    (hyper thread CPUs are excluded).\n\n    Return None if undetermined.\n\n    The return value is cached after first call.\n    If desired the cache can be cleared like this:\n\n    >>> psutil.cpu_count.cache_clear()\n
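\n    Example (values depend on the machine):\n\n    >>> psutil.cpu_count()\n    4\n    >>> psutil.cpu_count(logical=False)\n    2\n    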
\"\"\"\n    if logical:\n        return _psplatform.cpu_count_logical()\n    else:\n        return _psplatform.cpu_count_physical()\n\n\ndef cpu_times(percpu=False):\n    \"\"\"Return system-wide CPU times as a namedtuple.\n    Every CPU time represents the seconds the CPU has spent in the given mode.\n    The availability of the namedtuple's fields varies depending on the\n    platform:\n     - user\n     - system\n     - idle\n     - nice (UNIX)\n     - iowait (Linux)\n     - irq (Linux, FreeBSD)\n     - softirq (Linux)\n     - steal (Linux >= 2.6.11)\n     - guest (Linux >= 2.6.24)\n     - guest_nice (Linux >= 3.2.0)\n\n    When percpu is True return a list of namedtuples for each CPU.\n    First element of the list refers to first CPU, second element\n    to second CPU and so on.\n    The order of the list is consistent across calls.\n    \"\"\"\n    if not percpu:\n        return _psplatform.cpu_times()\n    else:\n        return _psplatform.per_cpu_times()\n\n\n_last_cpu_times = cpu_times()\n_last_per_cpu_times = cpu_times(percpu=True)\n\n\ndef cpu_percent(interval=None, percpu=False):\n    \"\"\"Return a float representing the current system-wide CPU\n    utilization as a percentage.\n\n    When interval is > 0.0 compares system CPU times elapsed before\n    and after the interval (blocking).\n\n    When interval is 0.0 or None compares system CPU times elapsed\n    since last call or module import, returning immediately (non\n    blocking). That means the first time this is called it will\n    return a meaningless 0.0 value which you should ignore.\n    In this case it is recommended for accuracy that this function be\n    called with at least 0.1 seconds between calls.\n\n    When percpu is True returns a list of floats representing the\n    utilization as a percentage for each CPU.\n    First element of the list refers to first CPU, second element\n    to second CPU and so on.\n    The order of the list is consistent across calls.\n\n    Examples:\n\n      >>> # blocking, system-wide\n      >>> psutil.cpu_percent(interval=1)\n      2.0\n      >>>\n      >>> # blocking, per-cpu\n      >>> psutil.cpu_percent(interval=1, percpu=True)\n      [2.0, 1.0]\n      >>>\n      >>> # non-blocking (percentage since last call)\n      >>> psutil.cpu_percent(interval=None)\n      2.9\n      >>>\n    \"\"\"\n    global _last_cpu_times\n    global _last_per_cpu_times\n    blocking = interval is not None and interval > 0.0\n\n    def calculate(t1, t2):\n        t1_all = sum(t1)\n        t1_busy = t1_all - t1.idle\n\n        t2_all = sum(t2)\n        t2_busy = t2_all - t2.idle\n\n        # this usually indicates a float precision issue\n        if t2_busy <= t1_busy:\n            return 0.0\n\n        busy_delta = t2_busy - t1_busy\n        all_delta = t2_all - t1_all\n        busy_perc = (busy_delta / all_delta) * 100\n        return round(busy_perc, 1)\n\n    # system-wide usage\n    if not percpu:\n        if blocking:\n            t1 = cpu_times()\n            time.sleep(interval)\n        else:\n            t1 = _last_cpu_times\n        _last_cpu_times = cpu_times()\n        return calculate(t1, _last_cpu_times)\n    # per-cpu usage\n    else:\n        ret = []\n        if blocking:\n            tot1 = cpu_times(percpu=True)\n            time.sleep(interval)\n        else:\n            tot1 = _last_per_cpu_times\n        _last_per_cpu_times = cpu_times(percpu=True)\n        for t1, t2 in zip(tot1, _last_per_cpu_times):\n            ret.append(calculate(t1, 
t2))\n        return ret\n\n\n# Use separate global vars for cpu_times_percent() so that it's\n# independent from cpu_percent() and they can both be used within\n# the same program.\n_last_cpu_times_2 = _last_cpu_times\n_last_per_cpu_times_2 = _last_per_cpu_times\n\n\ndef cpu_times_percent(interval=None, percpu=False):\n    \"\"\"Same as cpu_percent() but provides utilization percentages\n    for each specific CPU time as is returned by cpu_times().\n    For instance, on Linux we'll get:\n\n      >>> cpu_times_percent()\n      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,\n                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)\n      >>>\n\n    interval and percpu arguments have the same meaning as in\n    cpu_percent().\n    \"\"\"\n    global _last_cpu_times_2\n    global _last_per_cpu_times_2\n    blocking = interval is not None and interval > 0.0\n\n    def calculate(t1, t2):\n        nums = []\n        all_delta = sum(t2) - sum(t1)\n        for field in t1._fields:\n            field_delta = getattr(t2, field) - getattr(t1, field)\n            try:\n                field_perc = (100 * field_delta) / all_delta\n            except ZeroDivisionError:\n                field_perc = 0.0\n            field_perc = round(field_perc, 1)\n            if _WINDOWS:\n                # XXX\n                # Work around:\n                # https://github.com/giampaolo/psutil/issues/392\n                # CPU times are always supposed to increase over time\n                # or at least remain the same and that's because time\n                # cannot go backwards.\n                # Surprisingly sometimes this might not be the case on\n                # Windows where 'system' CPU time can be smaller\n                # compared to the previous call, resulting in corrupted\n                # percentages (< 0 or > 100).\n                # I really don't know what to do about that except\n                # forcing the value to 0 or 100.\n                if field_perc > 100.0:\n                    field_perc = 100.0\n                elif field_perc < 0.0:\n                    field_perc = 0.0\n            nums.append(field_perc)\n        return _psplatform.scputimes(*nums)\n\n    # system-wide usage\n    if not percpu:\n        if blocking:\n            t1 = cpu_times()\n            time.sleep(interval)\n        else:\n            t1 = _last_cpu_times_2\n        _last_cpu_times_2 = cpu_times()\n        return calculate(t1, _last_cpu_times_2)\n    # per-cpu usage\n    else:\n        ret = []\n        if blocking:\n            tot1 = cpu_times(percpu=True)\n            time.sleep(interval)\n        else:\n            tot1 = _last_per_cpu_times_2\n        _last_per_cpu_times_2 = cpu_times(percpu=True)\n        for t1, t2 in zip(tot1, _last_per_cpu_times_2):\n            ret.append(calculate(t1, t2))\n        return ret\n\n\n# =====================================================================\n# --- system memory related functions\n# =====================================================================\n\ndef virtual_memory():\n    \"\"\"Return statistics about system memory usage as a namedtuple\n    including the following fields, expressed in bytes:\n\n     - total:\n       total physical memory available.\n\n     - available:\n       the actual amount of available memory that can be given\n       instantly to processes that request more memory in bytes; this\n       is calculated by summing different memory values depending on\n       the platform (e.g. 
free + buffers + cached on Linux) and it is\n       supposed to be used to monitor actual memory usage in a cross\n       platform fashion.\n\n     - percent:\n       the percentage usage calculated as (total - available) / total * 100\n\n     - used:\n       memory used, calculated differently depending on the platform and\n       designed for informational purposes only:\n        OSX: active + inactive + wired\n        BSD: active + wired + cached\n        LINUX: total - free\n\n     - free:\n       memory not being used at all (zeroed) that is readily available;\n       note that this doesn't reflect the actual memory available\n       (use 'available' instead)\n\n    Platform-specific fields:\n\n     - active (UNIX):\n       memory currently in use or very recently used, and so it is in RAM.\n\n     - inactive (UNIX):\n       memory that is marked as not used.\n\n     - buffers (BSD, Linux):\n       cache for things like file system metadata.\n\n     - cached (BSD, OSX):\n       cache for various things.\n\n     - wired (OSX, BSD):\n       memory that is marked to always stay in RAM. It is never moved to disk.\n\n     - shared (BSD):\n       memory that may be simultaneously accessed by multiple processes.\n\n    The sum of 'used' and 'available' does not necessarily equal total.\n    On Windows 'available' and 'free' are the same.\n    \"\"\"\n    global _TOTAL_PHYMEM\n    ret = _psplatform.virtual_memory()\n    # cached for later use in Process.memory_percent()\n    _TOTAL_PHYMEM = ret.total\n    return ret\n\n\ndef swap_memory():\n    \"\"\"Return system swap memory statistics as a namedtuple including\n    the following fields:\n\n     - total:   total swap memory in bytes\n     - used:    used swap memory in bytes\n     - free:    free swap memory in bytes\n     - percent: the percentage usage\n     - sin:     no. of bytes the system has swapped in from disk (cumulative)\n     - sout:    no. 
of bytes the system has swapped out from disk (cumulative)\n\n    'sin' and 'sout' on Windows are meaningless and always set to 0.\n    \"\"\"\n    return _psplatform.swap_memory()\n\n\n# =====================================================================\n# --- disks/partitions related functions\n# =====================================================================\n\ndef disk_usage(path):\n    \"\"\"Return disk usage statistics about the given path as a namedtuple\n    including total, used and free space expressed in bytes plus the\n    percentage usage.\n    \"\"\"\n    return _psplatform.disk_usage(path)\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return mounted partitions as a list of\n    (device, mountpoint, fstype, opts) namedtuples.\n    'opts' field is a raw comma-separated string indicating mount\n    options which may vary depending on the platform.\n\n    If \"all\" parameter is False return physical devices only and ignore\n    all others.\n    \"\"\"\n    return _psplatform.disk_partitions(all)\n\n\ndef disk_io_counters(perdisk=False):\n    \"\"\"Return system disk I/O statistics as a namedtuple including\n    the following fields:\n\n     - read_count:  number of reads\n     - write_count: number of writes\n     - read_bytes:  number of bytes read\n     - write_bytes: number of bytes written\n     - read_time:   time spent reading from disk (in milliseconds)\n     - write_time:  time spent writing to disk (in milliseconds)\n\n    If perdisk is True return the same information for every\n    physical disk installed on the system as a dictionary\n    with partition names as the keys and the namedtuple\n    described above as the values.\n\n    On recent Windows versions 'diskperf -y' command may need to be\n    executed first, otherwise this function won't find any disk.\n    \"\"\"\n    rawdict = _psplatform.disk_io_counters()\n    if not rawdict:\n        raise RuntimeError(\"couldn't find any physical disk\")\n    if perdisk:\n        for disk, fields in rawdict.items():\n            rawdict[disk] = _nt_sys_diskio(*fields)\n        return rawdict\n    else:\n        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])\n\n\n# =====================================================================\n# --- network related functions\n# =====================================================================\n\ndef net_io_counters(pernic=False):\n    \"\"\"Return network I/O statistics as a namedtuple including\n    the following fields:\n\n     - bytes_sent:   number of bytes sent\n     - bytes_recv:   number of bytes received\n     - packets_sent: number of packets sent\n     - packets_recv: number of packets received\n     - errin:        total number of errors while receiving\n     - errout:       total number of errors while sending\n     - dropin:       total number of incoming packets which were dropped\n     - dropout:      total number of outgoing packets which were dropped\n                     (always 0 on OSX and BSD)\n\n    If pernic is True return the same information for every\n    network interface installed on the system as a dictionary\n    with network interface names as the keys and the namedtuple\n    described above as the values.\n    \"\"\"\n    rawdict = _psplatform.net_io_counters()\n    if not rawdict:\n        raise RuntimeError(\"couldn't find any network interface\")\n    if pernic:\n        for nic, fields in rawdict.items():\n            rawdict[nic] = _nt_sys_netio(*fields)\n        return rawdict\n    else:\n        return 
_nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])\n\n\ndef net_connections(kind='inet'):\n    \"\"\"Return system-wide connections as a list of\n    (fd, family, type, laddr, raddr, status, pid) namedtuples.\n    In case of limited privileges 'fd' and 'pid' may be set to -1\n    and None respectively.\n    The 'kind' parameter filters for connections that fit the\n    following criteria:\n\n    Kind Value      Connections using\n    inet            IPv4 and IPv6\n    inet4           IPv4\n    inet6           IPv6\n    tcp             TCP\n    tcp4            TCP over IPv4\n    tcp6            TCP over IPv6\n    udp             UDP\n    udp4            UDP over IPv4\n    udp6            UDP over IPv6\n    unix            UNIX socket (both UDP and TCP protocols)\n    all             the sum of all the possible families and protocols\n    \"\"\"\n    return _psplatform.net_connections(kind)\n\n\n# =====================================================================\n# --- other system related functions\n# =====================================================================\n\n\ndef boot_time():\n    \"\"\"Return the system boot time expressed in seconds since the epoch.\n    This is also available as psutil.BOOT_TIME.\n    \"\"\"\n    # Note: we are not caching this because it is subject to\n    # system clock updates.\n    return _psplatform.boot_time()\n\n\ndef users():\n    \"\"\"Return users currently connected on the system as a list of\n    namedtuples including the following fields.\n\n     - user: the name of the user\n     - terminal: the tty or pseudo-tty associated with the user, if any.\n     - host: the host name associated with the entry, if any.\n     - started: the creation time as a floating point number expressed in\n       seconds since the epoch.\n    \"\"\"\n    return _psplatform.users()\n\n\n# =====================================================================\n# --- deprecated functions\n# =====================================================================\n\n@_deprecated(replacement=\"psutil.pids()\")\ndef get_pid_list():\n    return pids()\n\n\n@_deprecated(replacement=\"list(process_iter())\")\ndef get_process_list():\n    return list(process_iter())\n\n\n@_deprecated(replacement=\"psutil.users()\")\ndef get_users():\n    return users()\n\n\n@_deprecated(replacement=\"psutil.virtual_memory()\")\ndef phymem_usage():\n    \"\"\"Return the amount of total, used and free physical memory\n    on the system in bytes plus the percentage usage.\n    Deprecated; use psutil.virtual_memory() instead.\n    \"\"\"\n    return virtual_memory()\n\n\n@_deprecated(replacement=\"psutil.swap_memory()\")\ndef virtmem_usage():\n    return swap_memory()\n\n\n@_deprecated(replacement=\"psutil.phymem_usage().free\")\ndef avail_phymem():\n    return phymem_usage().free\n\n\n@_deprecated(replacement=\"psutil.phymem_usage().used\")\ndef used_phymem():\n    return phymem_usage().used\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().total\")\ndef total_virtmem():\n    return virtmem_usage().total\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().used\")\ndef used_virtmem():\n    return virtmem_usage().used\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().free\")\ndef avail_virtmem():\n    return virtmem_usage().free\n\n\n@_deprecated(replacement=\"psutil.net_io_counters()\")\ndef network_io_counters(pernic=False):\n    return net_io_counters(pernic)\n\n\ndef test():\n    \"\"\"List info of all currently running processes emulating ps aux\n    output.\n    \"\"\"\n  
  import datetime\n\n    today_day = datetime.date.today()\n    templ = \"%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s\"\n    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',\n             'create_time', 'memory_info']\n    if _POSIX:\n        attrs.append('uids')\n        attrs.append('terminal')\n    print(templ % (\"USER\", \"PID\", \"%CPU\", \"%MEM\", \"VSZ\", \"RSS\", \"TTY\",\n                   \"START\", \"TIME\", \"COMMAND\"))\n    for p in process_iter():\n        try:\n            pinfo = p.as_dict(attrs, ad_value='')\n        except NoSuchProcess:\n            pass\n        else:\n            if pinfo['create_time']:\n                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])\n                if ctime.date() == today_day:\n                    ctime = ctime.strftime(\"%H:%M\")\n                else:\n                    ctime = ctime.strftime(\"%b%d\")\n            else:\n                ctime = ''\n            cputime = time.strftime(\"%M:%S\",\n                                    time.localtime(sum(pinfo['cpu_times'])))\n            try:\n                user = p.username()\n            except KeyError:\n                if _POSIX:\n                    if pinfo['uids']:\n                        user = str(pinfo['uids'].real)\n                    else:\n                        user = ''\n                else:\n                    raise\n            except Error:\n                user = ''\n            if _WINDOWS and '\\\\' in user:\n                user = user.split('\\\\')[1]\n            vms = pinfo['memory_info'] and \\\n                int(pinfo['memory_info'].vms / 1024) or '?'\n            rss = pinfo['memory_info'] and \\\n                int(pinfo['memory_info'].rss / 1024) or '?'\n            memp = pinfo['memory_percent'] and \\\n                round(pinfo['memory_percent'], 1) or '?'\n            print(templ % (\n                user[:10],\n                pinfo['pid'],\n                pinfo['cpu_percent'],\n                memp,\n                vms,\n                rss,\n                pinfo.get('terminal', '') or '?',\n                ctime,\n                cputime,\n                pinfo['name'].strip() or '?'))\n\n\ndef _replace_module():\n    \"\"\"Dirty hack to replace the module object in order to access\n    deprecated module constants, see:\n    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html\n    \"\"\"\n    class ModuleWrapper(object):\n\n        def __repr__(self):\n            return repr(self._module)\n        __str__ = __repr__\n\n        @property\n        def NUM_CPUS(self):\n            msg = \"NUM_CPUS constant is deprecated; use cpu_count() instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return cpu_count()\n\n        @property\n        def BOOT_TIME(self):\n            msg = \"BOOT_TIME constant is deprecated; use boot_time() instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return boot_time()\n\n        @property\n        def TOTAL_PHYMEM(self):\n            msg = \"TOTAL_PHYMEM constant is deprecated; \" \\\n                  \"use virtual_memory().total instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return virtual_memory().total\n\n    mod = ModuleWrapper()\n    mod.__dict__ = globals()\n    mod._module = sys.modules[__name__]\n    sys.modules[__name__] = mod\n\n\n_replace_module()\ndel memoize, division, _replace_module\nif 
sys.version_info < (3, 0):\n    del num\n\nif __name__ == \"__main__\":\n    test()\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_common.py",
    "content": "# /usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Common objects shared by all _ps* modules.\"\"\"\n\nfrom __future__ import division\nimport errno\nimport functools\nimport os\nimport socket\nimport stat\nimport warnings\ntry:\n    import threading\nexcept ImportError:\n    import dummy_threading as threading\n\nfrom collections import namedtuple\nfrom socket import AF_INET, SOCK_STREAM, SOCK_DGRAM\n\n# --- constants\n\nAF_INET6 = getattr(socket, 'AF_INET6', None)\nAF_UNIX = getattr(socket, 'AF_UNIX', None)\n\nSTATUS_RUNNING = \"running\"\nSTATUS_SLEEPING = \"sleeping\"\nSTATUS_DISK_SLEEP = \"disk-sleep\"\nSTATUS_STOPPED = \"stopped\"\nSTATUS_TRACING_STOP = \"tracing-stop\"\nSTATUS_ZOMBIE = \"zombie\"\nSTATUS_DEAD = \"dead\"\nSTATUS_WAKE_KILL = \"wake-kill\"\nSTATUS_WAKING = \"waking\"\nSTATUS_IDLE = \"idle\"  # BSD\nSTATUS_LOCKED = \"locked\"  # BSD\nSTATUS_WAITING = \"waiting\"  # BSD\n\nCONN_ESTABLISHED = \"ESTABLISHED\"\nCONN_SYN_SENT = \"SYN_SENT\"\nCONN_SYN_RECV = \"SYN_RECV\"\nCONN_FIN_WAIT1 = \"FIN_WAIT1\"\nCONN_FIN_WAIT2 = \"FIN_WAIT2\"\nCONN_TIME_WAIT = \"TIME_WAIT\"\nCONN_CLOSE = \"CLOSE\"\nCONN_CLOSE_WAIT = \"CLOSE_WAIT\"\nCONN_LAST_ACK = \"LAST_ACK\"\nCONN_LISTEN = \"LISTEN\"\nCONN_CLOSING = \"CLOSING\"\nCONN_NONE = \"NONE\"\n\n\n# --- functions\n\ndef usage_percent(used, total, _round=None):\n    \"\"\"Calculate percentage usage of 'used' against 'total'.\"\"\"\n    try:\n        ret = (used / total) * 100\n    except ZeroDivisionError:\n        ret = 0\n    if _round is not None:\n        return round(ret, _round)\n    else:\n        return ret\n\n\ndef memoize(fun):\n    \"\"\"A simple memoize decorator for functions supporting (hashable)\n    positional arguments.\n    It also provides a cache_clear() function for clearing the cache:\n\n    >>> @memoize\n    ... def foo()\n    ...     
\"\"\"\n    try:\n        ret = (used / total) * 100\n    except ZeroDivisionError:\n        ret = 0\n    if _round is not None:\n        return round(ret, _round)\n    else:\n        return ret\n\n\ndef memoize(fun):\n    \"\"\"A simple memoize decorator for functions supporting (hashable)\n    positional arguments.\n    It also provides a cache_clear() function for clearing the cache:\n\n    >>> @memoize\n    ... def foo():\n    ...     return 1\n    ...\n    >>> foo()\n    1\n    >>> foo.cache_clear()\n    >>>\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(*args, **kwargs):\n        key = (args, frozenset(sorted(kwargs.items())))\n        lock.acquire()\n        try:\n            try:\n                return cache[key]\n            except KeyError:\n                ret = cache[key] = fun(*args, **kwargs)\n        finally:\n            lock.release()\n        return ret\n\n    def cache_clear():\n        \"\"\"Clear cache.\"\"\"\n        lock.acquire()\n        try:\n            cache.clear()\n        finally:\n            lock.release()\n\n    lock = threading.RLock()\n    cache = {}\n    wrapper.cache_clear = cache_clear\n    return wrapper\n\n\n# http://code.activestate.com/recipes/577819-deprecated-decorator/\ndef deprecated(replacement=None):\n    \"\"\"A decorator which can be used to mark functions as deprecated.\"\"\"\n    def outer(fun):\n        msg = \"psutil.%s is deprecated\" % fun.__name__\n        if replacement is not None:\n            msg += \"; use %s instead\" % replacement\n        if fun.__doc__ is None:\n            fun.__doc__ = msg\n\n        @functools.wraps(fun)\n        def inner(*args, **kwargs):\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return fun(*args, **kwargs)\n\n        return inner\n    return outer\n\n\ndef deprecated_method(replacement):\n    \"\"\"A decorator which can be used to mark a method as deprecated.\n    'replacement' is the method name which will be called instead.\n
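\n    Example (illustrative):\n\n    >>> class Foo(object):\n    ...     def bar(self):\n    ...         return 1\n    ...     @deprecated_method(replacement='bar')\n    ...     def old_bar(self):\n    ...         pass\n    ...\n    >>> Foo().old_bar()  # warns and forwards to bar()\n    1\n    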
\"\"\"\n    def outer(fun):\n        msg = \"%s() is deprecated; use %s() instead\" % (\n            fun.__name__, replacement)\n        if fun.__doc__ is None:\n            fun.__doc__ = msg\n\n        @functools.wraps(fun)\n        def inner(self, *args, **kwargs):\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return getattr(self, replacement)(*args, **kwargs)\n        return inner\n    return outer\n\n\ndef isfile_strict(path):\n    \"\"\"Same as os.path.isfile() but does not swallow EACCES / EPERM\n    exceptions, see:\n    http://mail.python.org/pipermail/python-dev/2012-June/120787.html\n    \"\"\"\n    try:\n        st = os.stat(path)\n    except OSError as err:\n        if err.errno in (errno.EPERM, errno.EACCES):\n            raise\n        return False\n    else:\n        return stat.S_ISREG(st.st_mode)\n\n\n# --- Process.connections() 'kind' parameter mapping\n\nconn_tmap = {\n    \"all\": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),\n    \"tcp\": ([AF_INET, AF_INET6], [SOCK_STREAM]),\n    \"tcp4\": ([AF_INET], [SOCK_STREAM]),\n    \"udp\": ([AF_INET, AF_INET6], [SOCK_DGRAM]),\n    \"udp4\": ([AF_INET], [SOCK_DGRAM]),\n    \"inet\": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),\n    \"inet4\": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),\n    \"inet6\": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),\n}\n\nif AF_INET6 is not None:\n    conn_tmap.update({\n        \"tcp6\": ([AF_INET6], [SOCK_STREAM]),\n        \"udp6\": ([AF_INET6], [SOCK_DGRAM]),\n    })\n\nif AF_UNIX is not None:\n    conn_tmap.update({\n        \"unix\": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),\n    })\n\ndel AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket\n\n\n# --- namedtuples for psutil.* system-related functions\n\n# psutil.swap_memory()\nsswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',\n                             'sout'])\n# psutil.disk_usage()\nsdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])\n# psutil.disk_io_counters()\nsdiskio = namedtuple('sdiskio', ['read_count', 'write_count',\n                                 'read_bytes', 'write_bytes',\n                                 'read_time', 'write_time'])\n# psutil.disk_partitions()\nsdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])\n# psutil.net_io_counters()\nsnetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',\n                               'packets_sent', 'packets_recv',\n                               'errin', 'errout',\n                               'dropin', 'dropout'])\n# psutil.users()\nsuser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])\n# psutil.net_connections()\nsconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',\n                             'status', 'pid'])\n\n\n# --- namedtuples for psutil.Process methods\n\n# psutil.Process.memory_info()\npmem = namedtuple('pmem', ['rss', 'vms'])\n# psutil.Process.cpu_times()\npcputimes = namedtuple('pcputimes', ['user', 'system'])\n# psutil.Process.open_files()\npopenfile = namedtuple('popenfile', ['path', 'fd'])\n# psutil.Process.threads()\npthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])\n# psutil.Process.uids()\npuids = namedtuple('puids', ['real', 'effective', 'saved'])\n# psutil.Process.gids()\npgids = namedtuple('pgids', ['real', 'effective', 'saved'])\n# psutil.Process.io_counters()\npio = namedtuple('pio', ['read_count', 'write_count',\n                         'read_bytes', 'write_bytes'])\n# psutil.Process.ionice()\npionice = namedtuple('pionice', ['ioclass', 'value'])\n# psutil.Process.ctx_switches()\npctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])\n\n\n# --- misc\n\n# backward compatibility layer for Process.connections() ntuple\nclass pconn(\n    namedtuple('pconn',\n               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):\n    __slots__ = ()\n\n    @property\n    def local_address(self):\n        warnings.warn(\"'local_address' field is deprecated; use 'laddr' \"\n                      \"instead\", category=DeprecationWarning, stacklevel=2)\n        return self.laddr\n\n    @property\n    def remote_address(self):\n        warnings.warn(\"'remote_address' field is deprecated; use 'raddr' \"\n                      \"instead\", category=DeprecationWarning, stacklevel=2)\n        return self.raddr\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_compat.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Module which provides compatibility with older Python versions.\"\"\"\n\n__all__ = [\"PY3\", \"int\", \"long\", \"xrange\", \"exec_\", \"callable\", \"lru_cache\"]\n\nimport collections\nimport functools\nimport sys\ntry:\n    import __builtin__\nexcept ImportError:\n    import builtins as __builtin__  # py3\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n    int = int\n    long = int\n    xrange = range\n    unicode = str\n    basestring = str\n    exec_ = getattr(__builtin__, \"exec\")\nelse:\n    int = int\n    long = long\n    xrange = xrange\n    unicode = unicode\n    basestring = basestring\n\n    def exec_(code, globs=None, locs=None):\n        if globs is None:\n            frame = sys._getframe(1)\n            globs = frame.f_globals\n            if locs is None:\n                locs = frame.f_locals\n            del frame\n        elif locs is None:\n            locs = globs\n        exec(\"\"\"exec code in globs, locs\"\"\")\n\n\n# removed in 3.0, reintroduced in 3.2\ntry:\n    callable = callable\nexcept NameError:\n    def callable(obj):\n        return any(\"__call__\" in klass.__dict__ for klass in type(obj).__mro__)\n\n\n# --- stdlib additions\n\n\n# py 3.2 functools.lru_cache\n# Taken from: http://code.activestate.com/recipes/578078\n# Credit: Raymond Hettinger\ntry:\n    from functools import lru_cache\nexcept ImportError:\n    try:\n        from threading import RLock\n    except ImportError:\n        from dummy_threading import RLock\n\n    _CacheInfo = collections.namedtuple(\n        \"CacheInfo\", [\"hits\", \"misses\", \"maxsize\", \"currsize\"])\n\n    class _HashedSeq(list):\n        __slots__ = 'hashvalue'\n\n        def __init__(self, tup, hash=hash):\n            self[:] = tup\n            self.hashvalue = hash(tup)\n\n        def __hash__(self):\n            return self.hashvalue\n\n    def _make_key(args, kwds, typed,\n                  kwd_mark=(object(), ),\n                  fasttypes=set((int, str, frozenset, type(None))),\n                  sorted=sorted, tuple=tuple, type=type, len=len):\n        key = args\n        if kwds:\n            sorted_items = sorted(kwds.items())\n            key += kwd_mark\n            for item in sorted_items:\n                key += item\n        if typed:\n            key += tuple(type(v) for v in args)\n            if kwds:\n                key += tuple(type(v) for k, v in sorted_items)\n        elif len(key) == 1 and type(key[0]) in fasttypes:\n            return key[0]\n        return _HashedSeq(key)\n\n    def lru_cache(maxsize=100, typed=False):\n        \"\"\"Least-recently-used cache decorator, see:\n        http://docs.python.org/3/library/functools.html#functools.lru_cache\n        \"\"\"\n        def decorating_function(user_function):\n            cache = dict()\n            stats = [0, 0]\n            HITS, MISSES = 0, 1\n            make_key = _make_key\n            cache_get = cache.get\n            _len = len\n            lock = RLock()\n            root = []\n            root[:] = [root, root, None, None]\n            nonlocal_root = [root]\n            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3\n            if maxsize == 0:\n                def wrapper(*args, **kwds):\n                    result = user_function(*args, **kwds)\n                    stats[MISSES] += 1\n                    return 
result\n            elif maxsize is None:\n                def wrapper(*args, **kwds):\n                    key = make_key(args, kwds, typed)\n                    result = cache_get(key, root)\n                    if result is not root:\n                        stats[HITS] += 1\n                        return result\n                    result = user_function(*args, **kwds)\n                    cache[key] = result\n                    stats[MISSES] += 1\n                    return result\n            else:\n                def wrapper(*args, **kwds):\n                    if kwds or typed:\n                        key = make_key(args, kwds, typed)\n                    else:\n                        key = args\n                    lock.acquire()\n                    try:\n                        link = cache_get(key)\n                        if link is not None:\n                            root, = nonlocal_root\n                            link_prev, link_next, key, result = link\n                            link_prev[NEXT] = link_next\n                            link_next[PREV] = link_prev\n                            last = root[PREV]\n                            last[NEXT] = root[PREV] = link\n                            link[PREV] = last\n                            link[NEXT] = root\n                            stats[HITS] += 1\n                            return result\n                    finally:\n                        lock.release()\n                    result = user_function(*args, **kwds)\n                    lock.acquire()\n                    try:\n                        root, = nonlocal_root\n                        if key in cache:\n                            pass\n                        elif _len(cache) >= maxsize:\n                            oldroot = root\n                            oldroot[KEY] = key\n                            oldroot[RESULT] = result\n                            root = nonlocal_root[0] = oldroot[NEXT]\n                            oldkey = root[KEY]\n                            root[KEY] = root[RESULT] = None\n                            del cache[oldkey]\n                            cache[key] = oldroot\n                        else:\n                            last = root[PREV]\n                            link = [last, root, key, result]\n                            last[NEXT] = root[PREV] = cache[key] = link\n                        stats[MISSES] += 1\n                    finally:\n                        lock.release()\n                    return result\n\n            def cache_info():\n                \"\"\"Report cache statistics\"\"\"\n                lock.acquire()\n                try:\n                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,\n                                      len(cache))\n                finally:\n                    lock.release()\n\n            def cache_clear():\n                \"\"\"Clear the cache and cache statistics\"\"\"\n                lock.acquire()\n                try:\n                    cache.clear()\n                    root = nonlocal_root[0]\n                    root[:] = [root, root, None, None]\n                    stats[:] = [0, 0]\n                finally:\n                    lock.release()\n\n            wrapper.__wrapped__ = user_function\n            wrapper.cache_info = cache_info\n            wrapper.cache_clear = cache_clear\n            return functools.update_wrapper(wrapper, user_function)\n\n        return decorating_function\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_psbsd.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"FreeBSD platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nimport sys\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import conn_tmap, usage_percent\nimport _psutil_bsd as cext\nimport _psutil_posix\n\n\n__extra__all__ = []\n\n# --- constants\n\nPROC_STATUSES = {\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SWAIT: _common.STATUS_WAITING,\n    cext.SLOCK: _common.STATUS_LOCKED,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n}\n\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\n\n# extend base mem ntuple with BSD-specific memory metrics\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])\nscputimes = namedtuple(\n    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])\npextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])\npmmap_grouped = namedtuple(\n    'pmmap_grouped', 'path rss, private, ref_count, shadow_count')\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    mem = cext.virtual_mem()\n    total, free, active, inactive, wired, cached, buffers, shared = mem\n    avail = inactive + cached + free\n    used = active + wired + cached\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, buffers, cached, shared, wired)\n\n\ndef swap_memory():\n    \"\"\"System swap memory as (total, used, free, sin, sout) namedtuple.\"\"\"\n    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\ndef cpu_times():\n    \"\"\"Return system per-CPU times as a namedtuple\"\"\"\n    user, nice, system, idle, irq = cext.cpu_times()\n    return scputimes(user, nice, system, idle, irq)\n\n\nif hasattr(cext, \"per_cpu_times\"):\n    def per_cpu_times():\n        \"\"\"Return system CPU times as a namedtuple\"\"\"\n        ret = []\n        for cpu_t in cext.per_cpu_times():\n            user, nice, system, idle, irq = cpu_t\n            item = scputimes(user, nice, system, idle, irq)\n            ret.append(item)\n        return ret\nelse:\n    # XXX\n    # Ok, this is very dirty.\n    # On FreeBSD < 8 we 
cannot gather per-cpu information, see:\n    # https://github.com/giampaolo/psutil/issues/226\n    # If num cpus > 1, on first call we return single cpu times to avoid a\n    # crash at psutil import time.\n    # Next calls will fail with NotImplementedError\n    def per_cpu_times():\n        if cpu_count_logical() == 1:\n            return [cpu_times()]\n        if per_cpu_times.__called__:\n            raise NotImplementedError(\"supported only starting from FreeBSD 8\")\n        per_cpu_times.__called__ = True\n        return [cpu_times()]\n\n    per_cpu_times.__called__ = False\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    # From the C module we'll get an XML string similar to this:\n    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html\n    # We may get None in case \"sysctl kern.sched.topology_spec\"\n    # is not supported on this BSD version, in which case we'll mimic\n    # os.cpu_count() and return None.\n    s = cext.cpu_count_phys()\n    if s is not None:\n        # get rid of padding chars appended at the end of the string\n        index = s.rfind(\"</groups>\")\n        if index != -1:\n            s = s[:index + 9]\n            if sys.version_info >= (2, 5):\n                import xml.etree.ElementTree as ET\n                root = ET.fromstring(s)\n                return len(root.findall('group/children/group/cpu')) or None\n            else:\n                s = s[s.find('<children>'):]\n                return s.count(\"<cpu\") or None\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef disk_partitions(all=False):\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if not os.path.isabs(device) or not os.path.exists(device):\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef users():\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp = item\n        if tty == '~':\n            continue  # reboot or shutdown\n        nt = _common.suser(user, tty or None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef net_connections(kind):\n    if kind not in _common.conn_tmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in conn_tmap])))\n    families, types = conn_tmap[kind]\n    ret = []\n    rawlist = cext.net_connections()\n    for item in rawlist:\n        fd, fam, type, laddr, raddr, status, pid = item\n        # TODO: apply filter at C level\n        if fam in families and type in types:\n            status = TCP_STATUSES[status]\n            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)\n            ret.append(nt)\n    return ret\n\n\npids = cext.pids\npid_exists = _psposix.pid_exists\ndisk_usage = _psposix.disk_usage\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError exceptions into\n    NoSuchProcess and 
AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        return cext.proc_name(self.pid)\n\n    @wrap_exceptions\n    def exe(self):\n        return cext.proc_exe(self.pid)\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_cmdline(self.pid)\n\n    @wrap_exceptions\n    def terminal(self):\n        tty_nr = cext.proc_tty_nr(self.pid)\n        tmap = _psposix._get_terminal_map()\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_ppid(self.pid)\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved = cext.proc_uids(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        real, effective, saved = cext.proc_gids(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def memory_info(self):\n        rss, vms = cext.proc_memory_info(self.pid)[:2]\n        return _common.pmem(rss, vms)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        return pextmem(*cext.proc_memory_info(self.pid))\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_create_time(self.pid)\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        if kind not in conn_tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in conn_tmap])))\n        families, types = conn_tmap[kind]\n        rawlist = cext.proc_connections(self.pid, families, types)\n        ret = []\n        for item in rawlist:\n            fd, fam, type, laddr, raddr, status = item\n            status = TCP_STATUSES[status]\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n            ret.append(nt)\n        return ret\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support 
for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_status(self.pid)\n        if code in PROC_STATUSES:\n            return PROC_STATUSES[code]\n        # XXX is this legit? will we even ever get here?\n        return \"?\"\n\n    @wrap_exceptions\n    def io_counters(self):\n        rc, wc, rb, wb = cext.proc_io_counters(self.pid)\n        return _common.pio(rc, wc, rb, wb)\n\n    nt_mmap_grouped = namedtuple(\n        'mmap', 'path rss, private, ref_count, shadow_count')\n    nt_mmap_ext = namedtuple(\n        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')\n\n    # FreeBSD < 8 does not support functions based on kinfo_getfile()\n    # and kinfo_getvmmap()\n    if hasattr(cext, 'proc_open_files'):\n\n        @wrap_exceptions\n        def open_files(self):\n            \"\"\"Return files opened by process as a list of namedtuples.\"\"\"\n            rawlist = cext.proc_open_files(self.pid)\n            return [_common.popenfile(path, fd) for path, fd in rawlist]\n\n        @wrap_exceptions\n        def cwd(self):\n            \"\"\"Return process current working directory.\"\"\"\n            # sometimes we get an empty string, in which case we turn\n            # it into None\n            return cext.proc_cwd(self.pid) or None\n\n        @wrap_exceptions\n        def memory_maps(self):\n            return cext.proc_memory_maps(self.pid)\n\n        @wrap_exceptions\n        def num_fds(self):\n            \"\"\"Return the number of file descriptors opened by this process.\"\"\"\n            return cext.proc_num_fds(self.pid)\n\n    else:\n        def _not_implemented(self):\n            raise NotImplementedError(\"supported only starting from FreeBSD 8\")\n\n        open_files = _not_implemented\n        cwd = _not_implemented\n        memory_maps = _not_implemented\n        num_fds = _not_implemented\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        return cext.proc_cpu_affinity_get(self.pid)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, cpus):\n        try:\n            cext.proc_cpu_affinity_set(self.pid, cpus)\n        except OSError as err:\n            # 'man cpuset_setaffinity' about EDEADLK:\n            # <<the call would leave a thread without a valid CPU to run\n            # on because the set does not overlap with the thread's\n            # anonymous mask>>\n            if err.errno in (errno.EINVAL, errno.EDEADLK):\n                allcpus = tuple(range(len(per_cpu_times())))\n                for cpu in cpus:\n                    if cpu not in allcpus:\n                        raise ValueError(\"invalid CPU #%i (choose between %s)\"\n                                         % (cpu, allcpus))\n            raise\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_pslinux.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Linux platform implementation.\"\"\"\n\nfrom __future__ import division\n\nimport base64\nimport errno\nimport functools\nimport os\nimport re\nimport socket\nimport struct\nimport sys\nimport warnings\nfrom collections import namedtuple, defaultdict\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import (isfile_strict, usage_percent, deprecated)\nfrom psutil._compat import PY3\nimport _psutil_linux as cext\nimport _psutil_posix\n\n\n__extra__all__ = [\n    # io prio constants\n    \"IOPRIO_CLASS_NONE\", \"IOPRIO_CLASS_RT\", \"IOPRIO_CLASS_BE\",\n    \"IOPRIO_CLASS_IDLE\",\n    # connection status constants\n    \"CONN_ESTABLISHED\", \"CONN_SYN_SENT\", \"CONN_SYN_RECV\", \"CONN_FIN_WAIT1\",\n    \"CONN_FIN_WAIT2\", \"CONN_TIME_WAIT\", \"CONN_CLOSE\", \"CONN_CLOSE_WAIT\",\n    \"CONN_LAST_ACK\", \"CONN_LISTEN\", \"CONN_CLOSING\",\n    # other\n    \"phymem_buffers\", \"cached_phymem\"]\n\n\n# --- constants\n\nHAS_PRLIMIT = hasattr(cext, \"linux_prlimit\")\n\n# RLIMIT_* constants, not guaranteed to be present on all kernels\nif HAS_PRLIMIT:\n    for name in dir(cext):\n        if name.startswith('RLIM'):\n            __extra__all__.append(name)\n\n# Number of clock ticks per second\nCLOCK_TICKS = os.sysconf(\"SC_CLK_TCK\")\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\nBOOT_TIME = None  # set later\nDEFAULT_ENCODING = sys.getdefaultencoding()\n\n# ioprio_* constants http://linux.die.net/man/2/ioprio_get\nIOPRIO_CLASS_NONE = 0\nIOPRIO_CLASS_RT = 1\nIOPRIO_CLASS_BE = 2\nIOPRIO_CLASS_IDLE = 3\n\n# taken from /fs/proc/array.c\nPROC_STATUSES = {\n    \"R\": _common.STATUS_RUNNING,\n    \"S\": _common.STATUS_SLEEPING,\n    \"D\": _common.STATUS_DISK_SLEEP,\n    \"T\": _common.STATUS_STOPPED,\n    \"t\": _common.STATUS_TRACING_STOP,\n    \"Z\": _common.STATUS_ZOMBIE,\n    \"X\": _common.STATUS_DEAD,\n    \"x\": _common.STATUS_DEAD,\n    \"K\": _common.STATUS_WAKE_KILL,\n    \"W\": _common.STATUS_WAKING\n}\n\n# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h\nTCP_STATUSES = {\n    \"01\": _common.CONN_ESTABLISHED,\n    \"02\": _common.CONN_SYN_SENT,\n    \"03\": _common.CONN_SYN_RECV,\n    \"04\": _common.CONN_FIN_WAIT1,\n    \"05\": _common.CONN_FIN_WAIT2,\n    \"06\": _common.CONN_TIME_WAIT,\n    \"07\": _common.CONN_CLOSE,\n    \"08\": _common.CONN_CLOSE_WAIT,\n    \"09\": _common.CONN_LAST_ACK,\n    \"0A\": _common.CONN_LISTEN,\n    \"0B\": _common.CONN_CLOSING\n}\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n# --- named tuples\n\ndef _get_cputimes_fields():\n    \"\"\"Return a namedtuple of variable fields depending on the\n    CPU times available on this Linux kernel version which may be:\n    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,\n     [guest_nice]]])\n    \"\"\"\n    with open('/proc/stat', 'rb') as f:\n        values = f.readline().split()[1:]\n    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']\n    vlen = len(values)\n    if vlen >= 8:\n        # Linux >= 2.6.11\n        fields.append('steal')\n    if vlen >= 9:\n        # Linux >= 2.6.24\n        fields.append('guest')\n    if vlen >= 10:\n        # Linux >= 3.2.0\n        fields.append('guest_nice')\n    return fields\n\n\nscputimes = namedtuple('scputimes', 
_get_cputimes_fields())\n\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'buffers', 'cached'])\n\npextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')\n\npmmap_grouped = namedtuple(\n    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',\n                      'shared_dirty', 'private_clean', 'private_dirty',\n                      'referenced', 'anonymous', 'swap'])\n\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n\n# --- system memory\n\ndef virtual_memory():\n    total, free, buffers, shared, _, _ = cext.linux_sysinfo()\n    cached = active = inactive = None\n    with open('/proc/meminfo', 'rb') as f:\n        for line in f:\n            if line.startswith(b\"Cached:\"):\n                cached = int(line.split()[1]) * 1024\n            elif line.startswith(b\"Active:\"):\n                active = int(line.split()[1]) * 1024\n            elif line.startswith(b\"Inactive:\"):\n                inactive = int(line.split()[1]) * 1024\n            if (cached is not None\n                    and active is not None\n                    and inactive is not None):\n                break\n        else:\n            # we might get here when dealing with exotic Linux flavors, see:\n            # https://github.com/giampaolo/psutil/issues/313\n            msg = \"'cached', 'active' and 'inactive' memory stats couldn't \" \\\n                  \"be determined and were set to 0\"\n            warnings.warn(msg, RuntimeWarning)\n            cached = active = inactive = 0\n    avail = free + buffers + cached\n    used = total - free\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, buffers, cached)\n\n\ndef swap_memory():\n    _, _, _, _, total, free = cext.linux_sysinfo()\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    # get pgin/pgouts\n    with open(\"/proc/vmstat\", \"rb\") as f:\n        sin = sout = None\n        for line in f:\n            # values are expressed in 4 kilo bytes, we want bytes instead\n            if line.startswith(b'pswpin'):\n                sin = int(line.split(b' ')[1]) * 4 * 1024\n            elif line.startswith(b'pswpout'):\n                sout = int(line.split(b' ')[1]) * 4 * 1024\n            if sin is not None and sout is not None:\n                break\n        else:\n            # we might get here when dealing with exotic Linux flavors, see:\n            # https://github.com/giampaolo/psutil/issues/313\n            msg = \"'sin' and 'sout' swap memory stats couldn't \" \\\n                  \"be determined and were set to 0\"\n            warnings.warn(msg, RuntimeWarning)\n            sin = sout = 0\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\n@deprecated(replacement='psutil.virtual_memory().cached')\ndef cached_phymem():\n    return virtual_memory().cached\n\n\n@deprecated(replacement='psutil.virtual_memory().buffers')\ndef phymem_buffers():\n    return virtual_memory().buffers\n\n\n# --- CPUs\n\ndef cpu_times():\n    \"\"\"Return a named tuple representing the following system-wide\n    CPU times:\n    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,\n     [guest_nice]]])\n    Last 3 fields may not be available on all Linux kernel versions.\n    \"\"\"\n    with open('/proc/stat', 'rb') as f:\n        values = f.readline().split()\n    
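# A worked example of the slicing below, assuming a hypothetical\n    # first line b'cpu  4705 150 1120 16440': the leading b'cpu' token\n    # is dropped and each remaining tick count is divided by\n    # CLOCK_TICKS to obtain seconds.\n    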
fields = values[1:len(scputimes._fields) + 1]\n    fields = [float(x) / CLOCK_TICKS for x in fields]\n    return scputimes(*fields)\n\n\ndef per_cpu_times():\n    \"\"\"Return a list of namedtuples representing the CPU times\n    for every CPU available on the system.\n    \"\"\"\n    cpus = []\n    with open('/proc/stat', 'rb') as f:\n        # get rid of the first line which refers to system-wide CPU stats\n        f.readline()\n        for line in f:\n            if line.startswith(b'cpu'):\n                values = line.split()\n                fields = values[1:len(scputimes._fields) + 1]\n                fields = [float(x) / CLOCK_TICKS for x in fields]\n                entry = scputimes(*fields)\n                cpus.append(entry)\n        return cpus\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    try:\n        return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n    except ValueError:\n        # as a second fallback we try to parse /proc/cpuinfo\n        num = 0\n        with open('/proc/cpuinfo', 'rb') as f:\n            for line in f:\n                if line.lower().startswith(b'processor'):\n                    num += 1\n\n        # unknown format (e.g. armel/sparc architectures), see:\n        # https://github.com/giampaolo/psutil/issues/200\n        # try to parse /proc/stat as a last resort\n        if num == 0:\n            search = re.compile('cpu\\\d')\n            with open('/proc/stat', 'rt') as f:\n                for line in f:\n                    line = line.split(' ')[0]\n                    if search.match(line):\n                        num += 1\n\n        if num == 0:\n            # mimic os.cpu_count()\n            return None\n        return num\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    with open('/proc/cpuinfo', 'rb') as f:\n        found = set()\n        for line in f:\n            if line.lower().startswith(b'physical id'):\n                found.add(line.strip())\n    # mimic os.cpu_count()\n    return len(found) if found else None\n\n\n# --- other system functions\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp, user_process = item\n        # note: the underlying C function includes entries about\n        # system boot, run level and others.  
We might want\n        # to use them in the future.\n        if not user_process:\n            continue\n        if hostname == ':0.0':\n            hostname = 'localhost'\n        nt = _common.suser(user, tty or None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef boot_time():\n    \"\"\"Return the system boot time expressed in seconds since the epoch.\"\"\"\n    global BOOT_TIME\n    with open('/proc/stat', 'rb') as f:\n        for line in f:\n            if line.startswith(b'btime'):\n                ret = float(line.strip().split()[1])\n                BOOT_TIME = ret\n                return ret\n        raise RuntimeError(\"line 'btime' not found\")\n\n\n# --- processes\n\ndef pids():\n    \"\"\"Return a list of PIDs currently running on the system.\"\"\"\n    return [int(x) for x in os.listdir(b'/proc') if x.isdigit()]\n\n\ndef pid_exists(pid):\n    \"\"\"Check for the existence of a unix pid.\"\"\"\n    return _psposix.pid_exists(pid)\n\n\n# --- network\n\nclass Connections:\n    \"\"\"A wrapper on top of /proc/net/* files, retrieving per-process\n    and system-wide open connections (TCP, UDP, UNIX) similarly to\n    \"netstat -an\".\n\n    Note: in case of UNIX sockets we're only able to determine the\n    local endpoint/path, not the one it's connected to.\n    According to [1] it would be possible, but not easily.\n\n    [1] http://serverfault.com/a/417946\n    \"\"\"\n\n    def __init__(self):\n        tcp4 = (\"tcp\", socket.AF_INET, socket.SOCK_STREAM)\n        tcp6 = (\"tcp6\", socket.AF_INET6, socket.SOCK_STREAM)\n        udp4 = (\"udp\", socket.AF_INET, socket.SOCK_DGRAM)\n        udp6 = (\"udp6\", socket.AF_INET6, socket.SOCK_DGRAM)\n        unix = (\"unix\", socket.AF_UNIX, None)\n        self.tmap = {\n            \"all\": (tcp4, tcp6, udp4, udp6, unix),\n            \"tcp\": (tcp4, tcp6),\n            \"tcp4\": (tcp4,),\n            \"tcp6\": (tcp6,),\n            \"udp\": (udp4, udp6),\n            \"udp4\": (udp4,),\n            \"udp6\": (udp6,),\n            \"unix\": (unix,),\n            \"inet\": (tcp4, tcp6, udp4, udp6),\n            \"inet4\": (tcp4, udp4),\n            \"inet6\": (tcp6, udp6),\n        }\n\n    def get_proc_inodes(self, pid):\n        inodes = defaultdict(list)\n        for fd in os.listdir(\"/proc/%s/fd\" % pid):\n            try:\n                inode = os.readlink(\"/proc/%s/fd/%s\" % (pid, fd))\n            except OSError:\n                # not a link, or the fd disappeared in the meantime\n                continue\n            else:\n                if inode.startswith('socket:['):\n                    # the process is using a socket\n                    inode = inode[8:][:-1]\n                    inodes[inode].append((pid, int(fd)))\n        return inodes\n\n    def get_all_inodes(self):\n        inodes = {}\n        for pid in pids():\n            try:\n                inodes.update(self.get_proc_inodes(pid))\n            except OSError as err:\n                # os.listdir() will raise a lot of access denied\n                # exceptions for an unprivileged user; that's fine\n                # as we'll just end up returning a connection with PID\n                # and fd set to None anyway.\n                # Both netstat -an and lsof do the same, so it's\n                # unlikely we can do any better.\n                # ENOENT just means a PID disappeared on us.\n                if err.errno not in (\n                        errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):\n                    raise\n        
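# The returned dict maps each socket inode to the (pid, fd) pairs\n        # holding it open, e.g. (hypothetical): {'139993': [(1234, 5)]}.\n        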
return inodes\n\n    def decode_address(self, addr, family):\n        \"\"\"Accept an \"ip:port\" address as displayed in /proc/net/*\n        and convert it into a human readable form, like:\n\n        \"0500000A:0016\" -> (\"10.0.0.5\", 22)\n        \"0000000000000000FFFF00000100007F:9E49\" -> (\"::ffff:127.0.0.1\", 40521)\n\n        The IP address portion is a little or big endian four-byte\n        hexadecimal number; that is, the least significant byte is listed\n        first, so we need to reverse the order of the bytes to convert it\n        to an IP address.\n        The port is represented as a two-byte hexadecimal number.\n\n        Reference:\n        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html\n        \"\"\"\n        ip, port = addr.split(':')\n        port = int(port, 16)\n        # this usually refers to a local socket in listen mode with\n        # no end-points connected\n        if not port:\n            return ()\n        if PY3:\n            ip = ip.encode('ascii')\n        if family == socket.AF_INET:\n            # see: https://github.com/giampaolo/psutil/issues/201\n            if sys.byteorder == 'little':\n                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])\n            else:\n                ip = socket.inet_ntop(family, base64.b16decode(ip))\n        else:  # IPv6\n            # old version - let's keep it, just in case...\n            # ip = ip.decode('hex')\n            # return socket.inet_ntop(socket.AF_INET6,\n            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))\n            ip = base64.b16decode(ip)\n            # see: https://github.com/giampaolo/psutil/issues/201\n            if sys.byteorder == 'little':\n                ip = socket.inet_ntop(\n                    socket.AF_INET6,\n                    struct.pack('>4I', *struct.unpack('<4I', ip)))\n            else:\n                ip = socket.inet_ntop(\n                    socket.AF_INET6,\n                    struct.pack('<4I', *struct.unpack('<4I', ip)))\n        return (ip, port)\n\n    def process_inet(self, file, family, type_, inodes, filter_pid=None):\n        \"\"\"Parse /proc/net/tcp* and /proc/net/udp* files.\"\"\"\n        if file.endswith('6') and not os.path.exists(file):\n            # IPv6 not supported\n            return\n        with open(file, 'rt') as f:\n            f.readline()  # skip the first line\n            for line in f:\n                _, laddr, raddr, status, _, _, _, _, _, inode = \\\n                    line.split()[:10]\n                if inode in inodes:\n                    # # We assume inet sockets are unique, so we error\n                    # # out if there are multiple references to the\n                    # # same inode. 
We won't do this for UNIX sockets.\n                    # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:\n                    #     raise ValueError(\"ambiguous inode with multiple \"\n                    #                      \"PID references\")\n                    pid, fd = inodes[inode][0]\n                else:\n                    pid, fd = None, -1\n                if filter_pid is not None and filter_pid != pid:\n                    continue\n                else:\n                    if type_ == socket.SOCK_STREAM:\n                        status = TCP_STATUSES[status]\n                    else:\n                        status = _common.CONN_NONE\n                    laddr = self.decode_address(laddr, family)\n                    raddr = self.decode_address(raddr, family)\n                    yield (fd, family, type_, laddr, raddr, status, pid)\n\n    def process_unix(self, file, family, inodes, filter_pid=None):\n        \"\"\"Parse /proc/net/unix files.\"\"\"\n        with open(file, 'rt') as f:\n            f.readline()  # skip the first line\n            for line in f:\n                tokens = line.split()\n                _, _, _, _, type_, _, inode = tokens[0:7]\n                if inode in inodes:\n                    # With UNIX sockets we can have a single inode\n                    # referencing many file descriptors.\n                    pairs = inodes[inode]\n                else:\n                    pairs = [(None, -1)]\n                for pid, fd in pairs:\n                    if filter_pid is not None and filter_pid != pid:\n                        continue\n                    else:\n                        if len(tokens) == 8:\n                            path = tokens[-1]\n                        else:\n                            path = \"\"\n                        type_ = int(type_)\n                        raddr = None\n                        status = _common.CONN_NONE\n                        yield (fd, family, type_, path, raddr, status, pid)\n\n    def retrieve(self, kind, pid=None):\n        if kind not in self.tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in self.tmap])))\n        if pid is not None:\n            inodes = self.get_proc_inodes(pid)\n            if not inodes:\n                # no connections for this process\n                return []\n        else:\n            inodes = self.get_all_inodes()\n        ret = []\n        for f, family, type_ in self.tmap[kind]:\n            if family in (socket.AF_INET, socket.AF_INET6):\n                ls = self.process_inet(\n                    \"/proc/net/%s\" % f, family, type_, inodes, filter_pid=pid)\n            else:\n                ls = self.process_unix(\n                    \"/proc/net/%s\" % f, family, inodes, filter_pid=pid)\n            for fd, family, type_, laddr, raddr, status, bound_pid in ls:\n                if pid:\n                    conn = _common.pconn(fd, family, type_, laddr, raddr,\n                                         status)\n                else:\n                    conn = _common.sconn(fd, family, type_, laddr, raddr,\n                                         status, bound_pid)\n                ret.append(conn)\n        return ret\n\n\n_connections = Connections()\n\n\ndef net_connections(kind='inet'):\n    \"\"\"Return system-wide open connections.\"\"\"\n    return _connections.retrieve(kind)\n\n\ndef net_io_counters():\n    \"\"\"Return network I/O 
statistics for every network interface\n    installed on the system as a dict of raw tuples.\n    \"\"\"\n    with open(\"/proc/net/dev\", \"rt\") as f:\n        lines = f.readlines()\n    retdict = {}\n    for line in lines[2:]:\n        colon = line.rfind(':')\n        assert colon > 0, repr(line)\n        name = line[:colon].strip()\n        fields = line[colon + 1:].strip().split()\n        bytes_recv = int(fields[0])\n        packets_recv = int(fields[1])\n        errin = int(fields[2])\n        dropin = int(fields[3])\n        bytes_sent = int(fields[8])\n        packets_sent = int(fields[9])\n        errout = int(fields[10])\n        dropout = int(fields[11])\n        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,\n                         errin, errout, dropin, dropout)\n    return retdict\n\n\n# --- disks\n\ndef disk_io_counters():\n    \"\"\"Return disk I/O statistics for every disk installed on the\n    system as a dict of raw tuples.\n    \"\"\"\n    # man iostat states that sectors are equivalent with blocks and\n    # have a size of 512 bytes since 2.4 kernels. This value is\n    # needed to calculate the amount of disk I/O in bytes.\n    SECTOR_SIZE = 512\n\n    # determine partitions we want to look for\n    partitions = []\n    with open(\"/proc/partitions\", \"rt\") as f:\n        lines = f.readlines()[2:]\n    for line in reversed(lines):\n        _, _, _, name = line.split()\n        if name[-1].isdigit():\n            # we're dealing with a partition (e.g. 'sda1'); 'sda' will\n            # also be around but we want to omit it\n            partitions.append(name)\n        else:\n            if not partitions or not partitions[-1].startswith(name):\n                # we're dealing with a disk entity for which no\n                # partitions have been defined (e.g. 
'sda' but\n                # 'sda1' was not around), see:\n                # https://github.com/giampaolo/psutil/issues/338\n                partitions.append(name)\n    #\n    retdict = {}\n    with open(\"/proc/diskstats\", \"rt\") as f:\n        lines = f.readlines()\n    for line in lines:\n        # http://www.mjmwired.net/kernel/Documentation/iostats.txt\n        fields = line.split()\n        if len(fields) > 7:\n            _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \\\n                fields[:11]\n        else:\n            # from kernel 2.6.0 to 2.6.25\n            _, _, name, reads, rbytes, writes, wbytes = fields\n            rtime, wtime = 0, 0\n        if name in partitions:\n            rbytes = int(rbytes) * SECTOR_SIZE\n            wbytes = int(wbytes) * SECTOR_SIZE\n            reads = int(reads)\n            writes = int(writes)\n            rtime = int(rtime)\n            wtime = int(wtime)\n            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)\n    return retdict\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return mounted disk partitions as a list of namedtuples.\"\"\"\n    phydevs = []\n    with open(\"/proc/filesystems\", \"r\") as f:\n        for line in f:\n            if not line.startswith(\"nodev\"):\n                phydevs.append(line.strip())\n\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if device == '' or fstype not in phydevs:\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndisk_usage = _psposix.disk_usage\n\n\n# --- decorators\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError and IOError exceptions\n    into NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except EnvironmentError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            # ENOENT (no such file or directory) gets raised on open().\n            # ESRCH (no such process) can get raised on read() if\n            # process is gone in the meantime.\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Linux process implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        fname = \"/proc/%s/stat\" % self.pid\n        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()\n        with open(fname, \"rt\", **kw) as f:\n            # XXX - gets changed later and probably needs refactoring\n            return f.read().split(' ')[1].replace('(', '').replace(')', '')\n\n    def exe(self):\n        try:\n            exe = os.readlink(\"/proc/%s/exe\" % self.pid)\n        except (OSError, IOError) as err:\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                # no such file error; might be raised also if 
the\n                # path actually exists for system processes with\n                # low pids (about 0-20)\n                if os.path.lexists(\"/proc/%s\" % self.pid):\n                    return \"\"\n                else:\n                    # ok, it is a process which has gone away\n                    raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n\n        # readlink() might return paths containing null bytes ('\\x00').\n        # Certain names have ' (deleted)' appended. Usually this is\n        # bogus as the file actually exists. Either way that's not\n        # important as we don't want to discriminate executables which\n        # have been deleted.\n        exe = exe.split('\\x00')[0]\n        if exe.endswith(' (deleted)') and not os.path.exists(exe):\n            exe = exe[:-10]\n        return exe\n\n    @wrap_exceptions\n    def cmdline(self):\n        fname = \"/proc/%s/cmdline\" % self.pid\n        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()\n        with open(fname, \"rt\", **kw) as f:\n            return [x for x in f.read().split('\\x00') if x]\n\n    @wrap_exceptions\n    def terminal(self):\n        tmap = _psposix._get_terminal_map()\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            tty_nr = int(f.read().split(b' ')[6])\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    if os.path.exists('/proc/%s/io' % os.getpid()):\n        @wrap_exceptions\n        def io_counters(self):\n            fname = \"/proc/%s/io\" % self.pid\n            with open(fname, 'rb') as f:\n                rcount = wcount = rbytes = wbytes = None\n                for line in f:\n                    if rcount is None and line.startswith(b\"syscr\"):\n                        rcount = int(line.split()[1])\n                    elif wcount is None and line.startswith(b\"syscw\"):\n                        wcount = int(line.split()[1])\n                    elif rbytes is None and line.startswith(b\"read_bytes\"):\n                        rbytes = int(line.split()[1])\n                    elif wbytes is None and line.startswith(b\"write_bytes\"):\n                        wbytes = int(line.split()[1])\n                for x in (rcount, wcount, rbytes, wbytes):\n                    if x is None:\n                        raise NotImplementedError(\n                            \"couldn't read all necessary info from %r\" % fname)\n                return _common.pio(rcount, wcount, rbytes, wbytes)\n    else:\n        def io_counters(self):\n            raise NotImplementedError(\"couldn't find /proc/%s/io (kernel \"\n                                      \"too old?)\" % self.pid)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            st = f.read().strip()\n        # ignore the first two values (\"pid (exe)\")\n        st = st[st.find(b')') + 2:]\n        values = st.split(b' ')\n        utime = float(values[11]) / CLOCK_TICKS\n        stime = float(values[12]) / CLOCK_TICKS\n        return _common.pcputimes(utime, stime)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise 
TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def create_time(self):\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            st = f.read().strip()\n        # ignore the first two values (\"pid (exe)\")\n        st = st[st.rfind(b')') + 2:]\n        values = st.split(b' ')\n        # According to documentation, starttime is in field 22 and the\n        # unit is jiffies (clock ticks).\n        # We first divide it by clock ticks and then add the boot time,\n        # returning seconds since the epoch, in UTC.\n        # Also use the cached value if available.\n        bt = BOOT_TIME or boot_time()\n        return (float(values[19]) / CLOCK_TICKS) + bt\n\n    @wrap_exceptions\n    def memory_info(self):\n        with open(\"/proc/%s/statm\" % self.pid, 'rb') as f:\n            vms, rss = f.readline().split()[:2]\n            return _common.pmem(int(rss) * PAGESIZE,\n                                int(vms) * PAGESIZE)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        #  ============================================================\n        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |\n        #  ============================================================\n        # | rss    | resident set size                   |      | RES  |\n        # | vms    | total program size                  | size | VIRT |\n        # | shared | shared pages (from shared mappings) |      | SHR  |\n        # | text   | text ('code')                       | trs  | CODE |\n        # | lib    | library (unused in Linux 2.6)       | lrs  |      |\n        # | data   | data + stack                        | drs  | DATA |\n        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |\n        #  ============================================================\n        with open(\"/proc/%s/statm\" % self.pid, \"rb\") as f:\n            vms, rss, shared, text, lib, data, dirty = \\\n                [int(x) * PAGESIZE for x in f.readline().split()[:7]]\n        return pextmem(rss, vms, shared, text, lib, data, dirty)\n\n    if os.path.exists('/proc/%s/smaps' % os.getpid()):\n\n        @wrap_exceptions\n        def memory_maps(self):\n            \"\"\"Return process's mapped memory regions as a list of namedtuples.\n            Fields are explained in 'man proc'; here is an updated (Apr 2012)\n            version: http://goo.gl/fmebo\n            \"\"\"\n            with open(\"/proc/%s/smaps\" % self.pid, \"rt\") as f:\n                first_line = f.readline()\n                current_block = [first_line]\n\n                def get_blocks():\n                    data = {}\n                    for line in f:\n                        fields = line.split(None, 5)\n                        if not fields[0].endswith(':'):\n                            # new block section\n                            yield (current_block.pop(), data)\n                            current_block.append(line)\n                        else:\n                            try:\n                                data[fields[0]] = int(fields[1]) * 1024\n                            except ValueError:\n                                if fields[0].startswith('VmFlags:'):\n                                    # see issue #369\n                                    continue\n                                else:\n                                    raise ValueError(\"don't know how to inte\"\n                                                     \"rpret line %r\" % line)\n                    
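# the loop above only yields on seeing the next header, so\n                    # flush the final (header, data) block once the file\n                    # is exhausted\n                    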
yield (current_block.pop(), data)\n\n                ls = []\n                if first_line:  # smaps file can be empty\n                    for header, data in get_blocks():\n                        hfields = header.split(None, 5)\n                        try:\n                            addr, perms, offset, dev, inode, path = hfields\n                        except ValueError:\n                            addr, perms, offset, dev, inode, path = \\\n                                hfields + ['']\n                        if not path:\n                            path = '[anon]'\n                        else:\n                            path = path.strip()\n                        ls.append((\n                            addr, perms, path,\n                            data['Rss:'],\n                            data.get('Size:', 0),\n                            data.get('Pss:', 0),\n                            data.get('Shared_Clean:', 0),\n                            data.get('Shared_Dirty:', 0),\n                            data.get('Private_Clean:', 0),\n                            data.get('Private_Dirty:', 0),\n                            data.get('Referenced:', 0),\n                            data.get('Anonymous:', 0),\n                            data.get('Swap:', 0)\n                        ))\n            return ls\n\n    else:\n        def memory_maps(self):\n            msg = \"couldn't find /proc/%s/smaps; kernel < 2.6.14 or \"  \\\n                  \"CONFIG_MMU kernel configuration option is not enabled\" \\\n                  % self.pid\n            raise NotImplementedError(msg)\n\n    @wrap_exceptions\n    def cwd(self):\n        # readlink() might return paths containing null bytes causing\n        # problems when used with other fs-related functions (os.*,\n        # open(), ...)\n        path = os.readlink(\"/proc/%s/cwd\" % self.pid)\n        return path.replace('\\x00', '')\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        vol = unvol = None\n        with open(\"/proc/%s/status\" % self.pid, \"rb\") as f:\n            for line in f:\n                if line.startswith(b\"voluntary_ctxt_switches\"):\n                    vol = int(line.split()[1])\n                elif line.startswith(b\"nonvoluntary_ctxt_switches\"):\n                    unvol = int(line.split()[1])\n                if vol is not None and unvol is not None:\n                    return _common.pctxsw(vol, unvol)\n            raise NotImplementedError(\n                \"'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches' \"\n                \"fields were not found in /proc/%s/status; the kernel is \"\n                \"probably older than 2.6.23\" % self.pid)\n\n    @wrap_exceptions\n    def num_threads(self):\n        with open(\"/proc/%s/status\" % self.pid, \"rb\") as f:\n            for line in f:\n                if line.startswith(b\"Threads:\"):\n                    return int(line.split()[1])\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def threads(self):\n        thread_ids = os.listdir(\"/proc/%s/task\" % self.pid)\n        thread_ids.sort()\n        retlist = []\n        hit_enoent = False\n        for thread_id in thread_ids:\n            fname = \"/proc/%s/task/%s/stat\" % (self.pid, thread_id)\n            try:\n                with open(fname, 'rb') as f:\n                    st = f.read().strip()\n            except EnvironmentError as err:\n                if err.errno == errno.ENOENT:\n                    # no such file 
or directory; it means thread\n                    # disappeared on us\n                    hit_enoent = True\n                    continue\n                raise\n            # ignore the first two values (\"pid (exe)\")\n            st = st[st.find(b')') + 2:]\n            values = st.split(b' ')\n            utime = float(values[11]) / CLOCK_TICKS\n            stime = float(values[12]) / CLOCK_TICKS\n            ntuple = _common.pthread(int(thread_id), utime, stime)\n            retlist.append(ntuple)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def nice_get(self):\n        # with open('/proc/%s/stat' % self.pid, 'r') as f:\n        #   data = f.read()\n        #   return int(data.split()[18])\n\n        # Use C implementation\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        return cext.proc_cpu_affinity_get(self.pid)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, cpus):\n        try:\n            cext.proc_cpu_affinity_set(self.pid, cpus)\n        except OSError as err:\n            if err.errno == errno.EINVAL:\n                allcpus = tuple(range(len(per_cpu_times())))\n                for cpu in cpus:\n                    if cpu not in allcpus:\n                        raise ValueError(\"invalid CPU #%i (choose between %s)\"\n                                         % (cpu, allcpus))\n            raise\n\n    # only starting from kernel 2.6.13\n    if hasattr(cext, \"proc_ioprio_get\"):\n\n        @wrap_exceptions\n        def ionice_get(self):\n            ioclass, value = cext.proc_ioprio_get(self.pid)\n            return _common.pionice(ioclass, value)\n\n        @wrap_exceptions\n        def ionice_set(self, ioclass, value):\n            if ioclass in (IOPRIO_CLASS_NONE, None):\n                if value:\n                    msg = \"can't specify value with IOPRIO_CLASS_NONE\"\n                    raise ValueError(msg)\n                ioclass = IOPRIO_CLASS_NONE\n                value = 0\n            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):\n                if value is None:\n                    value = 4\n            elif ioclass == IOPRIO_CLASS_IDLE:\n                if value:\n                    msg = \"can't specify value with IOPRIO_CLASS_IDLE\"\n                    raise ValueError(msg)\n                value = 0\n            else:\n                value = 0\n            if not 0 <= value <= 8:\n                raise ValueError(\n                    \"value argument range expected is between 0 and 8\")\n            return cext.proc_ioprio_set(self.pid, ioclass, value)\n\n    if HAS_PRLIMIT:\n        @wrap_exceptions\n        def rlimit(self, resource, limits=None):\n            # if pid is 0 prlimit() applies to the calling process and\n            # we don't want that\n            if self.pid == 0:\n                raise ValueError(\"can't use prlimit() against PID 0 process\")\n            if limits is None:\n                # get\n                return cext.linux_prlimit(self.pid, resource)\n            else:\n                # set\n                if len(limits) != 2:\n                    raise ValueError(\n                        \"second argument must be a (soft, hard) tuple\")\n                soft, hard = limits\n                
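# setter usage (hypothetical):\n                # p.rlimit(psutil.RLIMIT_NOFILE, (1024, 4096)) would set\n                # the soft and hard caps on open file descriptors\n                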
cext.linux_prlimit(self.pid, resource, soft, hard)\n\n    @wrap_exceptions\n    def status(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b\"State:\"):\n                    letter = line.split()[1]\n                    if PY3:\n                        letter = letter.decode()\n                    # XXX is '?' legit? (we're not supposed to return\n                    # it anyway)\n                    return PROC_STATUSES.get(letter, '?')\n\n    @wrap_exceptions\n    def open_files(self):\n        retlist = []\n        files = os.listdir(\"/proc/%s/fd\" % self.pid)\n        hit_enoent = False\n        for fd in files:\n            file = \"/proc/%s/fd/%s\" % (self.pid, fd)\n            try:\n                file = os.readlink(file)\n            except OSError as err:\n                # ENOENT == file which is gone in the meantime\n                if err.errno in (errno.ENOENT, errno.ESRCH):\n                    hit_enoent = True\n                    continue\n                elif err.errno == errno.EINVAL:\n                    # not a link\n                    continue\n                else:\n                    raise\n            else:\n                # If file is not an absolute path there's no way\n                # to tell whether it's a regular file or not,\n                # so we skip it. A regular file is always supposed\n                # to be absolutized though.\n                if file.startswith('/') and isfile_strict(file):\n                    ntuple = _common.popenfile(file, int(fd))\n                    retlist.append(ntuple)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        ret = _connections.retrieve(kind, self.pid)\n        # raise NSP if the process disappeared on us\n        os.stat('/proc/%s' % self.pid)\n        return ret\n\n    @wrap_exceptions\n    def num_fds(self):\n        return len(os.listdir(\"/proc/%s/fd\" % self.pid))\n\n    @wrap_exceptions\n    def ppid(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b\"PPid:\"):\n                    # PPid: nnnn\n                    return int(line.split()[1])\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def uids(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b'Uid:'):\n                    _, real, effective, saved, fs = line.split()\n                    return _common.puids(int(real), int(effective), int(saved))\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def gids(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b'Gid:'):\n                    _, real, effective, saved, fs = line.split()\n                    return _common.pgids(int(real), int(effective), int(saved))\n            raise NotImplementedError(\"line not found\")\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_psosx.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"OSX platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import conn_tmap, usage_percent, isfile_strict\nimport _psutil_osx as cext\nimport _psutil_posix\n\n\n__extra__all__ = []\n\n# --- constants\n\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\n\n# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\nPROC_STATUSES = {\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n}\n\nscputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])\n\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'wired'])\n\npextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])\n\npmmap_grouped = namedtuple(\n    'pmmap_grouped',\n    'path rss private swapped dirtied ref_count shadow_depth')\n\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n# --- functions\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    total, active, inactive, wired, free = cext.virtual_mem()\n    avail = inactive + free\n    used = active + inactive + wired\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, wired)\n\n\ndef swap_memory():\n    \"\"\"Swap system memory as a (total, used, free, sin, sout) tuple.\"\"\"\n    total, used, free, sin, sout = cext.swap_mem()\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\ndef cpu_times():\n    \"\"\"Return system CPU times as a namedtuple.\"\"\"\n    user, nice, system, idle = cext.cpu_times()\n    return scputimes(user, nice, system, idle)\n\n\ndef per_cpu_times():\n    \"\"\"Return system CPU times as a named tuple\"\"\"\n    ret = []\n    for cpu_t in cext.per_cpu_times():\n        user, nice, system, idle = cpu_t\n        item = scputimes(user, nice, system, idle)\n        ret.append(item)\n    return ret\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the 
epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef disk_partitions(all=False):\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if not os.path.isabs(device) or not os.path.exists(device):\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef users():\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp = item\n        if tty == '~':\n            continue  # reboot or shutdown\n        if not tstamp:\n            continue\n        nt = _common.suser(user, tty or None, hostname or None, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef net_connections(kind='inet'):\n    # Note: on OSX this will fail with AccessDenied unless\n    # the process is owned by root.\n    ret = []\n    for pid in pids():\n        try:\n            cons = Process(pid).connections(kind)\n        except NoSuchProcess:\n            continue\n        else:\n            if cons:\n                for c in cons:\n                    c = list(c) + [pid]\n                    ret.append(_common.sconn(*c))\n    return ret\n\n\npids = cext.pids\npid_exists = _psposix.pid_exists\ndisk_usage = _psposix.disk_usage\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError exceptions into\n    NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        return cext.proc_name(self.pid)\n\n    @wrap_exceptions\n    def exe(self):\n        return cext.proc_exe(self.pid)\n\n    @wrap_exceptions\n    def cmdline(self):\n        if not pid_exists(self.pid):\n            raise NoSuchProcess(self.pid, self._name)\n        return cext.proc_cmdline(self.pid)\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_ppid(self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        return cext.proc_cwd(self.pid)\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved = cext.proc_uids(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        real, effective, saved = cext.proc_gids(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def terminal(self):\n        tty_nr = cext.proc_tty_nr(self.pid)\n        tmap = _psposix._get_terminal_map()\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    @wrap_exceptions\n  
  def memory_info(self):\n        rss, vms = cext.proc_memory_info(self.pid)[:2]\n        return _common.pmem(rss, vms)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)\n        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_create_time(self.pid)\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def open_files(self):\n        if self.pid == 0:\n            return []\n        files = []\n        rawlist = cext.proc_open_files(self.pid)\n        for path, fd in rawlist:\n            if isfile_strict(path):\n                ntuple = _common.popenfile(path, fd)\n                files.append(ntuple)\n        return files\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        if kind not in conn_tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in conn_tmap])))\n        families, types = conn_tmap[kind]\n        rawlist = cext.proc_connections(self.pid, families, types)\n        ret = []\n        for item in rawlist:\n            fd, fam, type, laddr, raddr, status = item\n            status = TCP_STATUSES[status]\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n            ret.append(nt)\n        return ret\n\n    @wrap_exceptions\n    def num_fds(self):\n        if self.pid == 0:\n            return 0\n        return cext.proc_num_fds(self.pid)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_status(self.pid)\n        # XXX is '?' legit? (we're not supposed to return it anyway)\n        return PROC_STATUSES.get(code, '?')\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def memory_maps(self):\n        return cext.proc_memory_maps(self.pid)\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_psposix.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Routines common to all posix systems.\"\"\"\n\nimport errno\nimport glob\nimport os\nimport sys\nimport time\n\nfrom psutil._common import sdiskusage, usage_percent, memoize\nfrom psutil._compat import PY3, unicode\n\n\nclass TimeoutExpired(Exception):\n    pass\n\n\ndef pid_exists(pid):\n    \"\"\"Check whether pid exists in the current process table.\"\"\"\n    if pid == 0:\n        # According to \"man 2 kill\" PID 0 has a special meaning:\n        # it refers to <<every process in the process group of the\n        # calling process>> so we don't want to go any further.\n        # If we get here it means this UNIX platform *does* have\n        # a process with id 0.\n        return True\n    try:\n        os.kill(pid, 0)\n    except OSError as err:\n        if err.errno == errno.ESRCH:\n            # ESRCH == No such process\n            return False\n        elif err.errno == errno.EPERM:\n            # EPERM clearly means there's a process to deny access to\n            return True\n        else:\n            # According to \"man 2 kill\" possible error values are\n            # (EINVAL, EPERM, ESRCH) therefore we should never get\n            # here. If we do let's be explicit in considering this\n            # an error.\n            raise err\n    else:\n        return True\n\n\ndef wait_pid(pid, timeout=None):\n    \"\"\"Wait for process with pid 'pid' to terminate and return its\n    exit status code as an integer.\n\n    If pid is not a children of os.getpid() (current process) just\n    waits until the process disappears and return None.\n\n    If pid does not exist at all return None immediately.\n\n    Raise TimeoutExpired on timeout expired.\n    \"\"\"\n    def check_timeout(delay):\n        if timeout is not None:\n            if timer() >= stop_at:\n                raise TimeoutExpired()\n        time.sleep(delay)\n        return min(delay * 2, 0.04)\n\n    timer = getattr(time, 'monotonic', time.time)\n    if timeout is not None:\n        waitcall = lambda: os.waitpid(pid, os.WNOHANG)\n        stop_at = timer() + timeout\n    else:\n        waitcall = lambda: os.waitpid(pid, 0)\n\n    delay = 0.0001\n    while True:\n        try:\n            retpid, status = waitcall()\n        except OSError as err:\n            if err.errno == errno.EINTR:\n                delay = check_timeout(delay)\n                continue\n            elif err.errno == errno.ECHILD:\n                # This has two meanings:\n                # - pid is not a child of os.getpid() in which case\n                #   we keep polling until it's gone\n                # - pid never existed in the first place\n                # In both cases we'll eventually return None as we\n                # can't determine its exit status code.\n                while True:\n                    if pid_exists(pid):\n                        delay = check_timeout(delay)\n                    else:\n                        return\n            else:\n                raise\n        else:\n            if retpid == 0:\n                # WNOHANG was used, pid is still running\n                delay = check_timeout(delay)\n                continue\n            # process exited due to a signal; return the integer of\n            # that signal\n            if os.WIFSIGNALED(status):\n                return 
os.WTERMSIG(status)\n            # process exited using the exit(2) system call; return the\n            # integer exit(2) was called with\n            elif os.WIFEXITED(status):\n                return os.WEXITSTATUS(status)\n            else:\n                # should never happen\n                raise RuntimeError(\"unknown process exit status\")\n\n\ndef disk_usage(path):\n    \"\"\"Return disk usage associated with path.\"\"\"\n    try:\n        st = os.statvfs(path)\n    except UnicodeEncodeError:\n        if not PY3 and isinstance(path, unicode):\n            # this is a bug with os.statvfs() and unicode on\n            # Python 2, see:\n            # - https://github.com/giampaolo/psutil/issues/416\n            # - http://bugs.python.org/issue18695\n            try:\n                path = path.encode(sys.getfilesystemencoding())\n            except UnicodeEncodeError:\n                pass\n            st = os.statvfs(path)\n        else:\n            raise\n    free = (st.f_bavail * st.f_frsize)\n    total = (st.f_blocks * st.f_frsize)\n    used = (st.f_blocks - st.f_bfree) * st.f_frsize\n    percent = usage_percent(used, total, _round=1)\n    # NB: the percentage is about 5% lower than what is shown by df\n    # due to reserved blocks that we are currently not considering:\n    # http://goo.gl/sWGbH\n    return sdiskusage(total, used, free, percent)\n\n\n@memoize\ndef _get_terminal_map():\n    ret = {}\n    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')\n    for name in ls:\n        assert name not in ret\n        try:\n            ret[os.stat(name).st_rdev] = name\n        except OSError as err:\n            if err.errno != errno.ENOENT:\n                raise\n    return ret\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_pssunos.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Sun OS Solaris platform implementation.\"\"\"\n\nimport errno\nimport os\nimport socket\nimport subprocess\nimport sys\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import usage_percent, isfile_strict\nfrom psutil._compat import PY3\nimport _psutil_posix\nimport _psutil_sunos as cext\n\n\n__extra__all__ = [\"CONN_IDLE\", \"CONN_BOUND\"]\n\nPAGE_SIZE = os.sysconf('SC_PAGE_SIZE')\n\nCONN_IDLE = \"IDLE\"\nCONN_BOUND = \"BOUND\"\n\nPROC_STATUSES = {\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SONPROC: _common.STATUS_RUNNING,  # same as run\n    cext.SWAIT: _common.STATUS_WAITING,\n}\n\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific\n    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific\n}\n\nscputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])\nsvmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])\npextmem = namedtuple('pextmem', ['rss', 'vms'])\npmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n# --- functions\n\ndisk_io_counters = cext.disk_io_counters\nnet_io_counters = cext.net_io_counters\ndisk_usage = _psposix.disk_usage\n\n\ndef virtual_memory():\n    # we could have done this with kstat, but imho this is good enough\n    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE\n    # note: there's no difference on Solaris\n    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return svmem(total, avail, percent, used, free)\n\n\ndef swap_memory():\n    sin, sout = cext.swap_mem()\n    # XXX\n    # we are supposed to get total/free by doing so:\n    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/\n    #     usr/src/cmd/swap/swap.c\n    # ...nevertheless I can't manage to obtain the same numbers as 'swap'\n    # cmdline utility, so let's parse its output (sigh!)\n    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)\n    stdout, stderr = p.communicate()\n    if PY3:\n        stdout = stdout.decode(sys.stdout.encoding)\n    if p.returncode != 0:\n        raise RuntimeError(\"'swap -l -k' failed (retcode=%s)\" % p.returncode)\n\n    lines = stdout.strip().split('\\n')[1:]\n    if not lines:\n        raise RuntimeError('no swap device(s) configured')\n    total = free = 0\n    for line in 
lines:\n        line = line.split()\n        t, f = line[-2:]\n        t = t.replace('K', '')\n        f = f.replace('K', '')\n        total += int(int(t) * 1024)\n        free += int(int(f) * 1024)\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent,\n                         sin * PAGE_SIZE, sout * PAGE_SIZE)\n\n\ndef pids():\n    \"\"\"Returns a list of PIDs currently running on the system.\"\"\"\n    return [int(x) for x in os.listdir('/proc') if x.isdigit()]\n\n\ndef pid_exists(pid):\n    \"\"\"Check for the existence of a unix pid.\"\"\"\n    return _psposix.pid_exists(pid)\n\n\ndef cpu_times():\n    \"\"\"Return system-wide CPU times as a named tuple\"\"\"\n    ret = cext.per_cpu_times()\n    return scputimes(*[sum(x) for x in zip(*ret)])\n\n\ndef per_cpu_times():\n    \"\"\"Return system per-CPU times as a list of named tuples\"\"\"\n    ret = cext.per_cpu_times()\n    return [scputimes(*x) for x in ret]\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    try:\n        return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n    except ValueError:\n        # mimic os.cpu_count() behavior\n        return None\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    localhost = (':0.0', ':0')\n    for item in rawlist:\n        user, tty, hostname, tstamp, user_process = item\n        # note: the underlying C function includes entries about\n        # system boot, run level and others.  We might want\n        # to use them in the future.\n        if not user_process:\n            continue\n        if hostname in localhost:\n            hostname = 'localhost'\n        nt = _common.suser(user, tty, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return system disk partitions.\"\"\"\n    # TODO - the filtering logic should be better checked so that\n    # it tries to reflect 'df' as much as possible\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            # Differently from, say, Linux, we don't have a list of\n            # common fs types so the best we can do, AFAIK, is to\n            # filter by filesystem having a total size > 0.\n            if not disk_usage(mountpoint).total:\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef net_connections(kind, _pid=-1):\n    \"\"\"Return socket connections.  
If pid == -1 return system-wide\n    connections (as opposed to connections opened by one process only).\n    Only INET sockets are returned (UNIX are not).\n    \"\"\"\n    cmap = _common.conn_tmap.copy()\n    if _pid == -1:\n        cmap.pop('unix', 0)\n    if kind not in cmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in cmap])))\n    families, types = _common.conn_tmap[kind]\n    rawlist = cext.net_connections(_pid, families, types)\n    ret = []\n    for item in rawlist:\n        fd, fam, type_, laddr, raddr, status, pid = item\n        if fam not in families:\n            continue\n        if type_ not in types:\n            continue\n        status = TCP_STATUSES[status]\n        if _pid == -1:\n            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)\n        else:\n            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)\n        ret.append(nt)\n    return ret\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Wrap the callable in a try/except clause and translate ENOENT,\n    ESRCH, EACCES and EPERM into NoSuchProcess or AccessDenied\n    exceptions.\n    \"\"\"\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except EnvironmentError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            # ENOENT (no such file or directory) gets raised on open().\n            # ESRCH (no such process) can get raised on read() if\n            # process is gone in the meantime.\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        # note: max len == 15\n        return cext.proc_name_and_args(self.pid)[0]\n\n    @wrap_exceptions\n    def exe(self):\n        # Will be guessed later from cmdline but we want to explicitly\n        # invoke cmdline here in order to get an AccessDenied\n        # exception if the user does not have enough privileges.\n        self.cmdline()\n        return \"\"\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_name_and_args(self.pid)[1].split(' ')\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_basic_info(self.pid)[3]\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_basic_info(self.pid)[5]\n\n    @wrap_exceptions\n    def nice_get(self):\n        # For some reason getpriority(3) returns ESRCH (no such process)\n        # for certain low-pid processes, no matter what (even as root).\n        # The process actually exists though, as it has a name,\n        # creation time, etc.\n        # The best thing we can do here appears to be raising AD.\n        # Note: tested on Solaris 11; on Open Solaris 5 everything is\n        # fine.\n        try:\n            return _psutil_posix.getpriority(self.pid)\n        except EnvironmentError as err:\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                if pid_exists(self.pid):\n                    raise 
AccessDenied(self.pid, self._name)\n            raise\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        if self.pid in (2, 3):\n            # Special case PIDs: internally setpriority(3) returns ESRCH\n            # (no such process), no matter what.\n            # The process actually exists though, as it has a name,\n            # creation time, etc.\n            raise AccessDenied(self.pid, self._name)\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_basic_info(self.pid)[0]\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        _, _, _, real, effective, saved = cext.proc_cred(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def terminal(self):\n        hit_enoent = False\n        tty = cext.proc_basic_info(self.pid)[0]\n        if tty != cext.PRNODEV:\n            for x in (0, 1, 2, 255):\n                try:\n                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))\n                except OSError as err:\n                    if err.errno == errno.ENOENT:\n                        hit_enoent = True\n                        continue\n                    raise\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        # /proc/PID/path/cwd may not be resolved by readlink() even if\n        # it exists (ls shows it). If that's the case and the process\n        # is still alive return None (we can return None also on BSD).\n        # Reference: http://goo.gl/55XgO\n        try:\n            return os.readlink(\"/proc/%s/path/cwd\" % self.pid)\n        except OSError as err:\n            if err.errno == errno.ENOENT:\n                os.stat(\"/proc/%s\" % self.pid)\n                return None\n            raise\n\n    @wrap_exceptions\n    def memory_info(self):\n        ret = cext.proc_basic_info(self.pid)\n        rss, vms = ret[1] * 1024, ret[2] * 1024\n        return _common.pmem(rss, vms)\n\n    # it seems Solaris uses rss and vms only\n    memory_info_ex = memory_info\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_basic_info(self.pid)[6]\n        # XXX is '?' legit? 
(we're not supposed to return it anyway)\n        return PROC_STATUSES.get(code, '?')\n\n    @wrap_exceptions\n    def threads(self):\n        ret = []\n        tids = os.listdir('/proc/%d/lwp' % self.pid)\n        hit_enoent = False\n        for tid in tids:\n            tid = int(tid)\n            try:\n                utime, stime = cext.query_process_thread(\n                    self.pid, tid)\n            except EnvironmentError as err:\n                # ENOENT == thread gone in meantime\n                if err.errno == errno.ENOENT:\n                    hit_enoent = True\n                    continue\n                raise\n            else:\n                nt = _common.pthread(tid, utime, stime)\n                ret.append(nt)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return ret\n\n    @wrap_exceptions\n    def open_files(self):\n        retlist = []\n        hit_enoent = False\n        pathdir = '/proc/%d/path' % self.pid\n        for fd in os.listdir('/proc/%d/fd' % self.pid):\n            path = os.path.join(pathdir, fd)\n            if os.path.islink(path):\n                try:\n                    file = os.readlink(path)\n                except OSError as err:\n                    # ENOENT == file which is gone in the meantime\n                    if err.errno == errno.ENOENT:\n                        hit_enoent = True\n                        continue\n                    raise\n                else:\n                    if isfile_strict(file):\n                        retlist.append(_common.popenfile(file, int(fd)))\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    def _get_unix_sockets(self, pid):\n        \"\"\"Get UNIX sockets used by process by parsing 'pfiles' output.\"\"\"\n        # TODO: rewrite this in C (...but the damn netstat source code\n        # does not include this part! Argh!!)\n        cmd = \"pfiles %s\" % pid\n        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE)\n        stdout, stderr = p.communicate()\n        if PY3:\n            stdout, stderr = [x.decode(sys.stdout.encoding)\n                              for x in (stdout, stderr)]\n        if p.returncode != 0:\n            if 'permission denied' in stderr.lower():\n                raise AccessDenied(self.pid, self._name)\n            if 'no such process' in stderr.lower():\n                raise NoSuchProcess(self.pid, self._name)\n            raise RuntimeError(\"%r command error\\n%s\" % (cmd, stderr))\n\n        lines = stdout.split('\\n')[2:]\n        for i, line in enumerate(lines):\n            line = line.lstrip()\n            if line.startswith('sockname: AF_UNIX'):\n                path = line.split(' ', 2)[2]\n                type = lines[i - 2].strip()\n                if type == 'SOCK_STREAM':\n                    type = socket.SOCK_STREAM\n                elif type == 'SOCK_DGRAM':\n                    type = socket.SOCK_DGRAM\n                else:\n                    type = -1\n                yield (-1, socket.AF_UNIX, type, path, \"\", _common.CONN_NONE)\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        ret = net_connections(kind, _pid=self.pid)\n        # The underlying C implementation retrieves all OS connections\n        # and filters them by PID.  
At this point we can't tell whether\n        # an empty list means there were no connections for the process\n        # or the process is no longer active so we force NSP in case\n        # the PID is no longer there.\n        if not ret:\n            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone\n\n        # UNIX sockets\n        if kind in ('all', 'unix'):\n            ret.extend([_common.pconn(*conn) for conn in\n                        self._get_unix_sockets(self.pid)])\n        return ret\n\n    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')\n    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')\n\n    @wrap_exceptions\n    def memory_maps(self):\n        def toaddr(start, end):\n            return '%s-%s' % (hex(start)[2:].strip('L'),\n                              hex(end)[2:].strip('L'))\n\n        retlist = []\n        rawlist = cext.proc_memory_maps(self.pid)\n        hit_enoent = False\n        for item in rawlist:\n            addr, addrsize, perm, name, rss, anon, locked = item\n            addr = toaddr(addr, addrsize)\n            if not name.startswith('['):\n                try:\n                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))\n                except OSError as err:\n                    if err.errno == errno.ENOENT:\n                        # sometimes the link may not be resolved by\n                        # readlink() even if it exists (ls shows it).\n                        # If that's the case we just return the\n                        # unresolved link path.\n                        # This seems an inconsistency with /proc similar\n                        # to: http://goo.gl/55XgO\n                        name = '/proc/%s/path/%s' % (self.pid, name)\n                        hit_enoent = True\n                    else:\n                        raise\n            retlist.append((addr, perm, name, rss, anon, locked))\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def num_fds(self):\n        return len(os.listdir(\"/proc/%s/fd\" % self.pid))\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n"
  },
  {
    "path": "Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_pswindows.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Windows platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil._common import conn_tmap, usage_percent, isfile_strict\nfrom psutil._compat import PY3, xrange, lru_cache\nimport _psutil_windows as cext\n\n# process priority constants, import from __init__.py:\n# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx\n__extra__all__ = [\"ABOVE_NORMAL_PRIORITY_CLASS\", \"BELOW_NORMAL_PRIORITY_CLASS\",\n                  \"HIGH_PRIORITY_CLASS\", \"IDLE_PRIORITY_CLASS\",\n                  \"NORMAL_PRIORITY_CLASS\", \"REALTIME_PRIORITY_CLASS\",\n                  #\n                  \"CONN_DELETE_TCB\",\n                  ]\n\n# --- module level constants (gets pushed up to psutil module)\n\nCONN_DELETE_TCB = \"DELETE_TCB\"\nWAIT_TIMEOUT = 0x00000102  # 258 in decimal\nACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,\n                               cext.ERROR_ACCESS_DENIED])\n\nTCP_STATUSES = {\n    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,\n    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,\n    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,\n    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,\n    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,\n    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,\n    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,\n    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\n\nscputimes = namedtuple('scputimes', ['user', 'system', 'idle'])\nsvmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])\npextmem = namedtuple(\n    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',\n                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',\n                'pagefile', 'peak_pagefile', 'private'])\npmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n@lru_cache(maxsize=512)\ndef _win32_QueryDosDevice(s):\n    return cext.win32_QueryDosDevice(s)\n\n\ndef _convert_raw_path(s):\n    # convert paths using native DOS format like:\n    # \"\\Device\\HarddiskVolume1\\Windows\\systemew\\file.txt\"\n    # into: \"C:\\Windows\\systemew\\file.txt\"\n    if PY3 and not isinstance(s, str):\n        s = s.decode('utf8')\n    rawdrive = '\\\\'.join(s.split('\\\\')[:3])\n    driveletter = _win32_QueryDosDevice(rawdrive)\n    return os.path.join(driveletter, s[len(rawdrive):])\n\n\n# --- public functions\n\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    mem = cext.virtual_mem()\n    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem\n    #\n    total = totphys\n    avail = availphys\n    free = availphys\n    used = total - avail\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free)\n\n\ndef 
swap_memory():\n    \"\"\"Swap system memory as a (total, used, free, sin, sout) tuple.\"\"\"\n    mem = cext.virtual_mem()\n    total = mem[2]\n    free = mem[3]\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, 0, 0)\n\n\ndef disk_usage(path):\n    \"\"\"Return disk usage associated with path.\"\"\"\n    try:\n        total, free = cext.disk_usage(path)\n    except WindowsError:\n        if not os.path.exists(path):\n            msg = \"No such file or directory: '%s'\" % path\n            raise OSError(errno.ENOENT, msg)\n        raise\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sdiskusage(total, used, free, percent)\n\n\ndef disk_partitions(all):\n    \"\"\"Return disk partitions.\"\"\"\n    rawlist = cext.disk_partitions(all)\n    return [_common.sdiskpart(*x) for x in rawlist]\n\n\ndef cpu_times():\n    \"\"\"Return system CPU times as a named tuple.\"\"\"\n    user, system, idle = cext.cpu_times()\n    return scputimes(user, system, idle)\n\n\ndef per_cpu_times():\n    \"\"\"Return system per-CPU times as a list of named tuples.\"\"\"\n    ret = []\n    for cpu_t in cext.per_cpu_times():\n        user, system, idle = cpu_t\n        item = scputimes(user, system, idle)\n        ret.append(item)\n    return ret\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef net_connections(kind, _pid=-1):\n    \"\"\"Return socket connections.  
If pid == -1 return system-wide\n    connections (as opposed to connections opened by one process only).\n    \"\"\"\n    if kind not in conn_tmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in conn_tmap])))\n    families, types = conn_tmap[kind]\n    rawlist = cext.net_connections(_pid, families, types)\n    ret = []\n    for item in rawlist:\n        fd, fam, type, laddr, raddr, status, pid = item\n        status = TCP_STATUSES[status]\n        if _pid == -1:\n            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)\n        else:\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n        ret.append(nt)\n    return ret\n\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, hostname, tstamp = item\n        nt = _common.suser(user, None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\npids = cext.pids\npid_exists = cext.pid_exists\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\nppid_map = cext.ppid_map  # not meant to be public\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError and WindowsError\n    exceptions into NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno in ACCESS_DENIED_SET:\n                raise AccessDenied(self.pid, self._name)\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        \"\"\"Return process name, which on Windows is always the final\n        part of the executable.\n        \"\"\"\n        # This is how PIDs 0 and 4 are always represented in taskmgr\n        # and process-hacker.\n        if self.pid == 0:\n            return \"System Idle Process\"\n        elif self.pid == 4:\n            return \"System\"\n        else:\n            return os.path.basename(self.exe())\n\n    @wrap_exceptions\n    def exe(self):\n        # Note: os.path.exists(path) may return False even if the file\n        # is there, see:\n        # http://stackoverflow.com/questions/3112546/os-path-exists-lies\n\n        # see https://github.com/giampaolo/psutil/issues/414\n        # see https://github.com/giampaolo/psutil/issues/528\n        if self.pid in (0, 4):\n            raise AccessDenied(self.pid, self._name)\n        return _convert_raw_path(cext.proc_exe(self.pid))\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_cmdline(self.pid)\n\n    def ppid(self):\n        try:\n            return ppid_map()[self.pid]\n        except KeyError:\n            raise NoSuchProcess(self.pid, self._name)\n\n    def _get_raw_meminfo(self):\n        try:\n            return cext.proc_memory_info(self.pid)\n        except OSError as err:\n            if err.errno in 
ACCESS_DENIED_SET:\n                return cext.proc_memory_info_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def memory_info(self):\n        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage\n        # fields of PROCESS_MEMORY_COUNTERS struct:\n        # http://msdn.microsoft.com/en-us/library/windows/desktop/\n        #     ms684877(v=vs.85).aspx\n        t = self._get_raw_meminfo()\n        return _common.pmem(t[2], t[7])\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        return pextmem(*self._get_raw_meminfo())\n\n    def memory_maps(self):\n        try:\n            raw = cext.proc_memory_maps(self.pid)\n        except OSError as err:\n            # XXX - can't use wrap_exceptions decorator as we're\n            # returning a generator; probably needs refactoring.\n            if err.errno in ACCESS_DENIED_SET:\n                raise AccessDenied(self.pid, self._name)\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            raise\n        else:\n            for addr, perm, path, rss in raw:\n                path = _convert_raw_path(path)\n                addr = hex(addr)\n                yield (addr, perm, path, rss)\n\n    @wrap_exceptions\n    def kill(self):\n        return cext.proc_kill(self.pid)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        if timeout is None:\n            timeout = cext.INFINITE\n        else:\n            # WaitForSingleObject() expects time in milliseconds\n            timeout = int(timeout * 1000)\n        ret = cext.proc_wait(self.pid, timeout)\n        if ret == WAIT_TIMEOUT:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise RuntimeError(\"timeout expired\")\n            raise TimeoutExpired(timeout, self.pid, self._name)\n        return ret\n\n    @wrap_exceptions\n    def username(self):\n        if self.pid in (0, 4):\n            return 'NT AUTHORITY\\\\SYSTEM'\n        return cext.proc_username(self.pid)\n\n    @wrap_exceptions\n    def create_time(self):\n        # special case for kernel process PIDs; return system boot time\n        if self.pid in (0, 4):\n            return boot_time()\n        try:\n            return cext.proc_create_time(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                return cext.proc_create_time_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def cpu_times(self):\n        try:\n            ret = cext.proc_cpu_times(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                ret = cext.proc_cpu_times_2(self.pid)\n            else:\n                raise\n        return _common.pcputimes(*ret)\n\n    @wrap_exceptions\n    def suspend(self):\n        return cext.proc_suspend(self.pid)\n\n    @wrap_exceptions\n    def resume(self):\n        return cext.proc_resume(self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        if self.pid in (0, 4):\n            raise AccessDenied(self.pid, self._name)\n        # return a normalized pathname since the native 
C function appends\n        # \"\\\\\" at the end of the path\n        path = cext.proc_cwd(self.pid)\n        return os.path.normpath(path)\n\n    @wrap_exceptions\n    def open_files(self):\n        if self.pid in (0, 4):\n            return []\n        retlist = []\n        # Filenames come in native format like:\n        # \"\\Device\\HarddiskVolume1\\Windows\\systemew\\file.txt\"\n        # Convert the first part into the corresponding drive letter\n        # (e.g. \"C:\\\") by using Windows's QueryDosDevice()\n        raw_file_names = cext.proc_open_files(self.pid)\n        for file in raw_file_names:\n            file = _convert_raw_path(file)\n            if isfile_strict(file) and file not in retlist:\n                ntuple = _common.popenfile(file, -1)\n                retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        return net_connections(kind, _pid=self.pid)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return cext.proc_priority_get(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return cext.proc_priority_set(self.pid, value)\n\n    # available on Windows >= Vista\n    if hasattr(cext, \"proc_io_priority_get\"):\n        @wrap_exceptions\n        def ionice_get(self):\n            return cext.proc_io_priority_get(self.pid)\n\n        @wrap_exceptions\n        def ionice_set(self, value, _):\n            if _:\n                raise TypeError(\"set_proc_ionice() on Windows takes only \"\n                                \"1 argument (2 given)\")\n            if value not in (2, 1, 0):\n                raise ValueError(\"value must be 2 (normal), 1 (low) or 0 \"\n                                 \"(very low); got %r\" % value)\n            return cext.proc_io_priority_set(self.pid, value)\n\n    @wrap_exceptions\n    def io_counters(self):\n        try:\n            ret = cext.proc_io_counters(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                ret = cext.proc_io_counters_2(self.pid)\n            else:\n                raise\n        return _common.pio(*ret)\n\n    @wrap_exceptions\n    def status(self):\n        suspended = cext.proc_is_suspended(self.pid)\n        if suspended:\n            return _common.STATUS_STOPPED\n        else:\n            return _common.STATUS_RUNNING\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]\n        bitmask = cext.proc_cpu_affinity_get(self.pid)\n        return from_bitmask(bitmask)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, value):\n        def to_bitmask(l):\n            if not l:\n                raise ValueError(\"invalid argument %r\" % l)\n            out = 0\n            for b in l:\n                out |= 2 ** b\n            return out\n\n        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER\n        # is returned for an invalid CPU but this seems not to be true,\n        # therefore we check CPU validity beforehand.\n        allcpus = list(range(len(per_cpu_times())))\n        for cpu in value:\n            if cpu not in allcpus:\n                raise ValueError(\"invalid CPU %r\" % cpu)\n\n        bitmask = to_bitmask(value)\n        cext.proc_cpu_affinity_set(self.pid, bitmask)\n\n    @wrap_exceptions\n    def num_handles(self):\n        try:\n            return cext.proc_num_handles(self.pid)\n        except OSError as err:\n            if err.errno in 
ACCESS_DENIED_SET:\n                return cext.proc_num_handles_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        tupl = cext.proc_num_ctx_switches(self.pid)\n        return _common.pctxsw(*tupl)\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/__init__.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"psutil is a cross-platform library for retrieving information on\nrunning processes and system utilization (CPU, memory, disks, network)\nin Python.\n\"\"\"\n\nfrom __future__ import division\n\n__author__ = \"Giampaolo Rodola'\"\n__version__ = \"2.2.0\"\nversion_info = tuple([int(num) for num in __version__.split('.')])\n\n__all__ = [\n    # exceptions\n    \"Error\", \"NoSuchProcess\", \"AccessDenied\", \"TimeoutExpired\",\n    # constants\n    \"version_info\", \"__version__\",\n    \"STATUS_RUNNING\", \"STATUS_IDLE\", \"STATUS_SLEEPING\", \"STATUS_DISK_SLEEP\",\n    \"STATUS_STOPPED\", \"STATUS_TRACING_STOP\", \"STATUS_ZOMBIE\", \"STATUS_DEAD\",\n    \"STATUS_WAKING\", \"STATUS_LOCKED\", \"STATUS_WAITING\", \"STATUS_LOCKED\",\n    \"CONN_ESTABLISHED\", \"CONN_SYN_SENT\", \"CONN_SYN_RECV\", \"CONN_FIN_WAIT1\",\n    \"CONN_FIN_WAIT2\", \"CONN_TIME_WAIT\", \"CONN_CLOSE\", \"CONN_CLOSE_WAIT\",\n    \"CONN_LAST_ACK\", \"CONN_LISTEN\", \"CONN_CLOSING\", \"CONN_NONE\",\n    # classes\n    \"Process\", \"Popen\",\n    # functions\n    \"pid_exists\", \"pids\", \"process_iter\", \"wait_procs\",             # proc\n    \"virtual_memory\", \"swap_memory\",                                # memory\n    \"cpu_times\", \"cpu_percent\", \"cpu_times_percent\", \"cpu_count\",   # cpu\n    \"net_io_counters\", \"net_connections\",                           # network\n    \"disk_io_counters\", \"disk_partitions\", \"disk_usage\",            # disk\n    \"users\", \"boot_time\",                                           # others\n]\n\nimport collections\nimport errno\nimport functools\nimport os\nimport signal\nimport subprocess\nimport sys\nimport time\nimport warnings\ntry:\n    import pwd\nexcept ImportError:\n    pwd = None\n\nfrom psutil._common import memoize\nfrom psutil._compat import callable, long\nfrom psutil._compat import PY3 as _PY3\nfrom psutil._common import (deprecated_method as _deprecated_method,\n                            deprecated as _deprecated,\n                            sdiskio as _nt_sys_diskio,\n                            snetio as _nt_sys_netio)\n\nfrom psutil._common import (STATUS_RUNNING,  # NOQA\n                            STATUS_SLEEPING,\n                            STATUS_DISK_SLEEP,\n                            STATUS_STOPPED,\n                            STATUS_TRACING_STOP,\n                            STATUS_ZOMBIE,\n                            STATUS_DEAD,\n                            STATUS_WAKING,\n                            STATUS_LOCKED,\n                            STATUS_IDLE,  # bsd\n                            STATUS_WAITING,  # bsd\n                            STATUS_LOCKED)  # bsd\n\nfrom psutil._common import (CONN_ESTABLISHED,\n                            CONN_SYN_SENT,\n                            CONN_SYN_RECV,\n                            CONN_FIN_WAIT1,\n                            CONN_FIN_WAIT2,\n                            CONN_TIME_WAIT,\n                            CONN_CLOSE,\n                            CONN_CLOSE_WAIT,\n                            CONN_LAST_ACK,\n                            CONN_LISTEN,\n                            CONN_CLOSING,\n                            CONN_NONE)\n\nif sys.platform.startswith(\"linux\"):\n    import psutil._pslinux as _psplatform\n    from psutil._pslinux import 
(phymem_buffers,  # NOQA\n                                 cached_phymem)\n\n    from psutil._pslinux import (IOPRIO_CLASS_NONE,  # NOQA\n                                 IOPRIO_CLASS_RT,\n                                 IOPRIO_CLASS_BE,\n                                 IOPRIO_CLASS_IDLE)\n    # Linux >= 2.6.36\n    if _psplatform.HAS_PRLIMIT:\n        from _psutil_linux import (RLIM_INFINITY,  # NOQA\n                                   RLIMIT_AS,\n                                   RLIMIT_CORE,\n                                   RLIMIT_CPU,\n                                   RLIMIT_DATA,\n                                   RLIMIT_FSIZE,\n                                   RLIMIT_LOCKS,\n                                   RLIMIT_MEMLOCK,\n                                   RLIMIT_NOFILE,\n                                   RLIMIT_NPROC,\n                                   RLIMIT_RSS,\n                                   RLIMIT_STACK)\n        # Kinda ugly but considerably faster than using hasattr() and\n        # setattr() against the module object (we are at import time:\n        # speed matters).\n        import _psutil_linux\n        try:\n            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME\n        except AttributeError:\n            pass\n        try:\n            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING\n        except AttributeError:\n            pass\n        del _psutil_linux\n\nelif sys.platform.startswith(\"win32\"):\n    import psutil._pswindows as _psplatform\n    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,  # NOQA\n                                 BELOW_NORMAL_PRIORITY_CLASS,\n                                 HIGH_PRIORITY_CLASS,\n                                 IDLE_PRIORITY_CLASS,\n                                 NORMAL_PRIORITY_CLASS,\n                                 REALTIME_PRIORITY_CLASS)\n    from psutil._pswindows import CONN_DELETE_TCB  # NOQA\n\nelif sys.platform.startswith(\"darwin\"):\n    import psutil._psosx as _psplatform\n\nelif sys.platform.startswith(\"freebsd\"):\n    import psutil._psbsd as _psplatform\n\nelif sys.platform.startswith(\"sunos\"):\n    import psutil._pssunos as _psplatform\n    from psutil._pssunos import (CONN_IDLE,  # NOQA\n                                 CONN_BOUND)\n\nelse:\n    raise NotImplementedError('platform %s is not supported' % sys.platform)\n\n__all__.extend(_psplatform.__extra__all__)\n\n\n_TOTAL_PHYMEM = None\n_POSIX = os.name == 'posix'\n_WINDOWS = os.name == 'nt'\n_timer = getattr(time, 'monotonic', time.time)\n\n\n# Sanity check in case the user messed up with psutil installation\n# or did something weird with sys.path. 
In this case we might end\n# up importing a python module using a C extension module which\n# was compiled for a different version of psutil.\n# We want to prevent that by failing sooner rather than later.\n# See: https://github.com/giampaolo/psutil/issues/564\nif (int(__version__.replace('.', '')) !=\n        getattr(_psplatform.cext, 'version', None)):\n    msg = \"version conflict: %r C extension module was built for another \" \\\n          \"version of psutil (different than %s)\" % (_psplatform.cext.__file__,\n                                                     __version__)\n    raise ImportError(msg)\n\n\n# =====================================================================\n# --- exceptions\n# =====================================================================\n\nclass Error(Exception):\n    \"\"\"Base exception class. All other psutil exceptions inherit\n    from this one.\n    \"\"\"\n\n\nclass NoSuchProcess(Error):\n    \"\"\"Exception raised when a process with a certain PID doesn't\n    exist or no longer exists (zombie).\n    \"\"\"\n\n    def __init__(self, pid, name=None, msg=None):\n        Error.__init__(self)\n        self.pid = pid\n        self.name = name\n        self.msg = msg\n        if msg is None:\n            if name:\n                details = \"(pid=%s, name=%s)\" % (self.pid, repr(self.name))\n            else:\n                details = \"(pid=%s)\" % self.pid\n            self.msg = \"process no longer exists \" + details\n\n    def __str__(self):\n        return self.msg\n\n\nclass AccessDenied(Error):\n    \"\"\"Exception raised when permission to perform an action is denied.\"\"\"\n\n    def __init__(self, pid=None, name=None, msg=None):\n        Error.__init__(self)\n        self.pid = pid\n        self.name = name\n        self.msg = msg\n        if msg is None:\n            if (pid is not None) and (name is not None):\n                self.msg = \"(pid=%s, name=%s)\" % (pid, repr(name))\n            elif (pid is not None):\n                self.msg = \"(pid=%s)\" % self.pid\n            else:\n                self.msg = \"\"\n\n    def __str__(self):\n        return self.msg\n\n\nclass TimeoutExpired(Error):\n    \"\"\"Raised on Process.wait(timeout) if timeout expires and process\n    is still alive.\n    \"\"\"\n\n    def __init__(self, seconds, pid=None, name=None):\n        Error.__init__(self)\n        self.seconds = seconds\n        self.pid = pid\n        self.name = name\n        self.msg = \"timeout after %s seconds\" % seconds\n        if (pid is not None) and (name is not None):\n            self.msg += \" (pid=%s, name=%s)\" % (pid, repr(name))\n        elif (pid is not None):\n            self.msg += \" (pid=%s)\" % self.pid\n\n    def __str__(self):\n        return self.msg\n\n# push exception classes into platform specific module namespace\n_psplatform.NoSuchProcess = NoSuchProcess\n_psplatform.AccessDenied = AccessDenied\n_psplatform.TimeoutExpired = TimeoutExpired\n\n\n# =====================================================================\n# --- Process class\n# =====================================================================\n\ndef _assert_pid_not_reused(fun):\n    \"\"\"Decorator which raises NoSuchProcess in case a process is no\n    longer running or its PID has been reused.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        if not self.is_running():\n            raise NoSuchProcess(self.pid, self._name)\n        return fun(self, *args, **kwargs)\n    return wrapper\n\n\nclass 
Process(object):\n    \"\"\"Represents an OS process with the given PID.\n    If PID is omitted, the current process PID (os.getpid()) is used.\n    Raise NoSuchProcess if PID does not exist.\n\n    Note that most of the methods of this class do not make sure\n    the PID of the process being queried has not been reused over\n    time. That means you might end up retrieving information\n    referring to another process in case the original one this\n    instance refers to is gone in the meantime.\n\n    The only exceptions for which process identity is pre-emptively\n    checked and guaranteed are:\n\n     - parent()\n     - children()\n     - nice() (set)\n     - ionice() (set)\n     - rlimit() (set)\n     - cpu_affinity (set)\n     - suspend()\n     - resume()\n     - send_signal()\n     - terminate()\n     - kill()\n\n    To prevent this problem for all other methods you can:\n      - use is_running() before querying the process\n      - if you're continuously iterating over a set of Process\n        instances use process_iter() which pre-emptively checks\n        process identity for every yielded instance\n    \"\"\"\n\n    def __init__(self, pid=None):\n        self._init(pid)\n\n    def _init(self, pid, _ignore_nsp=False):\n        if pid is None:\n            pid = os.getpid()\n        else:\n            if not _PY3 and not isinstance(pid, (int, long)):\n                raise TypeError('pid must be an integer (got %r)' % pid)\n            if pid < 0:\n                raise ValueError('pid must be a positive integer (got %s)'\n                                 % pid)\n        self._pid = pid\n        self._name = None\n        self._exe = None\n        self._create_time = None\n        self._gone = False\n        self._hash = None\n        # used for caching on Windows only (on POSIX ppid may change)\n        self._ppid = None\n        # platform-specific modules define an _psplatform.Process\n        # implementation class\n        self._proc = _psplatform.Process(pid)\n        self._last_sys_cpu_times = None\n        self._last_proc_cpu_times = None\n        # cache creation time for later use in is_running() method\n        try:\n            self.create_time()\n        except AccessDenied:\n            # we should never get here as AFAIK we're able to get\n            # process creation time on all platforms even as a\n            # limited user\n            pass\n        except NoSuchProcess:\n            if not _ignore_nsp:\n                msg = 'no process found with pid %s' % pid\n                raise NoSuchProcess(pid, None, msg)\n            else:\n                self._gone = True\n        # This pair is supposed to identify a Process instance\n        # univocally over time (the PID alone is not enough as\n        # it might refer to a process whose PID has been reused).\n        # This will be used later in __eq__() and is_running().\n        self._ident = (self.pid, self._create_time)\n\n    def __str__(self):\n        try:\n            pid = self.pid\n            name = repr(self.name())\n        except NoSuchProcess:\n            details = \"(pid=%s (terminated))\" % self.pid\n        except AccessDenied:\n            details = \"(pid=%s)\" % (self.pid)\n        else:\n            details = \"(pid=%s, name=%s)\" % (pid, name)\n        return \"%s.%s%s\" % (self.__class__.__module__,\n                            self.__class__.__name__, details)\n\n    def __repr__(self):\n        return \"<%s at %s>\" % (self.__str__(), id(self))\n\n    def __eq__(self, other):\n        # 
    def __eq__(self, other):\n        # Test for equality with another Process object based\n        # on PID and creation time.\n        if not isinstance(other, Process):\n            return NotImplemented\n        return self._ident == other._ident\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __hash__(self):\n        if self._hash is None:\n            self._hash = hash(self._ident)\n        return self._hash\n\n    # --- utility methods\n\n    def as_dict(self, attrs=None, ad_value=None):\n        \"\"\"Utility method returning process information as a\n        dictionary.\n\n        If 'attrs' is specified it must be a list of strings\n        reflecting available Process class' attribute names\n        (e.g. ['cpu_times', 'name']) else all public (read\n        only) attributes are assumed.\n\n        'ad_value' is the value which gets assigned in case an\n        AccessDenied exception is raised when retrieving that\n        particular process information.\n        \"\"\"\n        excluded_names = set(\n            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',\n             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])\n        retdict = dict()\n        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])\n        for name in ls:\n            if name.startswith('_'):\n                continue\n            if name.startswith('set_'):\n                continue\n            if name.startswith('get_'):\n                msg = \"%s() is deprecated; use %s() instead\" % (name, name[4:])\n                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n                name = name[4:]\n                if name in ls:\n                    continue\n            if name == 'getcwd':\n                msg = \"getcwd() is deprecated; use cwd() instead\"\n                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n                name = 'cwd'\n                if name in ls:\n                    continue\n\n            if name in excluded_names:\n                continue\n            try:\n                attr = getattr(self, name)\n                if callable(attr):\n                    ret = attr()\n                else:\n                    ret = attr\n            except AccessDenied:\n                ret = ad_value\n            except NotImplementedError:\n                # in case of not implemented functionality (may happen\n                # on old or exotic systems) we want to crash only if\n                # the user explicitly asked for that particular attr\n                if attrs:\n                    raise\n                continue\n            retdict[name] = ret\n        return retdict\n\n    def parent(self):\n        \"\"\"Return the parent process as a Process object pre-emptively\n        checking whether PID has been reused.\n        If no parent is known return None.\n        \"\"\"\n        ppid = self.ppid()\n        if ppid is not None:\n            try:\n                parent = Process(ppid)\n                if parent.create_time() <= self.create_time():\n                    return parent\n                # ...else ppid has been reused by another process\n            except NoSuchProcess:\n                pass\n\n
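    # Example (a sketch; the output values are illustrative only): fetch a\n    # subset of info in one call, substituting None where access is denied:\n    #\n    #   >>> psutil.Process().as_dict(attrs=['pid', 'name'], ad_value=None)\n    #   {'pid': 1234, 'name': 'python'}\n\n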
    def is_running(self):\n        \"\"\"Return whether this process is running.\n        It also checks if PID has been reused by another process in\n        which case return False.\n        \"\"\"\n        if self._gone:\n            return False\n        try:\n            # Checking if PID is alive is not enough as the PID might\n            # have been reused by another process: we also want to\n            # check process identity.\n            # Process identity / uniqueness over time is guaranteed by\n            # (PID + creation time) and that is verified in __eq__.\n            return self == Process(self.pid)\n        except NoSuchProcess:\n            self._gone = True\n            return False\n\n    # --- actual API\n\n    @property\n    def pid(self):\n        \"\"\"The process PID.\"\"\"\n        return self._pid\n\n    def ppid(self):\n        \"\"\"The process parent PID.\n        On Windows the return value is cached after first call.\n        \"\"\"\n        # On POSIX we don't want to cache the ppid as it may unexpectedly\n        # change to 1 (init) in case this process turns into a zombie:\n        # https://github.com/giampaolo/psutil/issues/321\n        # http://stackoverflow.com/questions/356722/\n\n        # XXX should we check creation time here rather than in\n        # Process.parent()?\n        if _POSIX:\n            return self._proc.ppid()\n        else:\n            if self._ppid is None:\n                self._ppid = self._proc.ppid()\n            return self._ppid\n\n    def name(self):\n        \"\"\"The process name. The return value is cached after first call.\"\"\"\n        if self._name is None:\n            name = self._proc.name()\n            if _POSIX and len(name) >= 15:\n                # On UNIX the name gets truncated to the first 15 characters.\n                # If it matches the first part of the cmdline we return that\n                # one instead because it's usually more descriptive.\n                # Examples are \"gnome-keyring-d\" vs. \"gnome-keyring-daemon\".\n                try:\n                    cmdline = self.cmdline()\n                except AccessDenied:\n                    pass\n                else:\n                    if cmdline:\n                        extended_name = os.path.basename(cmdline[0])\n                        if extended_name.startswith(name):\n                            name = extended_name\n            self._proc._name = name\n            self._name = name\n        return self._name\n\n    def exe(self):\n        \"\"\"The process executable as an absolute path.\n        May also be an empty string.\n        The return value is cached after first call.\n        \"\"\"\n        def guess_it(fallback):\n            # try to guess exe from cmdline[0] in absence of a native\n            # exe representation\n            cmdline = self.cmdline()\n            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):\n                exe = cmdline[0]  # the possible exe\n                # Attempt to guess only in case of an absolute path.\n                # It is not safe otherwise as the process might have\n                # changed cwd.\n                if (os.path.isabs(exe)\n                        and os.path.isfile(exe)\n                        and os.access(exe, os.X_OK)):\n                    return exe\n            if isinstance(fallback, AccessDenied):\n                raise fallback\n            return fallback\n\n        if self._exe is None:\n            try:\n                exe = self._proc.exe()\n            except AccessDenied as err:\n                return guess_it(fallback=err)\n            else:\n                if not exe:\n                    # underlying implementation can legitimately return an\n                    # empty string; if that's the case we don't 
want to\n                    # raise AD while guessing from the cmdline\n                    try:\n                        exe = guess_it(fallback=exe)\n                    except AccessDenied:\n                        pass\n                self._exe = exe\n        return self._exe\n\n    def cmdline(self):\n        \"\"\"The command line this process has been called with.\"\"\"\n        return self._proc.cmdline()\n\n    def status(self):\n        \"\"\"The process current status as a STATUS_* constant.\"\"\"\n        return self._proc.status()\n\n    def username(self):\n        \"\"\"The name of the user that owns the process.\n        On UNIX this is calculated by using *real* process uid.\n        \"\"\"\n        if _POSIX:\n            if pwd is None:\n                # might happen if python was installed from sources\n                raise ImportError(\n                    \"requires pwd module shipped with standard python\")\n            real_uid = self.uids().real\n            try:\n                return pwd.getpwuid(real_uid).pw_name\n            except KeyError:\n                # the uid can't be resolved by the system\n                return str(real_uid)\n        else:\n            return self._proc.username()\n\n    def create_time(self):\n        \"\"\"The process creation time as a floating point number\n        expressed in seconds since the epoch, in UTC.\n        The return value is cached after first call.\n        \"\"\"\n        if self._create_time is None:\n            self._create_time = self._proc.create_time()\n        return self._create_time\n\n    def cwd(self):\n        \"\"\"Process current working directory as an absolute path.\"\"\"\n        return self._proc.cwd()\n\n    def nice(self, value=None):\n        \"\"\"Get or set process niceness (priority).\"\"\"\n        if value is None:\n            return self._proc.nice_get()\n        else:\n            if not self.is_running():\n                raise NoSuchProcess(self.pid, self._name)\n            self._proc.nice_set(value)\n\n    if _POSIX:\n\n        def uids(self):\n            \"\"\"Return process UIDs as a (real, effective, saved)\n            namedtuple.\n            \"\"\"\n            return self._proc.uids()\n\n        def gids(self):\n            \"\"\"Return process GIDs as a (real, effective, saved)\n            namedtuple.\n            \"\"\"\n            return self._proc.gids()\n\n        def terminal(self):\n            \"\"\"The terminal associated with this process, if any,\n            else None.\n            \"\"\"\n            return self._proc.terminal()\n\n        def num_fds(self):\n            \"\"\"Return the number of file descriptors opened by this\n            process (POSIX only).\n            \"\"\"\n            return self._proc.num_fds()\n\n    # Linux, BSD and Windows only\n    if hasattr(_psplatform.Process, \"io_counters\"):\n\n        def io_counters(self):\n            \"\"\"Return process I/O statistics as a\n            (read_count, write_count, read_bytes, write_bytes)\n            namedtuple.\n            Those are the number of read/write calls performed and the\n            amount of bytes read and written by the process.\n            \"\"\"\n            return self._proc.io_counters()\n\n    # Linux and Windows >= Vista only\n    if hasattr(_psplatform.Process, \"ionice_get\"):\n\n        def ionice(self, ioclass=None, value=None):\n            \"\"\"Get or set process I/O niceness (priority).\n\n            On Linux 'ioclass' is one of the IOPRIO_CLASS_* 
constants.\n            'value' is a number which goes from 0 to 7. The higher the\n            value, the lower the I/O priority of the process.\n\n            On Windows only 'ioclass' is used and it can be set to 2\n            (normal), 1 (low) or 0 (very low).\n\n            Available on Linux and Windows >= Vista only.\n            \"\"\"\n            if ioclass is None:\n                if value is not None:\n                    raise ValueError(\"'ioclass' must be specified\")\n                return self._proc.ionice_get()\n            else:\n                return self._proc.ionice_set(ioclass, value)\n\n    # Linux only\n    if hasattr(_psplatform.Process, \"rlimit\"):\n\n        def rlimit(self, resource, limits=None):\n            \"\"\"Get or set process resource limits as a (soft, hard)\n            tuple.\n\n            'resource' is one of the RLIMIT_* constants.\n            'limits' is supposed to be a (soft, hard) tuple.\n\n            See \"man prlimit\" for further info.\n            Available on Linux only.\n            \"\"\"\n            if limits is None:\n                return self._proc.rlimit(resource)\n            else:\n                return self._proc.rlimit(resource, limits)\n\n    # Windows, Linux and BSD only\n    if hasattr(_psplatform.Process, \"cpu_affinity_get\"):\n\n        def cpu_affinity(self, cpus=None):\n            \"\"\"Get or set process CPU affinity.\n            If specified 'cpus' must be a list of CPUs for which you\n            want to set the affinity (e.g. [0, 1]).\n            (Windows, Linux and BSD only).\n            \"\"\"\n            if cpus is None:\n                return self._proc.cpu_affinity_get()\n            else:\n                self._proc.cpu_affinity_set(cpus)\n\n    if _WINDOWS:\n\n        def num_handles(self):\n            \"\"\"Return the number of handles opened by this process\n            (Windows only).\n            \"\"\"\n            return self._proc.num_handles()\n\n    def num_ctx_switches(self):\n        \"\"\"Return the number of voluntary and involuntary context\n        switches performed by this process.\n        \"\"\"\n        return self._proc.num_ctx_switches()\n\n    def num_threads(self):\n        \"\"\"Return the number of threads used by this process.\"\"\"\n        return self._proc.num_threads()\n\n    def threads(self):\n        \"\"\"Return threads opened by process as a list of\n        (id, user_time, system_time) namedtuples representing\n        thread id and thread CPU times (user/system).\n        \"\"\"\n        return self._proc.threads()\n\n    @_assert_pid_not_reused\n    def children(self, recursive=False):\n        \"\"\"Return the children of this process as a list of Process\n        instances, pre-emptively checking whether PID has been reused.\n        If recursive is True return all the descendants of this\n        process (children, grandchildren, and so on).\n\n        Example (A == this process):\n\n         A ─┐\n            │\n            ├─ B (child) ─┐\n            │             └─ X (grandchild) ─┐\n            │                                └─ Y (great grandchild)\n            ├─ C (child)\n            └─ D (child)\n\n        >>> import psutil\n        >>> p = psutil.Process()\n        >>> p.children()\n        B, C, D\n        >>> p.children(recursive=True)\n        B, X, Y, C, D\n\n        Note that in the example above if process X disappears\n        process Y won't be listed as the reference to process A\n        is lost.\n        \"\"\"\n        if hasattr(_psplatform, 'ppid_map'):\n            # Windows 
only: obtain a {pid:ppid, ...} dict for all running\n            # processes in one shot (faster).\n            ppid_map = _psplatform.ppid_map()\n        else:\n            ppid_map = None\n\n        ret = []\n        if not recursive:\n            if ppid_map is None:\n                # 'slow' version, common to all platforms except Windows\n                for p in process_iter():\n                    try:\n                        if p.ppid() == self.pid:\n                            # if child happens to be older than its parent\n                            # (self) it means child's PID has been reused\n                            if self.create_time() <= p.create_time():\n                                ret.append(p)\n                    except NoSuchProcess:\n                        pass\n            else:\n                # Windows only (faster)\n                for pid, ppid in ppid_map.items():\n                    if ppid == self.pid:\n                        try:\n                            child = Process(pid)\n                            # if child happens to be older than its parent\n                            # (self) it means child's PID has been reused\n                            if self.create_time() <= child.create_time():\n                                ret.append(child)\n                        except NoSuchProcess:\n                            pass\n        else:\n            # construct a dict where 'values' are all the processes\n            # having 'key' as their parent\n            table = collections.defaultdict(list)\n            if ppid_map is None:\n                for p in process_iter():\n                    try:\n                        table[p.ppid()].append(p)\n                    except NoSuchProcess:\n                        pass\n            else:\n                for pid, ppid in ppid_map.items():\n                    try:\n                        p = Process(pid)\n                        table[ppid].append(p)\n                    except NoSuchProcess:\n                        pass\n            # At this point we have a mapping table where table[self.pid]\n            # are the current process' children.\n            # Below, we look for all descendants recursively, similarly\n            # to a recursive function call.\n            checkpids = [self.pid]\n            for pid in checkpids:\n                for child in table[pid]:\n                    try:\n                        # if child happens to be older than its parent\n                        # (self) it means child's PID has been reused\n                        intime = self.create_time() <= child.create_time()\n                    except NoSuchProcess:\n                        pass\n                    else:\n                        if intime:\n                            ret.append(child)\n                            if child.pid not in checkpids:\n                                checkpids.append(child.pid)\n        return ret\n\n    def cpu_percent(self, interval=None):\n        \"\"\"Return a float representing the current process CPU\n        utilization as a percentage.\n\n        When interval is 0.0 or None (default) compares process times\n        to system CPU times elapsed since last call, returning\n        immediately (non-blocking). 
That means that the first time\n        this is called it will return a meaningless 0.0 value which\n        you should ignore.\n\n        When interval is > 0.0 compares process times to system CPU\n        times elapsed before and after the interval (blocking).\n\n        In this case it is recommended for accuracy that this function\n        be called with at least 0.1 seconds between calls.\n\n        Examples:\n\n          >>> import psutil\n          >>> p = psutil.Process()\n          >>> # blocking\n          >>> p.cpu_percent(interval=1)\n          2.0\n          >>> # non-blocking (percentage since last call)\n          >>> p.cpu_percent(interval=None)\n          2.9\n          >>>\n        \"\"\"\n        blocking = interval is not None and interval > 0.0\n        num_cpus = cpu_count()\n        if _POSIX:\n            timer = lambda: _timer() * num_cpus\n        else:\n            timer = lambda: sum(cpu_times())\n        if blocking:\n            st1 = timer()\n            pt1 = self._proc.cpu_times()\n            time.sleep(interval)\n            st2 = timer()\n            pt2 = self._proc.cpu_times()\n        else:\n            st1 = self._last_sys_cpu_times\n            pt1 = self._last_proc_cpu_times\n            st2 = timer()\n            pt2 = self._proc.cpu_times()\n            if st1 is None or pt1 is None:\n                self._last_sys_cpu_times = st2\n                self._last_proc_cpu_times = pt2\n                return 0.0\n\n        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)\n        delta_time = st2 - st1\n        # reset values for next call in case of interval == None\n        self._last_sys_cpu_times = st2\n        self._last_proc_cpu_times = pt2\n\n        try:\n            # The utilization split between all CPUs.\n            # Note: a percentage > 100 is legitimate as it can result\n            # from a process with multiple threads running on different\n            # CPU cores, see:\n            # http://stackoverflow.com/questions/1032357\n            # https://github.com/giampaolo/psutil/issues/474\n            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus\n        except ZeroDivisionError:\n            # interval was too low\n            return 0.0\n        else:\n            return round(overall_percent, 1)\n\n    def cpu_times(self):\n        \"\"\"Return a (user, system) namedtuple representing the\n        accumulated process time, in seconds.\n        This is the same as os.times() but per-process.\n        \"\"\"\n        return self._proc.cpu_times()\n\n    def memory_info(self):\n        \"\"\"Return a tuple representing RSS (Resident Set Size) and VMS\n        (Virtual Memory Size) in bytes.\n\n        On UNIX RSS and VMS are the same values shown by 'ps'.\n\n        On Windows RSS and VMS refer to \"Mem Usage\" and \"VM Size\"\n        columns of taskmgr.exe.\n        \"\"\"\n        return self._proc.memory_info()\n\n
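    # Example (a sketch; the number is illustrative): resident set size of\n    # the current process, in bytes:\n    #\n    #   >>> psutil.Process().memory_info().rss\n    #   10485760\n\n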
    def memory_info_ex(self):\n        \"\"\"Return a namedtuple with variable fields depending on the\n        platform representing extended memory information about\n        this process. All numbers are expressed in bytes.\n        \"\"\"\n        return self._proc.memory_info_ex()\n\n    def memory_percent(self):\n        \"\"\"Compare physical system memory to process resident memory\n        (RSS) and calculate process memory utilization as a percentage.\n        \"\"\"\n        rss = self._proc.memory_info()[0]\n        # use cached value if available\n        total_phymem = _TOTAL_PHYMEM or virtual_memory().total\n        try:\n            return (rss / float(total_phymem)) * 100\n        except ZeroDivisionError:\n            return 0.0\n\n    def memory_maps(self, grouped=True):\n        \"\"\"Return process' mapped memory regions as a list of namedtuples\n        whose fields are variable depending on the platform.\n\n        If 'grouped' is True the mapped regions with the same 'path'\n        are grouped together and the different memory fields are summed.\n\n        If 'grouped' is False every mapped region is shown as a single\n        entity and the namedtuple will also include the mapped region's\n        address space ('addr') and permission set ('perms').\n        \"\"\"\n        it = self._proc.memory_maps()\n        if grouped:\n            d = {}\n            for tupl in it:\n                path = tupl[2]\n                nums = tupl[3:]\n                try:\n                    d[path] = map(lambda x, y: x + y, d[path], nums)\n                except KeyError:\n                    d[path] = nums\n            nt = _psplatform.pmmap_grouped\n            return [nt(path, *d[path]) for path in d]  # NOQA\n        else:\n            nt = _psplatform.pmmap_ext\n            return [nt(*x) for x in it]\n\n    def open_files(self):\n        \"\"\"Return files opened by process as a list of\n        (path, fd) namedtuples including the absolute file name\n        and file descriptor number.\n        \"\"\"\n        return self._proc.open_files()\n\n    def connections(self, kind='inet'):\n        \"\"\"Return connections opened by process as a list of\n        (fd, family, type, laddr, raddr, status) namedtuples.\n        The 'kind' parameter filters for connections that match the\n        following criteria:\n\n        Kind Value      Connections using\n        inet            IPv4 and IPv6\n        inet4           IPv4\n        inet6           IPv6\n        tcp             TCP\n        tcp4            TCP over IPv4\n        tcp6            TCP over IPv6\n        udp             UDP\n        udp4            UDP over IPv4\n        udp6            UDP over IPv6\n        unix            UNIX socket (both UDP and TCP protocols)\n        all             the sum of all the possible families and protocols\n        \"\"\"\n        return self._proc.connections(kind)\n\n    if _POSIX:\n        def _send_signal(self, sig):\n            # XXX: according to \"man 2 kill\" PID 0 has a special\n            # meaning as it refers to <<every process in the process\n            # group of the calling process>>, so should we prevent\n            # it here?\n            try:\n                os.kill(self.pid, sig)\n            except OSError as err:\n                if err.errno == errno.ESRCH:\n                    self._gone = True\n                    raise NoSuchProcess(self.pid, self._name)\n                if err.errno == errno.EPERM:\n                    raise AccessDenied(self.pid, self._name)\n                raise\n\n
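    # Example (a sketch; the address is illustrative): local addresses this\n    # process is listening on:\n    #\n    #   >>> [c.laddr for c in psutil.Process().connections(kind='tcp')\n    #   ...  if c.status == psutil.CONN_LISTEN]\n    #   [('127.0.0.1', 8080)]\n\n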
    @_assert_pid_not_reused\n    def send_signal(self, sig):\n        \"\"\"Send a signal to the process (see signal module constants),\n        pre-emptively checking whether PID has been reused.\n        On Windows only SIGTERM is valid and is treated as an alias\n        for kill().\n        \"\"\"\n        if _POSIX:\n            self._send_signal(sig)\n        else:\n            if sig == signal.SIGTERM:\n                self._proc.kill()\n            else:\n                raise ValueError(\"only SIGTERM is supported on Windows\")\n\n    @_assert_pid_not_reused\n    def suspend(self):\n        \"\"\"Suspend process execution with SIGSTOP pre-emptively checking\n        whether PID has been reused.\n        On Windows this has the effect of suspending all process threads.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGSTOP)\n        else:\n            self._proc.suspend()\n\n    @_assert_pid_not_reused\n    def resume(self):\n        \"\"\"Resume process execution with SIGCONT pre-emptively checking\n        whether PID has been reused.\n        On Windows this has the effect of resuming all process threads.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGCONT)\n        else:\n            self._proc.resume()\n\n    @_assert_pid_not_reused\n    def terminate(self):\n        \"\"\"Terminate the process with SIGTERM pre-emptively checking\n        whether PID has been reused.\n        On Windows this is an alias for kill().\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGTERM)\n        else:\n            self._proc.kill()\n\n    @_assert_pid_not_reused\n    def kill(self):\n        \"\"\"Kill the current process with SIGKILL pre-emptively checking\n        whether PID has been reused.\n        \"\"\"\n        if _POSIX:\n            self._send_signal(signal.SIGKILL)\n        else:\n            self._proc.kill()\n\n    def wait(self, timeout=None):\n        \"\"\"Wait for process to terminate and, if process is a child\n        of os.getpid(), also return its exit code, else None.\n\n        If the process is already terminated immediately return None\n        instead of raising NoSuchProcess.\n\n        If timeout (in seconds) is specified and process is still alive\n        raise TimeoutExpired.\n\n        To wait for multiple Process(es) use psutil.wait_procs().\n        \"\"\"\n        if timeout is not None and not timeout >= 0:\n            raise ValueError(\"timeout must be a non-negative number\")\n        return self._proc.wait(timeout)\n\n
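    # Example (a sketch; 'pid' is a hypothetical PID): graceful shutdown,\n    # escalating to SIGKILL if the process ignores SIGTERM:\n    #\n    #   >>> p = psutil.Process(pid)\n    #   >>> p.terminate()\n    #   >>> try:\n    #   ...     p.wait(timeout=3)\n    #   ... except psutil.TimeoutExpired:\n    #   ...     p.kill()\n\n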
    # --- deprecated APIs\n\n    _locals = set(locals())\n\n    @_deprecated_method(replacement='children')\n    def get_children(self):\n        pass\n\n    @_deprecated_method(replacement='connections')\n    def get_connections(self):\n        pass\n\n    if \"cpu_affinity\" in _locals:\n        @_deprecated_method(replacement='cpu_affinity')\n        def get_cpu_affinity(self):\n            pass\n\n        @_deprecated_method(replacement='cpu_affinity')\n        def set_cpu_affinity(self, cpus):\n            pass\n\n    @_deprecated_method(replacement='cpu_percent')\n    def get_cpu_percent(self):\n        pass\n\n    @_deprecated_method(replacement='cpu_times')\n    def get_cpu_times(self):\n        pass\n\n    @_deprecated_method(replacement='cwd')\n    def getcwd(self):\n        pass\n\n    @_deprecated_method(replacement='memory_info_ex')\n    def get_ext_memory_info(self):\n        pass\n\n    if \"io_counters\" in _locals:\n        @_deprecated_method(replacement='io_counters')\n        def get_io_counters(self):\n            pass\n\n    if \"ionice\" in _locals:\n        @_deprecated_method(replacement='ionice')\n        def get_ionice(self):\n            pass\n\n        @_deprecated_method(replacement='ionice')\n        def set_ionice(self, ioclass, value=None):\n            pass\n\n    @_deprecated_method(replacement='memory_info')\n    def get_memory_info(self):\n        pass\n\n    @_deprecated_method(replacement='memory_maps')\n    def get_memory_maps(self):\n        pass\n\n    @_deprecated_method(replacement='memory_percent')\n    def get_memory_percent(self):\n        pass\n\n    @_deprecated_method(replacement='nice')\n    def get_nice(self):\n        pass\n\n    @_deprecated_method(replacement='num_ctx_switches')\n    def get_num_ctx_switches(self):\n        pass\n\n    if 'num_fds' in _locals:\n        @_deprecated_method(replacement='num_fds')\n        def get_num_fds(self):\n            pass\n\n    if 'num_handles' in _locals:\n        @_deprecated_method(replacement='num_handles')\n        def get_num_handles(self):\n            pass\n\n    @_deprecated_method(replacement='num_threads')\n    def get_num_threads(self):\n        pass\n\n    @_deprecated_method(replacement='open_files')\n    def get_open_files(self):\n        pass\n\n    if \"rlimit\" in _locals:\n        @_deprecated_method(replacement='rlimit')\n        def get_rlimit(self):\n            pass\n\n        @_deprecated_method(replacement='rlimit')\n        def set_rlimit(self, resource, limits):\n            pass\n\n    @_deprecated_method(replacement='threads')\n    def get_threads(self):\n        pass\n\n    @_deprecated_method(replacement='nice')\n    def set_nice(self, value):\n        pass\n\n    del _locals\n\n\n# =====================================================================\n# --- Popen class\n# =====================================================================\n\nclass Popen(Process):\n    \"\"\"A more convenient interface to stdlib subprocess module.\n    It starts a sub process and deals with it exactly as when using\n    subprocess.Popen class but in addition also provides all the\n    properties and methods of psutil.Process class as a unified\n    interface:\n\n      >>> import psutil\n      >>> from subprocess import PIPE\n      >>> p = psutil.Popen([\"python\", \"-c\", \"print 'hi'\"], stdout=PIPE)\n      >>> p.name()\n      'python'\n      >>> p.uids()\n      user(real=1000, effective=1000, saved=1000)\n      >>> p.username()\n      'giampaolo'\n      >>> p.communicate()\n      ('hi\\n', None)\n      >>> p.terminate()\n      >>> p.wait(timeout=2)\n      0\n      >>>\n\n    For method names common to both classes such as kill(), terminate()\n    and wait(), psutil.Process implementation takes precedence.\n\n    Unlike subprocess.Popen this class pre-emptively checks whether PID\n    has been reused on send_signal(), terminate() and kill() so that\n    you don't accidentally terminate another process, fixing\n    http://bugs.python.org/issue6973.\n\n    For complete documentation refer to:\n    http://docs.python.org/library/subprocess.html\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        # Explicitly avoid raising NoSuchProcess in case the process\n        # spawned by subprocess.Popen terminates too quickly, see:\n        # https://github.com/giampaolo/psutil/issues/193\n        self.__subproc = subprocess.Popen(*args, **kwargs)\n        self._init(self.__subproc.pid, _ignore_nsp=True)\n\n    def __dir__(self):\n        return sorted(set(dir(Popen) + dir(subprocess.Popen)))\n\n
    def __getattribute__(self, name):\n        try:\n            return object.__getattribute__(self, name)\n        except AttributeError:\n            try:\n                return object.__getattribute__(self.__subproc, name)\n            except AttributeError:\n                raise AttributeError(\"%s instance has no attribute '%s'\"\n                                     % (self.__class__.__name__, name))\n\n    def wait(self, timeout=None):\n        if self.__subproc.returncode is not None:\n            return self.__subproc.returncode\n        ret = super(Popen, self).wait(timeout)\n        self.__subproc.returncode = ret\n        return ret\n\n\n# =====================================================================\n# --- system processes related functions\n# =====================================================================\n\ndef pids():\n    \"\"\"Return a list of currently running PIDs.\"\"\"\n    return _psplatform.pids()\n\n\ndef pid_exists(pid):\n    \"\"\"Return True if given PID exists in the current process list.\n    This is faster than doing \"pid in psutil.pids()\" and\n    should be preferred.\n    \"\"\"\n    if pid < 0:\n        return False\n    elif pid == 0 and _POSIX:\n        # On POSIX we use os.kill() to determine PID existence.\n        # According to \"man 2 kill\" PID 0 has a special meaning\n        # though: it refers to <<every process in the process\n        # group of the calling process>> and that is not what we\n        # want to do here.\n        return pid in pids()\n    else:\n        return _psplatform.pid_exists(pid)\n\n\n_pmap = {}\n\n\ndef process_iter():\n    \"\"\"Return a generator yielding a Process instance for all\n    running processes.\n\n    Every new Process instance is only created once and then cached\n    into an internal table which is updated every time this is used.\n\n    Cached Process instances are checked for identity so that you're\n    safe in case a PID has been reused by another process, in which\n    case the cached instance is updated.\n\n    The sorting order in which processes are yielded is based on\n    their PIDs.\n    \"\"\"\n    def add(pid):\n        proc = Process(pid)\n        _pmap[proc.pid] = proc\n        return proc\n\n    def remove(pid):\n        _pmap.pop(pid, None)\n\n    a = set(pids())\n    b = set(_pmap.keys())\n    new_pids = a - b\n    gone_pids = b - a\n\n    for pid in gone_pids:\n        remove(pid)\n    for pid, proc in sorted(list(_pmap.items()) +\n                            list(dict.fromkeys(new_pids).items())):\n        try:\n            if proc is None:  # new process\n                yield add(pid)\n            else:\n                # use is_running() to check whether PID has been reused by\n                # another process in which case yield a new Process instance\n                if proc.is_running():\n                    yield proc\n                else:\n                    yield add(pid)\n        except NoSuchProcess:\n            remove(pid)\n        except AccessDenied:\n            # Process creation time can't be determined hence there's\n            # no way to tell whether the pid of the cached process\n            # has been reused. Just return the cached version.\n            yield proc\n\n\n
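# Example (a sketch): iterate over all processes, tolerating the ones that\n# disappear mid-iteration:\n#\n#   >>> for proc in psutil.process_iter():\n#   ...     try:\n#   ...         print(\"%s %s\" % (proc.pid, proc.name()))\n#   ...     except psutil.NoSuchProcess:\n#   ...         pass\n\n\n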
def wait_procs(procs, timeout=None, callback=None):\n    \"\"\"Convenience function which waits for a list of processes to\n    terminate.\n\n    Return a (gone, alive) tuple indicating which processes\n    are gone and which ones are still alive.\n\n    The gone ones will have a new 'returncode' attribute indicating\n    process exit status (may be None).\n\n    'callback' is a function which gets called every time a process\n    terminates (a Process instance is passed as callback argument).\n\n    Function will return as soon as all processes terminate or when\n    timeout occurs.\n\n    Typical use case is:\n\n     - send SIGTERM to a list of processes\n     - give them some time to terminate\n     - send SIGKILL to those ones which are still alive\n\n    Example:\n\n    >>> def on_terminate(proc):\n    ...     print(\"process {} terminated\".format(proc))\n    ...\n    >>> for p in procs:\n    ...     p.terminate()\n    ...\n    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)\n    >>> for p in alive:\n    ...     p.kill()\n    \"\"\"\n    def check_gone(proc, timeout):\n        try:\n            returncode = proc.wait(timeout=timeout)\n        except TimeoutExpired:\n            pass\n        else:\n            if returncode is not None or not proc.is_running():\n                proc.returncode = returncode\n                gone.add(proc)\n                if callback is not None:\n                    callback(proc)\n\n    if timeout is not None and not timeout >= 0:\n        msg = \"timeout must be a non-negative number, got %s\" % timeout\n        raise ValueError(msg)\n    gone = set()\n    alive = set(procs)\n    if callback is not None and not callable(callback):\n        raise TypeError(\"callback %r is not a callable\" % callback)\n    if timeout is not None:\n        deadline = _timer() + timeout\n\n    while alive:\n        if timeout is not None and timeout <= 0:\n            break\n        for proc in alive:\n            # Make sure that every complete iteration (all processes)\n            # will last max 1 sec.\n            # We do this because we don't want to wait too long on a\n            # single process: in case it terminates too late other\n            # processes may disappear in the meantime and their PID\n            # reused.\n            max_timeout = 1.0 / len(alive)\n            if timeout is not None:\n                timeout = min((deadline - _timer()), max_timeout)\n                if timeout <= 0:\n                    break\n                check_gone(proc, timeout)\n            else:\n                check_gone(proc, max_timeout)\n        alive = alive - gone\n\n    if alive:\n        # Last attempt over processes survived so far.\n        # timeout == 0 won't make this function wait any further.\n        for proc in alive:\n            check_gone(proc, 0)\n        alive = alive - gone\n\n    return (list(gone), list(alive))\n\n\n
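# Example (a sketch; 'parent' is a hypothetical Process instance): terminate\n# a whole process tree, then SIGKILL whatever survived the timeout:\n#\n#   >>> procs = parent.children(recursive=True)\n#   >>> for p in procs:\n#   ...     p.terminate()\n#   >>> gone, alive = wait_procs(procs, timeout=3)\n#   >>> for p in alive:\n#   ...     p.kill()\n\n\n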
# =====================================================================\n# --- CPU related functions\n# =====================================================================\n\n@memoize\ndef cpu_count(logical=True):\n    \"\"\"Return the number of logical CPUs in the system (same as\n    os.cpu_count() in Python 3.4).\n\n    If logical is False return the number of physical cores only\n    (hyper thread CPUs are excluded).\n\n    Return None if undetermined.\n\n    The return value is cached after first call.\n    If desired, the cache can be cleared like this:\n\n    >>> psutil.cpu_count.cache_clear()\n    \"\"\"\n    if logical:\n        return _psplatform.cpu_count_logical()\n    else:\n        return _psplatform.cpu_count_physical()\n\n\ndef cpu_times(percpu=False):\n    \"\"\"Return system-wide CPU times as a namedtuple.\n    Every CPU time represents the seconds the CPU has spent in the given mode.\n    The namedtuple's fields availability varies depending on the platform:\n     - user\n     - system\n     - idle\n     - nice (UNIX)\n     - iowait (Linux)\n     - irq (Linux, FreeBSD)\n     - softirq (Linux)\n     - steal (Linux >= 2.6.11)\n     - guest (Linux >= 2.6.24)\n     - guest_nice (Linux >= 3.2.0)\n\n    When percpu is True return a list of namedtuples for each CPU.\n    First element of the list refers to first CPU, second element\n    to second CPU and so on.\n    The order of the list is consistent across calls.\n    \"\"\"\n    if not percpu:\n        return _psplatform.cpu_times()\n    else:\n        return _psplatform.per_cpu_times()\n\n\n_last_cpu_times = cpu_times()\n_last_per_cpu_times = cpu_times(percpu=True)\n\n\ndef cpu_percent(interval=None, percpu=False):\n    \"\"\"Return a float representing the current system-wide CPU\n    utilization as a percentage.\n\n    When interval is > 0.0 compares system CPU times elapsed before\n    and after the interval (blocking).\n\n    When interval is 0.0 or None compares system CPU times elapsed\n    since last call or module import, returning immediately (non\n    blocking). That means the first time this is called it will\n    return a meaningless 0.0 value which you should ignore.\n    In this case it is recommended for accuracy that this function\n    be called with at least 0.1 seconds between calls.\n\n    When percpu is True returns a list of floats representing the\n    utilization as a percentage for each CPU.\n    First element of the list refers to first CPU, second element\n    to second CPU and so on.\n    The order of the list is consistent across calls.\n\n    Examples:\n\n      >>> # blocking, system-wide\n      >>> psutil.cpu_percent(interval=1)\n      2.0\n      >>>\n      >>> # blocking, per-cpu\n      >>> psutil.cpu_percent(interval=1, percpu=True)\n      [2.0, 1.0]\n      >>>\n      >>> # non-blocking (percentage since last call)\n      >>> psutil.cpu_percent(interval=None)\n      2.9\n      >>>\n    \"\"\"\n    global _last_cpu_times\n    global _last_per_cpu_times\n    blocking = interval is not None and interval > 0.0\n\n    def calculate(t1, t2):\n        t1_all = sum(t1)\n        t1_busy = t1_all - t1.idle\n\n        t2_all = sum(t2)\n        t2_busy = t2_all - t2.idle\n\n        # this usually indicates a float precision issue\n        if t2_busy <= t1_busy:\n            return 0.0\n\n        busy_delta = t2_busy - t1_busy\n        all_delta = t2_all - t1_all\n        busy_perc = (busy_delta / all_delta) * 100\n        return round(busy_perc, 1)\n\n    # system-wide usage\n    if not percpu:\n        if blocking:\n            t1 = cpu_times()\n            time.sleep(interval)\n        else:\n            t1 = _last_cpu_times\n        _last_cpu_times = cpu_times()\n        return calculate(t1, _last_cpu_times)\n    # per-cpu usage\n    else:\n        ret = []\n        if blocking:\n            tot1 = cpu_times(percpu=True)\n            time.sleep(interval)\n        else:\n            tot1 = _last_per_cpu_times\n        _last_per_cpu_times = cpu_times(percpu=True)\n        for t1, t2 in zip(tot1, _last_per_cpu_times):\n            ret.append(calculate(t1, 
t2))\n        return ret\n\n\n# Use separate global vars for cpu_times_percent() so that it's\n# independent from cpu_percent() and they can both be used within\n# the same program.\n_last_cpu_times_2 = _last_cpu_times\n_last_per_cpu_times_2 = _last_per_cpu_times\n\n\ndef cpu_times_percent(interval=None, percpu=False):\n    \"\"\"Same as cpu_percent() but provides utilization percentages\n    for each specific CPU time as is returned by cpu_times().\n    For instance, on Linux we'll get:\n\n      >>> cpu_times_percent()\n      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,\n                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)\n      >>>\n\n    interval and percpu arguments have the same meaning as in\n    cpu_percent().\n    \"\"\"\n    global _last_cpu_times_2\n    global _last_per_cpu_times_2\n    blocking = interval is not None and interval > 0.0\n\n    def calculate(t1, t2):\n        nums = []\n        all_delta = sum(t2) - sum(t1)\n        for field in t1._fields:\n            field_delta = getattr(t2, field) - getattr(t1, field)\n            try:\n                field_perc = (100 * field_delta) / all_delta\n            except ZeroDivisionError:\n                field_perc = 0.0\n            field_perc = round(field_perc, 1)\n            if _WINDOWS:\n                # XXX\n                # Work around:\n                # https://github.com/giampaolo/psutil/issues/392\n                # CPU times are always supposed to increase over time\n                # or at least remain the same and that's because time\n                # cannot go backwards.\n                # Surprisingly sometimes this might not be the case on\n                # Windows where 'system' CPU time can be smaller\n                # compared to the previous call, resulting in corrupted\n                # percentages (< 0 or > 100).\n                # I really don't know what to do about that except\n                # forcing the value to 0 or 100.\n                if field_perc > 100.0:\n                    field_perc = 100.0\n                elif field_perc < 0.0:\n                    field_perc = 0.0\n            nums.append(field_perc)\n        return _psplatform.scputimes(*nums)\n\n    # system-wide usage\n    if not percpu:\n        if blocking:\n            t1 = cpu_times()\n            time.sleep(interval)\n        else:\n            t1 = _last_cpu_times_2\n        _last_cpu_times_2 = cpu_times()\n        return calculate(t1, _last_cpu_times_2)\n    # per-cpu usage\n    else:\n        ret = []\n        if blocking:\n            tot1 = cpu_times(percpu=True)\n            time.sleep(interval)\n        else:\n            tot1 = _last_per_cpu_times_2\n        _last_per_cpu_times_2 = cpu_times(percpu=True)\n        for t1, t2 in zip(tot1, _last_per_cpu_times_2):\n            ret.append(calculate(t1, t2))\n        return ret\n\n\n# =====================================================================\n# --- system memory related functions\n# =====================================================================\n\ndef virtual_memory():\n    \"\"\"Return statistics about system memory usage as a namedtuple\n    including the following fields, expressed in bytes:\n\n     - total:\n       total physical memory available.\n\n     - available:\n       the actual amount of available memory that can be given\n       instantly to processes that request more memory in bytes; this\n       is calculated by summing different memory values depending on\n       the platform (e.g. 
free + buffers + cached on Linux) and it is\n       supposed to be used to monitor actual memory usage in a cross\n       platform fashion.\n\n     - percent:\n       the percentage usage calculated as (total - available) / total * 100\n\n     - used:\n       memory used, calculated differently depending on the platform and\n       designed for informational purposes only:\n        OSX: active + inactive + wired\n        BSD: active + wired + cached\n        LINUX: total - free\n\n     - free:\n       memory not being used at all (zeroed) that is readily available;\n       note that this doesn't reflect the actual memory available\n       (use 'available' instead)\n\n    Platform-specific fields:\n\n     - active (UNIX):\n       memory currently in use or very recently used, and so it is in RAM.\n\n     - inactive (UNIX):\n       memory that is marked as not used.\n\n     - buffers (BSD, Linux):\n       cache for things like file system metadata.\n\n     - cached (BSD, OSX):\n       cache for various things.\n\n     - wired (OSX, BSD):\n       memory that is marked to always stay in RAM. It is never moved to disk.\n\n     - shared (BSD):\n       memory that may be simultaneously accessed by multiple processes.\n\n    The sum of 'used' and 'available' does not necessarily equal total.\n    On Windows 'available' and 'free' are the same.\n    \"\"\"\n    global _TOTAL_PHYMEM\n    ret = _psplatform.virtual_memory()\n    # cached for later use in Process.memory_percent()\n    _TOTAL_PHYMEM = ret.total\n    return ret\n\n\ndef swap_memory():\n    \"\"\"Return system swap memory statistics as a namedtuple including\n    the following fields:\n\n     - total:   total swap memory in bytes\n     - used:    used swap memory in bytes\n     - free:    free swap memory in bytes\n     - percent: the percentage usage\n     - sin:     no. of bytes the system has swapped in from disk (cumulative)\n     - sout:    no. 
of bytes the system has swapped out from disk (cumulative)\n\n    'sin' and 'sout' on Windows are meaningless and always set to 0.\n    \"\"\"\n    return _psplatform.swap_memory()\n\n\n# =====================================================================\n# --- disks/partitions related functions\n# =====================================================================\n\ndef disk_usage(path):\n    \"\"\"Return disk usage statistics about the given path as a namedtuple\n    including total, used and free space expressed in bytes plus the\n    percentage usage.\n    \"\"\"\n    return _psplatform.disk_usage(path)\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return mounted partitions as a list of\n    (device, mountpoint, fstype, opts) namedtuples.\n    'opts' field is a raw string separated by commas indicating mount\n    options which may vary depending on the platform.\n\n    If \"all\" parameter is False return physical devices only and ignore\n    all others.\n    \"\"\"\n    return _psplatform.disk_partitions(all)\n\n\ndef disk_io_counters(perdisk=False):\n    \"\"\"Return system disk I/O statistics as a namedtuple including\n    the following fields:\n\n     - read_count:  number of reads\n     - write_count: number of writes\n     - read_bytes:  number of bytes read\n     - write_bytes: number of bytes written\n     - read_time:   time spent reading from disk (in milliseconds)\n     - write_time:  time spent writing to disk (in milliseconds)\n\n    If perdisk is True return the same information for every\n    physical disk installed on the system as a dictionary\n    with partition names as the keys and the namedtuple\n    described above as the values.\n\n    On recent Windows versions 'diskperf -y' command may need to be\n    executed first otherwise this function won't find any disk.\n    \"\"\"\n    rawdict = _psplatform.disk_io_counters()\n    if not rawdict:\n        raise RuntimeError(\"couldn't find any physical disk\")\n    if perdisk:\n        for disk, fields in rawdict.items():\n            rawdict[disk] = _nt_sys_diskio(*fields)\n        return rawdict\n    else:\n        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])\n\n\n# =====================================================================\n# --- network related functions\n# =====================================================================\n\ndef net_io_counters(pernic=False):\n    \"\"\"Return network I/O statistics as a namedtuple including\n    the following fields:\n\n     - bytes_sent:   number of bytes sent\n     - bytes_recv:   number of bytes received\n     - packets_sent: number of packets sent\n     - packets_recv: number of packets received\n     - errin:        total number of errors while receiving\n     - errout:       total number of errors while sending\n     - dropin:       total number of incoming packets which were dropped\n     - dropout:      total number of outgoing packets which were dropped\n                     (always 0 on OSX and BSD)\n\n    If pernic is True return the same information for every\n    network interface installed on the system as a dictionary\n    with network interface names as the keys and the namedtuple\n    described above as the values.\n    \"\"\"\n    rawdict = _psplatform.net_io_counters()\n    if not rawdict:\n        raise RuntimeError(\"couldn't find any network interface\")\n    if pernic:\n        for nic, fields in rawdict.items():\n            rawdict[nic] = _nt_sys_netio(*fields)\n        return rawdict\n    else:\n        return 
_nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])\n\n\ndef net_connections(kind='inet'):\n    \"\"\"Return system-wide connections as a list of\n    (fd, family, type, laddr, raddr, status, pid) namedtuples.\n    In case of limited privileges 'fd' and 'pid' may be set to -1\n    and None respectively.\n    The 'kind' parameter filters for connections that fit the\n    following criteria:\n\n    Kind Value      Connections using\n    inet            IPv4 and IPv6\n    inet4           IPv4\n    inet6           IPv6\n    tcp             TCP\n    tcp4            TCP over IPv4\n    tcp6            TCP over IPv6\n    udp             UDP\n    udp4            UDP over IPv4\n    udp6            UDP over IPv6\n    unix            UNIX socket (both UDP and TCP protocols)\n    all             the sum of all the possible families and protocols\n    \"\"\"\n    return _psplatform.net_connections(kind)\n\n\n# =====================================================================\n# --- other system related functions\n# =====================================================================\n\n\ndef boot_time():\n    \"\"\"Return the system boot time expressed in seconds since the epoch.\n    This is also available as psutil.BOOT_TIME.\n    \"\"\"\n    # Note: we are not caching this because it is subject to\n    # system clock updates.\n    return _psplatform.boot_time()\n\n\ndef users():\n    \"\"\"Return users currently connected on the system as a list of\n    namedtuples including the following fields.\n\n     - user: the name of the user\n     - terminal: the tty or pseudo-tty associated with the user, if any.\n     - host: the host name associated with the entry, if any.\n     - started: the creation time as a floating point number expressed in\n       seconds since the epoch.\n    \"\"\"\n    return _psplatform.users()\n\n\n# =====================================================================\n# --- deprecated functions\n# =====================================================================\n\n@_deprecated(replacement=\"psutil.pids()\")\ndef get_pid_list():\n    return pids()\n\n\n@_deprecated(replacement=\"list(process_iter())\")\ndef get_process_list():\n    return list(process_iter())\n\n\n@_deprecated(replacement=\"psutil.users()\")\ndef get_users():\n    return users()\n\n\n@_deprecated(replacement=\"psutil.virtual_memory()\")\ndef phymem_usage():\n    \"\"\"Return the amount of total, used and free physical memory\n    on the system in bytes plus the percentage usage.\n    Deprecated; use psutil.virtual_memory() instead.\n    \"\"\"\n    return virtual_memory()\n\n\n@_deprecated(replacement=\"psutil.swap_memory()\")\ndef virtmem_usage():\n    return swap_memory()\n\n\n@_deprecated(replacement=\"psutil.phymem_usage().free\")\ndef avail_phymem():\n    return phymem_usage().free\n\n\n@_deprecated(replacement=\"psutil.phymem_usage().used\")\ndef used_phymem():\n    return phymem_usage().used\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().total\")\ndef total_virtmem():\n    return virtmem_usage().total\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().used\")\ndef used_virtmem():\n    return virtmem_usage().used\n\n\n@_deprecated(replacement=\"psutil.virtmem_usage().free\")\ndef avail_virtmem():\n    return virtmem_usage().free\n\n\n@_deprecated(replacement=\"psutil.net_io_counters()\")\ndef network_io_counters(pernic=False):\n    return net_io_counters(pernic)\n\n\ndef test():\n    \"\"\"List info of all currently running processes emulating ps aux\n    output.\n    \"\"\"\n  
  import datetime\n\n    today_day = datetime.date.today()\n    templ = \"%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s\"\n    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',\n             'create_time', 'memory_info']\n    if _POSIX:\n        attrs.append('uids')\n        attrs.append('terminal')\n    print(templ % (\"USER\", \"PID\", \"%CPU\", \"%MEM\", \"VSZ\", \"RSS\", \"TTY\",\n                   \"START\", \"TIME\", \"COMMAND\"))\n    for p in process_iter():\n        try:\n            pinfo = p.as_dict(attrs, ad_value='')\n        except NoSuchProcess:\n            pass\n        else:\n            if pinfo['create_time']:\n                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])\n                if ctime.date() == today_day:\n                    ctime = ctime.strftime(\"%H:%M\")\n                else:\n                    ctime = ctime.strftime(\"%b%d\")\n            else:\n                ctime = ''\n            cputime = time.strftime(\"%M:%S\",\n                                    time.localtime(sum(pinfo['cpu_times'])))\n            try:\n                user = p.username()\n            except KeyError:\n                if _POSIX:\n                    if pinfo['uids']:\n                        user = str(pinfo['uids'].real)\n                    else:\n                        user = ''\n                else:\n                    raise\n            except Error:\n                user = ''\n            if _WINDOWS and '\\\\' in user:\n                user = user.split('\\\\')[1]\n            vms = pinfo['memory_info'] and \\\n                int(pinfo['memory_info'].vms / 1024) or '?'\n            rss = pinfo['memory_info'] and \\\n                int(pinfo['memory_info'].rss / 1024) or '?'\n            memp = pinfo['memory_percent'] and \\\n                round(pinfo['memory_percent'], 1) or '?'\n            print(templ % (\n                user[:10],\n                pinfo['pid'],\n                pinfo['cpu_percent'],\n                memp,\n                vms,\n                rss,\n                pinfo.get('terminal', '') or '?',\n                ctime,\n                cputime,\n                pinfo['name'].strip() or '?'))\n\n\ndef _replace_module():\n    \"\"\"Dirty hack to replace the module object in order to access\n    deprecated module constants, see:\n    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html\n    \"\"\"\n    class ModuleWrapper(object):\n\n        def __repr__(self):\n            return repr(self._module)\n        __str__ = __repr__\n\n        @property\n        def NUM_CPUS(self):\n            msg = \"NUM_CPUS constant is deprecated; use cpu_count() instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return cpu_count()\n\n        @property\n        def BOOT_TIME(self):\n            msg = \"BOOT_TIME constant is deprecated; use boot_time() instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return boot_time()\n\n        @property\n        def TOTAL_PHYMEM(self):\n            msg = \"TOTAL_PHYMEM constant is deprecated; \" \\\n                  \"use virtual_memory().total instead\"\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return virtual_memory().total\n\n    mod = ModuleWrapper()\n    mod.__dict__ = globals()\n    mod._module = sys.modules[__name__]\n    sys.modules[__name__] = mod\n\n\n_replace_module()\ndel memoize, division, _replace_module\nif 
sys.version_info < (3, 0):\n    del num\n\nif __name__ == \"__main__\":\n    test()\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_common.py",
    "content": "# /usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Common objects shared by all _ps* modules.\"\"\"\n\nfrom __future__ import division\nimport errno\nimport functools\nimport os\nimport socket\nimport stat\nimport warnings\ntry:\n    import threading\nexcept ImportError:\n    import dummy_threading as threading\n\nfrom collections import namedtuple\nfrom socket import AF_INET, SOCK_STREAM, SOCK_DGRAM\n\n# --- constants\n\nAF_INET6 = getattr(socket, 'AF_INET6', None)\nAF_UNIX = getattr(socket, 'AF_UNIX', None)\n\nSTATUS_RUNNING = \"running\"\nSTATUS_SLEEPING = \"sleeping\"\nSTATUS_DISK_SLEEP = \"disk-sleep\"\nSTATUS_STOPPED = \"stopped\"\nSTATUS_TRACING_STOP = \"tracing-stop\"\nSTATUS_ZOMBIE = \"zombie\"\nSTATUS_DEAD = \"dead\"\nSTATUS_WAKE_KILL = \"wake-kill\"\nSTATUS_WAKING = \"waking\"\nSTATUS_IDLE = \"idle\"  # BSD\nSTATUS_LOCKED = \"locked\"  # BSD\nSTATUS_WAITING = \"waiting\"  # BSD\n\nCONN_ESTABLISHED = \"ESTABLISHED\"\nCONN_SYN_SENT = \"SYN_SENT\"\nCONN_SYN_RECV = \"SYN_RECV\"\nCONN_FIN_WAIT1 = \"FIN_WAIT1\"\nCONN_FIN_WAIT2 = \"FIN_WAIT2\"\nCONN_TIME_WAIT = \"TIME_WAIT\"\nCONN_CLOSE = \"CLOSE\"\nCONN_CLOSE_WAIT = \"CLOSE_WAIT\"\nCONN_LAST_ACK = \"LAST_ACK\"\nCONN_LISTEN = \"LISTEN\"\nCONN_CLOSING = \"CLOSING\"\nCONN_NONE = \"NONE\"\n\n\n# --- functions\n\ndef usage_percent(used, total, _round=None):\n    \"\"\"Calculate percentage usage of 'used' against 'total'.\"\"\"\n    try:\n        ret = (used / total) * 100\n    except ZeroDivisionError:\n        ret = 0\n    if _round is not None:\n        return round(ret, _round)\n    else:\n        return ret\n\n\ndef memoize(fun):\n    \"\"\"A simple memoize decorator for functions supporting (hashable)\n    positional arguments.\n    It also provides a cache_clear() function for clearing the cache:\n\n    >>> @memoize\n    ... def foo()\n    ...     
return 1\n    ...\n    >>> foo()\n    1\n    >>> foo.cache_clear()\n    >>>\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(*args, **kwargs):\n        key = (args, frozenset(sorted(kwargs.items())))\n        lock.acquire()\n        try:\n            try:\n                return cache[key]\n            except KeyError:\n                ret = cache[key] = fun(*args, **kwargs)\n        finally:\n            lock.release()\n        return ret\n\n    def cache_clear():\n        \"\"\"Clear cache.\"\"\"\n        lock.acquire()\n        try:\n            cache.clear()\n        finally:\n            lock.release()\n\n    lock = threading.RLock()\n    cache = {}\n    wrapper.cache_clear = cache_clear\n    return wrapper\n\n\n# http://code.activestate.com/recipes/577819-deprecated-decorator/\ndef deprecated(replacement=None):\n    \"\"\"A decorator which can be used to mark functions as deprecated.\"\"\"\n    def outer(fun):\n        msg = \"psutil.%s is deprecated\" % fun.__name__\n        if replacement is not None:\n            msg += \"; use %s instead\" % replacement\n        if fun.__doc__ is None:\n            fun.__doc__ = msg\n\n        @functools.wraps(fun)\n        def inner(*args, **kwargs):\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return fun(*args, **kwargs)\n\n        return inner\n    return outer\n\n\ndef deprecated_method(replacement):\n    \"\"\"A decorator which can be used to mark a method as deprecated.\n    'replacement' is the method name which will be called instead.\n    \"\"\"\n    def outer(fun):\n        msg = \"%s() is deprecated; use %s() instead\" % (\n            fun.__name__, replacement)\n        if fun.__doc__ is None:\n            fun.__doc__ = msg\n\n        @functools.wraps(fun)\n        def inner(self, *args, **kwargs):\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n            return getattr(self, replacement)(*args, **kwargs)\n        return inner\n    return outer\n\n\ndef isfile_strict(path):\n    \"\"\"Same as os.path.isfile() but does not swallow EACCES / EPERM\n    exceptions, see:\n    http://mail.python.org/pipermail/python-dev/2012-June/120787.html\n    \"\"\"\n    try:\n        st = os.stat(path)\n    except OSError as err:\n        if err.errno in (errno.EPERM, errno.EACCES):\n            raise\n        return False\n    else:\n        return stat.S_ISREG(st.st_mode)\n\n\n# --- Process.connections() 'kind' parameter mapping\n\nconn_tmap = {\n    \"all\": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),\n    \"tcp\": ([AF_INET, AF_INET6], [SOCK_STREAM]),\n    \"tcp4\": ([AF_INET], [SOCK_STREAM]),\n    \"udp\": ([AF_INET, AF_INET6], [SOCK_DGRAM]),\n    \"udp4\": ([AF_INET], [SOCK_DGRAM]),\n    \"inet\": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),\n    \"inet4\": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),\n    \"inet6\": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),\n}\n\nif AF_INET6 is not None:\n    conn_tmap.update({\n        \"tcp6\": ([AF_INET6], [SOCK_STREAM]),\n        \"udp6\": ([AF_INET6], [SOCK_DGRAM]),\n    })\n\nif AF_UNIX is not None:\n    conn_tmap.update({\n        \"unix\": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),\n    })\n\ndel AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket\n\n\n# --- namedtuples for psutil.* system-related functions\n\n# psutil.swap_memory()\nsswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',\n                             'sout'])\n# psutil.disk_usage()\nsdiskusage = namedtuple('sdiskusage', 
['total', 'used', 'free', 'percent'])\n# psutil.disk_io_counters()\nsdiskio = namedtuple('sdiskio', ['read_count', 'write_count',\n                                 'read_bytes', 'write_bytes',\n                                 'read_time', 'write_time'])\n# psutil.disk_partitions()\nsdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])\n# psutil.net_io_counters()\nsnetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',\n                               'packets_sent', 'packets_recv',\n                               'errin', 'errout',\n                               'dropin', 'dropout'])\n# psutil.users()\nsuser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])\n# psutil.net_connections()\nsconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',\n                             'status', 'pid'])\n\n\n# --- namedtuples for psutil.Process methods\n\n# psutil.Process.memory_info()\npmem = namedtuple('pmem', ['rss', 'vms'])\n# psutil.Process.cpu_times()\npcputimes = namedtuple('pcputimes', ['user', 'system'])\n# psutil.Process.open_files()\npopenfile = namedtuple('popenfile', ['path', 'fd'])\n# psutil.Process.threads()\npthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])\n# psutil.Process.uids()\npuids = namedtuple('puids', ['real', 'effective', 'saved'])\n# psutil.Process.gids()\npgids = namedtuple('pgids', ['real', 'effective', 'saved'])\n# psutil.Process.io_counters()\npio = namedtuple('pio', ['read_count', 'write_count',\n                         'read_bytes', 'write_bytes'])\n# psutil.Process.ionice()\npionice = namedtuple('pionice', ['ioclass', 'value'])\n# psutil.Process.ctx_switches()\npctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])\n\n\n# --- misc\n\n# backward compatibility layer for Process.connections() ntuple\nclass pconn(\n    namedtuple('pconn',\n               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):\n    __slots__ = ()\n\n    @property\n    def local_address(self):\n        warnings.warn(\"'local_address' field is deprecated; use 'laddr' \"\n                      \"instead\", category=DeprecationWarning, stacklevel=2)\n        return self.laddr\n\n    @property\n    def remote_address(self):\n        warnings.warn(\"'remote_address' field is deprecated; use 'raddr' \"\n                      \"instead\", category=DeprecationWarning, stacklevel=2)\n        return self.raddr\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_compat.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Module which provides compatibility with older Python versions.\"\"\"\n\n__all__ = [\"PY3\", \"int\", \"long\", \"xrange\", \"exec_\", \"callable\", \"lru_cache\"]\n\nimport collections\nimport functools\nimport sys\ntry:\n    import __builtin__\nexcept ImportError:\n    import builtins as __builtin__  # py3\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n    int = int\n    long = int\n    xrange = range\n    unicode = str\n    basestring = str\n    exec_ = getattr(__builtin__, \"exec\")\nelse:\n    int = int\n    long = long\n    xrange = xrange\n    unicode = unicode\n    basestring = basestring\n\n    def exec_(code, globs=None, locs=None):\n        if globs is None:\n            frame = sys._getframe(1)\n            globs = frame.f_globals\n            if locs is None:\n                locs = frame.f_locals\n            del frame\n        elif locs is None:\n            locs = globs\n        exec(\"\"\"exec code in globs, locs\"\"\")\n\n\n# removed in 3.0, reintroduced in 3.2\ntry:\n    callable = callable\nexcept NameError:\n    def callable(obj):\n        return any(\"__call__\" in klass.__dict__ for klass in type(obj).__mro__)\n\n\n# --- stdlib additions\n\n\n# py 3.2 functools.lru_cache\n# Taken from: http://code.activestate.com/recipes/578078\n# Credit: Raymond Hettinger\ntry:\n    from functools import lru_cache\nexcept ImportError:\n    try:\n        from threading import RLock\n    except ImportError:\n        from dummy_threading import RLock\n\n    _CacheInfo = collections.namedtuple(\n        \"CacheInfo\", [\"hits\", \"misses\", \"maxsize\", \"currsize\"])\n\n    class _HashedSeq(list):\n        __slots__ = 'hashvalue'\n\n        def __init__(self, tup, hash=hash):\n            self[:] = tup\n            self.hashvalue = hash(tup)\n\n        def __hash__(self):\n            return self.hashvalue\n\n    def _make_key(args, kwds, typed,\n                  kwd_mark=(object(), ),\n                  fasttypes=set((int, str, frozenset, type(None))),\n                  sorted=sorted, tuple=tuple, type=type, len=len):\n        key = args\n        if kwds:\n            sorted_items = sorted(kwds.items())\n            key += kwd_mark\n            for item in sorted_items:\n                key += item\n        if typed:\n            key += tuple(type(v) for v in args)\n            if kwds:\n                key += tuple(type(v) for k, v in sorted_items)\n        elif len(key) == 1 and type(key[0]) in fasttypes:\n            return key[0]\n        return _HashedSeq(key)\n\n    def lru_cache(maxsize=100, typed=False):\n        \"\"\"Least-recently-used cache decorator, see:\n        http://docs.python.org/3/library/functools.html#functools.lru_cache\n        \"\"\"\n        def decorating_function(user_function):\n            cache = dict()\n            stats = [0, 0]\n            HITS, MISSES = 0, 1\n            make_key = _make_key\n            cache_get = cache.get\n            _len = len\n            lock = RLock()\n            root = []\n            root[:] = [root, root, None, None]\n            nonlocal_root = [root]\n            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3\n            if maxsize == 0:\n                def wrapper(*args, **kwds):\n                    result = user_function(*args, **kwds)\n                    stats[MISSES] += 1\n                    return 
result\n            elif maxsize is None:\n                def wrapper(*args, **kwds):\n                    key = make_key(args, kwds, typed)\n                    result = cache_get(key, root)\n                    if result is not root:\n                        stats[HITS] += 1\n                        return result\n                    result = user_function(*args, **kwds)\n                    cache[key] = result\n                    stats[MISSES] += 1\n                    return result\n            else:\n                def wrapper(*args, **kwds):\n                    if kwds or typed:\n                        key = make_key(args, kwds, typed)\n                    else:\n                        key = args\n                    lock.acquire()\n                    try:\n                        link = cache_get(key)\n                        if link is not None:\n                            root, = nonlocal_root\n                            link_prev, link_next, key, result = link\n                            link_prev[NEXT] = link_next\n                            link_next[PREV] = link_prev\n                            last = root[PREV]\n                            last[NEXT] = root[PREV] = link\n                            link[PREV] = last\n                            link[NEXT] = root\n                            stats[HITS] += 1\n                            return result\n                    finally:\n                        lock.release()\n                    result = user_function(*args, **kwds)\n                    lock.acquire()\n                    try:\n                        root, = nonlocal_root\n                        if key in cache:\n                            pass\n                        elif _len(cache) >= maxsize:\n                            oldroot = root\n                            oldroot[KEY] = key\n                            oldroot[RESULT] = result\n                            root = nonlocal_root[0] = oldroot[NEXT]\n                            oldkey = root[KEY]\n                            root[KEY] = root[RESULT] = None\n                            del cache[oldkey]\n                            cache[key] = oldroot\n                        else:\n                            last = root[PREV]\n                            link = [last, root, key, result]\n                            last[NEXT] = root[PREV] = cache[key] = link\n                        stats[MISSES] += 1\n                    finally:\n                        lock.release()\n                    return result\n\n            def cache_info():\n                \"\"\"Report cache statistics\"\"\"\n                lock.acquire()\n                try:\n                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,\n                                      len(cache))\n                finally:\n                    lock.release()\n\n            def cache_clear():\n                \"\"\"Clear the cache and cache statistics\"\"\"\n                lock.acquire()\n                try:\n                    cache.clear()\n                    root = nonlocal_root[0]\n                    root[:] = [root, root, None, None]\n                    stats[:] = [0, 0]\n                finally:\n                    lock.release()\n\n            wrapper.__wrapped__ = user_function\n            wrapper.cache_info = cache_info\n            wrapper.cache_clear = cache_clear\n            return functools.update_wrapper(wrapper, user_function)\n\n        return decorating_function\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_psbsd.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"FreeBSD platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nimport sys\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import conn_tmap, usage_percent\nimport _psutil_bsd as cext\nimport _psutil_posix\n\n\n__extra__all__ = []\n\n# --- constants\n\nPROC_STATUSES = {\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SWAIT: _common.STATUS_WAITING,\n    cext.SLOCK: _common.STATUS_LOCKED,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n}\n\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\n\n# extend base mem ntuple with BSD-specific memory metrics\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])\nscputimes = namedtuple(\n    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])\npextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])\npmmap_grouped = namedtuple(\n    'pmmap_grouped', 'path rss, private, ref_count, shadow_count')\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    mem = cext.virtual_mem()\n    total, free, active, inactive, wired, cached, buffers, shared = mem\n    avail = inactive + cached + free\n    used = active + wired + cached\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, buffers, cached, shared, wired)\n\n\ndef swap_memory():\n    \"\"\"System swap memory as (total, used, free, sin, sout) namedtuple.\"\"\"\n    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\ndef cpu_times():\n    \"\"\"Return system per-CPU times as a namedtuple\"\"\"\n    user, nice, system, idle, irq = cext.cpu_times()\n    return scputimes(user, nice, system, idle, irq)\n\n\nif hasattr(cext, \"per_cpu_times\"):\n    def per_cpu_times():\n        \"\"\"Return system CPU times as a namedtuple\"\"\"\n        ret = []\n        for cpu_t in cext.per_cpu_times():\n            user, nice, system, idle, irq = cpu_t\n            item = scputimes(user, nice, system, idle, irq)\n            ret.append(item)\n        return ret\nelse:\n    # XXX\n    # Ok, this is very dirty.\n    # On FreeBSD < 8 we 
cannot gather per-cpu information, see:\n    # https://github.com/giampaolo/psutil/issues/226\n    # If num cpus > 1, on first call we return single cpu times to avoid a\n    # crash at psutil import time.\n    # Next calls will fail with NotImplementedError\n    def per_cpu_times():\n        if cpu_count_logical() == 1:\n            return [cpu_times()]\n        if per_cpu_times.__called__:\n            raise NotImplementedError(\"supported only starting from FreeBSD 8\")\n        per_cpu_times.__called__ = True\n        return [cpu_times()]\n\n    per_cpu_times.__called__ = False\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    # From the C module we'll get an XML string similar to this:\n    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html\n    # We may get None in case \"sysctl kern.sched.topology_spec\"\n    # is not supported on this BSD version, in which case we'll mimic\n    # os.cpu_count() and return None.\n    s = cext.cpu_count_phys()\n    if s is not None:\n        # get rid of padding chars appended at the end of the string\n        index = s.rfind(\"</groups>\")\n        if index != -1:\n            s = s[:index + 9]\n            if sys.version_info >= (2, 5):\n                import xml.etree.ElementTree as ET\n                root = ET.fromstring(s)\n                return len(root.findall('group/children/group/cpu')) or None\n            else:\n                s = s[s.find('<children>'):]\n                return s.count(\"<cpu\") or None\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef disk_partitions(all=False):\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if not os.path.isabs(device) or not os.path.exists(device):\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef users():\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp = item\n        if tty == '~':\n            continue  # reboot or shutdown\n        nt = _common.suser(user, tty or None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef net_connections(kind):\n    if kind not in _common.conn_tmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in conn_tmap])))\n    families, types = conn_tmap[kind]\n    ret = []\n    rawlist = cext.net_connections()\n    for item in rawlist:\n        fd, fam, type, laddr, raddr, status, pid = item\n        # TODO: apply filter at C level\n        if fam in families and type in types:\n            status = TCP_STATUSES[status]\n            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)\n            ret.append(nt)\n    return ret\n\n\npids = cext.pids\npid_exists = _psposix.pid_exists\ndisk_usage = _psposix.disk_usage\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError exceptions into\n    NoSuchProcess and 
AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        return cext.proc_name(self.pid)\n\n    @wrap_exceptions\n    def exe(self):\n        return cext.proc_exe(self.pid)\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_cmdline(self.pid)\n\n    @wrap_exceptions\n    def terminal(self):\n        tty_nr = cext.proc_tty_nr(self.pid)\n        tmap = _psposix._get_terminal_map()\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_ppid(self.pid)\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved = cext.proc_uids(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        real, effective, saved = cext.proc_gids(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def memory_info(self):\n        rss, vms = cext.proc_memory_info(self.pid)[:2]\n        return _common.pmem(rss, vms)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        return pextmem(*cext.proc_memory_info(self.pid))\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_create_time(self.pid)\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        if kind not in conn_tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in conn_tmap])))\n        families, types = conn_tmap[kind]\n        rawlist = cext.proc_connections(self.pid, families, types)\n        ret = []\n        for item in rawlist:\n            fd, fam, type, laddr, raddr, status = item\n            status = TCP_STATUSES[status]\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n            ret.append(nt)\n        return ret\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support 
for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_status(self.pid)\n        if code in PROC_STATUSES:\n            return PROC_STATUSES[code]\n        # XXX is this legit? will we even ever get here?\n        return \"?\"\n\n    @wrap_exceptions\n    def io_counters(self):\n        rc, wc, rb, wb = cext.proc_io_counters(self.pid)\n        return _common.pio(rc, wc, rb, wb)\n\n    nt_mmap_grouped = namedtuple(\n        'mmap', 'path rss, private, ref_count, shadow_count')\n    nt_mmap_ext = namedtuple(\n        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')\n\n    # FreeBSD < 8 does not support functions based on kinfo_getfile()\n    # and kinfo_getvmmap()\n    if hasattr(cext, 'proc_open_files'):\n\n        @wrap_exceptions\n        def open_files(self):\n            \"\"\"Return files opened by process as a list of namedtuples.\"\"\"\n            rawlist = cext.proc_open_files(self.pid)\n            return [_common.popenfile(path, fd) for path, fd in rawlist]\n\n        @wrap_exceptions\n        def cwd(self):\n            \"\"\"Return process current working directory.\"\"\"\n            # sometimes we get an empty string, in which case we turn\n            # it into None\n            return cext.proc_cwd(self.pid) or None\n\n        @wrap_exceptions\n        def memory_maps(self):\n            return cext.proc_memory_maps(self.pid)\n\n        @wrap_exceptions\n        def num_fds(self):\n            \"\"\"Return the number of file descriptors opened by this process.\"\"\"\n            return cext.proc_num_fds(self.pid)\n\n    else:\n        def _not_implemented(self):\n            raise NotImplementedError(\"supported only starting from FreeBSD 8\")\n\n        open_files = _not_implemented\n        cwd = _not_implemented\n        memory_maps = _not_implemented\n        num_fds = _not_implemented\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        return cext.proc_cpu_affinity_get(self.pid)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, cpus):\n        try:\n            cext.proc_cpu_affinity_set(self.pid, cpus)\n        except OSError as err:\n            # 'man cpuset_setaffinity' about EDEADLK:\n            # <<the call would leave a thread without a valid CPU to run\n            # on because the set does not overlap with the thread's\n            # anonymous mask>>\n            if err.errno in (errno.EINVAL, errno.EDEADLK):\n                allcpus = tuple(range(len(per_cpu_times())))\n                for cpu in cpus:\n                    if cpu not in allcpus:\n                        raise ValueError(\"invalid CPU #%i (choose between %s)\"\n                                         % (cpu, allcpus))\n            raise\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_pslinux.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Linux platform implementation.\"\"\"\n\nfrom __future__ import division\n\nimport base64\nimport errno\nimport functools\nimport os\nimport re\nimport socket\nimport struct\nimport sys\nimport warnings\nfrom collections import namedtuple, defaultdict\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import (isfile_strict, usage_percent, deprecated)\nfrom psutil._compat import PY3\nimport _psutil_linux as cext\nimport _psutil_posix\n\n\n__extra__all__ = [\n    # io prio constants\n    \"IOPRIO_CLASS_NONE\", \"IOPRIO_CLASS_RT\", \"IOPRIO_CLASS_BE\",\n    \"IOPRIO_CLASS_IDLE\",\n    # connection status constants\n    \"CONN_ESTABLISHED\", \"CONN_SYN_SENT\", \"CONN_SYN_RECV\", \"CONN_FIN_WAIT1\",\n    \"CONN_FIN_WAIT2\", \"CONN_TIME_WAIT\", \"CONN_CLOSE\", \"CONN_CLOSE_WAIT\",\n    \"CONN_LAST_ACK\", \"CONN_LISTEN\", \"CONN_CLOSING\",\n    # other\n    \"phymem_buffers\", \"cached_phymem\"]\n\n\n# --- constants\n\nHAS_PRLIMIT = hasattr(cext, \"linux_prlimit\")\n\n# RLIMIT_* constants, not guaranteed to be present on all kernels\nif HAS_PRLIMIT:\n    for name in dir(cext):\n        if name.startswith('RLIM'):\n            __extra__all__.append(name)\n\n# Number of clock ticks per second\nCLOCK_TICKS = os.sysconf(\"SC_CLK_TCK\")\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\nBOOT_TIME = None  # set later\nDEFAULT_ENCODING = sys.getdefaultencoding()\n\n# ioprio_* constants http://linux.die.net/man/2/ioprio_get\nIOPRIO_CLASS_NONE = 0\nIOPRIO_CLASS_RT = 1\nIOPRIO_CLASS_BE = 2\nIOPRIO_CLASS_IDLE = 3\n\n# taken from /fs/proc/array.c\nPROC_STATUSES = {\n    \"R\": _common.STATUS_RUNNING,\n    \"S\": _common.STATUS_SLEEPING,\n    \"D\": _common.STATUS_DISK_SLEEP,\n    \"T\": _common.STATUS_STOPPED,\n    \"t\": _common.STATUS_TRACING_STOP,\n    \"Z\": _common.STATUS_ZOMBIE,\n    \"X\": _common.STATUS_DEAD,\n    \"x\": _common.STATUS_DEAD,\n    \"K\": _common.STATUS_WAKE_KILL,\n    \"W\": _common.STATUS_WAKING\n}\n\n# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h\nTCP_STATUSES = {\n    \"01\": _common.CONN_ESTABLISHED,\n    \"02\": _common.CONN_SYN_SENT,\n    \"03\": _common.CONN_SYN_RECV,\n    \"04\": _common.CONN_FIN_WAIT1,\n    \"05\": _common.CONN_FIN_WAIT2,\n    \"06\": _common.CONN_TIME_WAIT,\n    \"07\": _common.CONN_CLOSE,\n    \"08\": _common.CONN_CLOSE_WAIT,\n    \"09\": _common.CONN_LAST_ACK,\n    \"0A\": _common.CONN_LISTEN,\n    \"0B\": _common.CONN_CLOSING\n}\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n# --- named tuples\n\ndef _get_cputimes_fields():\n    \"\"\"Return a namedtuple of variable fields depending on the\n    CPU times available on this Linux kernel version which may be:\n    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,\n     [guest_nice]]])\n    \"\"\"\n    with open('/proc/stat', 'rb') as f:\n        values = f.readline().split()[1:]\n    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']\n    vlen = len(values)\n    if vlen >= 8:\n        # Linux >= 2.6.11\n        fields.append('steal')\n    if vlen >= 9:\n        # Linux >= 2.6.24\n        fields.append('guest')\n    if vlen >= 10:\n        # Linux >= 3.2.0\n        fields.append('guest_nice')\n    return fields\n\n\nscputimes = namedtuple('scputimes', 
_get_cputimes_fields())\n\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'buffers', 'cached'])\n\npextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')\n\npmmap_grouped = namedtuple(\n    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',\n                      'shared_dirty', 'private_clean', 'private_dirty',\n                      'referenced', 'anonymous', 'swap'])\n\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n\n# --- system memory\n\ndef virtual_memory():\n    total, free, buffers, shared, _, _ = cext.linux_sysinfo()\n    cached = active = inactive = None\n    with open('/proc/meminfo', 'rb') as f:\n        for line in f:\n            if line.startswith(b\"Cached:\"):\n                cached = int(line.split()[1]) * 1024\n            elif line.startswith(b\"Active:\"):\n                active = int(line.split()[1]) * 1024\n            elif line.startswith(b\"Inactive:\"):\n                inactive = int(line.split()[1]) * 1024\n            if (cached is not None\n                    and active is not None\n                    and inactive is not None):\n                break\n        else:\n            # we might get here when dealing with exotic Linux flavors, see:\n            # https://github.com/giampaolo/psutil/issues/313\n            msg = \"'cached', 'active' and 'inactive' memory stats couldn't \" \\\n                  \"be determined and were set to 0\"\n            warnings.warn(msg, RuntimeWarning)\n            cached = active = inactive = 0\n    avail = free + buffers + cached\n    used = total - free\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, buffers, cached)\n\n\ndef swap_memory():\n    _, _, _, _, total, free = cext.linux_sysinfo()\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    # get pgin/pgouts\n    with open(\"/proc/vmstat\", \"rb\") as f:\n        sin = sout = None\n        for line in f:\n            # values are expressed in 4 kilobyte blocks; we want bytes instead\n            if line.startswith(b'pswpin'):\n                sin = int(line.split(b' ')[1]) * 4 * 1024\n            elif line.startswith(b'pswpout'):\n                sout = int(line.split(b' ')[1]) * 4 * 1024\n            if sin is not None and sout is not None:\n                break\n        else:\n            # we might get here when dealing with exotic Linux flavors, see:\n            # https://github.com/giampaolo/psutil/issues/313\n            msg = \"'sin' and 'sout' swap memory stats couldn't \" \\\n                  \"be determined and were set to 0\"\n            warnings.warn(msg, RuntimeWarning)\n            sin = sout = 0\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\n@deprecated(replacement='psutil.virtual_memory().cached')\ndef cached_phymem():\n    return virtual_memory().cached\n\n\n@deprecated(replacement='psutil.virtual_memory().buffers')\ndef phymem_buffers():\n    return virtual_memory().buffers\n\n\n# --- CPUs\n\ndef cpu_times():\n    \"\"\"Return a named tuple representing the following system-wide\n    CPU times:\n    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,\n     [guest_nice]]])\n    Last 3 fields may not be available on all Linux kernel versions.\n    \"\"\"\n    with open('/proc/stat', 'rb') as f:\n        values = f.readline().split()\n    
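# values[0] is the 'cpu' label; keep only as many fields as scputimes defines\n    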
fields = values[1:len(scputimes._fields) + 1]\n    fields = [float(x) / CLOCK_TICKS for x in fields]\n    return scputimes(*fields)\n\n\ndef per_cpu_times():\n    \"\"\"Return a list of namedtuples representing the CPU times\n    for every CPU available on the system.\n    \"\"\"\n    cpus = []\n    with open('/proc/stat', 'rb') as f:\n        # get rid of the first line which refers to system wide CPU stats\n        f.readline()\n        for line in f:\n            if line.startswith(b'cpu'):\n                values = line.split()\n                fields = values[1:len(scputimes._fields) + 1]\n                fields = [float(x) / CLOCK_TICKS for x in fields]\n                entry = scputimes(*fields)\n                cpus.append(entry)\n        return cpus\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    try:\n        return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n    except ValueError:\n        # as a second fallback we try to parse /proc/cpuinfo\n        num = 0\n        with open('/proc/cpuinfo', 'rb') as f:\n            for line in f:\n                if line.lower().startswith(b'processor'):\n                    num += 1\n\n        # unknown format (e.g. armel/sparc architectures), see:\n        # https://github.com/giampaolo/psutil/issues/200\n        # try to parse /proc/stat as a last resort\n        if num == 0:\n            search = re.compile('cpu\d')\n            with open('/proc/stat', 'rt') as f:\n                for line in f:\n                    line = line.split(' ')[0]\n                    if search.match(line):\n                        num += 1\n\n        if num == 0:\n            # mimic os.cpu_count()\n            return None\n        return num\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    with open('/proc/cpuinfo', 'rb') as f:\n        found = set()\n        for line in f:\n            if line.lower().startswith(b'physical id'):\n                found.add(line.strip())\n    # mimic os.cpu_count()\n    return len(found) if found else None\n\n\n# --- other system functions\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp, user_process = item\n        # note: the underlying C function includes entries about\n        # system boot, run level and others.  
We might want\n        # to use them in the future.\n        if not user_process:\n            continue\n        if hostname == ':0.0':\n            hostname = 'localhost'\n        nt = _common.suser(user, tty or None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef boot_time():\n    \"\"\"Return the system boot time expressed in seconds since the epoch.\"\"\"\n    global BOOT_TIME\n    with open('/proc/stat', 'rb') as f:\n        for line in f:\n            if line.startswith(b'btime'):\n                ret = float(line.strip().split()[1])\n                BOOT_TIME = ret\n                return ret\n        raise RuntimeError(\"line 'btime' not found\")\n\n\n# --- processes\n\ndef pids():\n    \"\"\"Returns a list of PIDs currently running on the system.\"\"\"\n    return [int(x) for x in os.listdir(b'/proc') if x.isdigit()]\n\n\ndef pid_exists(pid):\n    \"\"\"Check For the existence of a unix pid.\"\"\"\n    return _psposix.pid_exists(pid)\n\n\n# --- network\n\nclass Connections:\n    \"\"\"A wrapper on top of /proc/net/* files, retrieving per-process\n    and system-wide open connections (TCP, UDP, UNIX) similarly to\n    \"netstat -an\".\n\n    Note: in case of UNIX sockets we're only able to determine the\n    local endpoint/path, not the one it's connected to.\n    According to [1] it would be possible but not easily.\n\n    [1] http://serverfault.com/a/417946\n    \"\"\"\n\n    def __init__(self):\n        tcp4 = (\"tcp\", socket.AF_INET, socket.SOCK_STREAM)\n        tcp6 = (\"tcp6\", socket.AF_INET6, socket.SOCK_STREAM)\n        udp4 = (\"udp\", socket.AF_INET, socket.SOCK_DGRAM)\n        udp6 = (\"udp6\", socket.AF_INET6, socket.SOCK_DGRAM)\n        unix = (\"unix\", socket.AF_UNIX, None)\n        self.tmap = {\n            \"all\": (tcp4, tcp6, udp4, udp6, unix),\n            \"tcp\": (tcp4, tcp6),\n            \"tcp4\": (tcp4,),\n            \"tcp6\": (tcp6,),\n            \"udp\": (udp4, udp6),\n            \"udp4\": (udp4,),\n            \"udp6\": (udp6,),\n            \"unix\": (unix,),\n            \"inet\": (tcp4, tcp6, udp4, udp6),\n            \"inet4\": (tcp4, udp4),\n            \"inet6\": (tcp6, udp6),\n        }\n\n    def get_proc_inodes(self, pid):\n        inodes = defaultdict(list)\n        for fd in os.listdir(\"/proc/%s/fd\" % pid):\n            try:\n                inode = os.readlink(\"/proc/%s/fd/%s\" % (pid, fd))\n            except OSError:\n                # TODO: need comment here\n                continue\n            else:\n                if inode.startswith('socket:['):\n                    # the process is using a socket\n                    inode = inode[8:][:-1]\n                    inodes[inode].append((pid, int(fd)))\n        return inodes\n\n    def get_all_inodes(self):\n        inodes = {}\n        for pid in pids():\n            try:\n                inodes.update(self.get_proc_inodes(pid))\n            except OSError as err:\n                # os.listdir() is gonna raise a lot of access denied\n                # exceptions in case of unprivileged user; that's fine\n                # as we'll just end up returning a connection with PID\n                # and fd set to None anyway.\n                # Both netstat -an and lsof does the same so it's\n                # unlikely we can do any better.\n                # ENOENT just means a PID disappeared on us.\n                if err.errno not in (\n                        errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):\n                    raise\n        
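# inode -> [(pid, fd), ...] map covering every process we could inspect\n        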
return inodes\n\n    def decode_address(self, addr, family):\n        \"\"\"Accept an \"ip:port\" address as displayed in /proc/net/*\n        and convert it into a human readable form, like:\n\n        \"0500000A:0016\" -> (\"10.0.0.5\", 22)\n        \"0000000000000000FFFF00000100007F:9E49\" -> (\"::ffff:127.0.0.1\", 40521)\n\n        The IP address portion is a little or big endian four-byte\n        hexadecimal number; that is, the least significant byte is listed\n        first, so we need to reverse the order of the bytes to convert it\n        to an IP address.\n        The port is represented as a two-byte hexadecimal number.\n\n        Reference:\n        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html\n        \"\"\"\n        ip, port = addr.split(':')\n        port = int(port, 16)\n        # this usually refers to a local socket in listen mode with\n        # no end-points connected\n        if not port:\n            return ()\n        if PY3:\n            ip = ip.encode('ascii')\n        if family == socket.AF_INET:\n            # see: https://github.com/giampaolo/psutil/issues/201\n            if sys.byteorder == 'little':\n                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])\n            else:\n                ip = socket.inet_ntop(family, base64.b16decode(ip))\n        else:  # IPv6\n            # old version - let's keep it, just in case...\n            # ip = ip.decode('hex')\n            # return socket.inet_ntop(socket.AF_INET6,\n            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))\n            ip = base64.b16decode(ip)\n            # see: https://github.com/giampaolo/psutil/issues/201\n            if sys.byteorder == 'little':\n                ip = socket.inet_ntop(\n                    socket.AF_INET6,\n                    struct.pack('>4I', *struct.unpack('<4I', ip)))\n            else:\n                ip = socket.inet_ntop(\n                    socket.AF_INET6,\n                    struct.pack('<4I', *struct.unpack('<4I', ip)))\n        return (ip, port)\n\n    def process_inet(self, file, family, type_, inodes, filter_pid=None):\n        \"\"\"Parse /proc/net/tcp* and /proc/net/udp* files.\"\"\"\n        if file.endswith('6') and not os.path.exists(file):\n            # IPv6 not supported\n            return\n        with open(file, 'rt') as f:\n            f.readline()  # skip the first line\n            for line in f:\n                _, laddr, raddr, status, _, _, _, _, _, inode = \\\n                    line.split()[:10]\n                if inode in inodes:\n                    # We assume inet sockets are unique, so we error\n                    # out if there are multiple references to the\n                    # same inode. 
We won't do this for UNIX sockets.\n                    if len(inodes[inode]) > 1 and family != socket.AF_UNIX:\n                        raise ValueError(\"ambiguous inode with multiple \"\n                                         \"PID references\")\n                    pid, fd = inodes[inode][0]\n                else:\n                    pid, fd = None, -1\n                if filter_pid is not None and filter_pid != pid:\n                    continue\n                else:\n                    if type_ == socket.SOCK_STREAM:\n                        status = TCP_STATUSES[status]\n                    else:\n                        status = _common.CONN_NONE\n                    laddr = self.decode_address(laddr, family)\n                    raddr = self.decode_address(raddr, family)\n                    yield (fd, family, type_, laddr, raddr, status, pid)\n\n    def process_unix(self, file, family, inodes, filter_pid=None):\n        \"\"\"Parse /proc/net/unix files.\"\"\"\n        with open(file, 'rt') as f:\n            f.readline()  # skip the first line\n            for line in f:\n                tokens = line.split()\n                _, _, _, _, type_, _, inode = tokens[0:7]\n                if inode in inodes:\n                    # With UNIX sockets we can have a single inode\n                    # referencing many file descriptors.\n                    pairs = inodes[inode]\n                else:\n                    pairs = [(None, -1)]\n                for pid, fd in pairs:\n                    if filter_pid is not None and filter_pid != pid:\n                        continue\n                    else:\n                        if len(tokens) == 8:\n                            path = tokens[-1]\n                        else:\n                            path = \"\"\n                        type_ = int(type_)\n                        raddr = None\n                        status = _common.CONN_NONE\n                        yield (fd, family, type_, path, raddr, status, pid)\n\n    def retrieve(self, kind, pid=None):\n        if kind not in self.tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in self.tmap])))\n        if pid is not None:\n            inodes = self.get_proc_inodes(pid)\n            if not inodes:\n                # no connections for this process\n                return []\n        else:\n            inodes = self.get_all_inodes()\n        ret = []\n        for f, family, type_ in self.tmap[kind]:\n            if family in (socket.AF_INET, socket.AF_INET6):\n                ls = self.process_inet(\n                    \"/proc/net/%s\" % f, family, type_, inodes, filter_pid=pid)\n            else:\n                ls = self.process_unix(\n                    \"/proc/net/%s\" % f, family, inodes, filter_pid=pid)\n            for fd, family, type_, laddr, raddr, status, bound_pid in ls:\n                if pid:\n                    conn = _common.pconn(fd, family, type_, laddr, raddr,\n                                         status)\n                else:\n                    conn = _common.sconn(fd, family, type_, laddr, raddr,\n                                         status, bound_pid)\n                ret.append(conn)\n        return ret\n\n\n_connections = Connections()\n\n\ndef net_connections(kind='inet'):\n    \"\"\"Return system-wide open connections.\"\"\"\n    return _connections.retrieve(kind)\n\n\ndef net_io_counters():\n    \"\"\"Return network I/O 
statistics for every network interface\n    installed on the system as a dict of raw tuples.\n    \"\"\"\n    with open(\"/proc/net/dev\", \"rt\") as f:\n        lines = f.readlines()\n    retdict = {}\n    for line in lines[2:]:\n        colon = line.rfind(':')\n        assert colon > 0, repr(line)\n        name = line[:colon].strip()\n        fields = line[colon + 1:].strip().split()\n        bytes_recv = int(fields[0])\n        packets_recv = int(fields[1])\n        errin = int(fields[2])\n        dropin = int(fields[3])\n        bytes_sent = int(fields[8])\n        packets_sent = int(fields[9])\n        errout = int(fields[10])\n        dropout = int(fields[11])\n        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,\n                         errin, errout, dropin, dropout)\n    return retdict\n\n\n# --- disks\n\ndef disk_io_counters():\n    \"\"\"Return disk I/O statistics for every disk installed on the\n    system as a dict of raw tuples.\n    \"\"\"\n    # man iostat states that sectors are equivalent with blocks and\n    # have a size of 512 bytes since 2.4 kernels. This value is\n    # needed to calculate the amount of disk I/O in bytes.\n    SECTOR_SIZE = 512\n\n    # determine partitions we want to look for\n    partitions = []\n    with open(\"/proc/partitions\", \"rt\") as f:\n        lines = f.readlines()[2:]\n    for line in reversed(lines):\n        _, _, _, name = line.split()\n        if name[-1].isdigit():\n            # we're dealing with a partition (e.g. 'sda1'); 'sda' will\n            # also be around but we want to omit it\n            partitions.append(name)\n        else:\n            if not partitions or not partitions[-1].startswith(name):\n                # we're dealing with a disk entity for which no\n                # partitions have been defined (e.g. 
'sda' but\n                # 'sda1' was not around), see:\n                # https://github.com/giampaolo/psutil/issues/338\n                partitions.append(name)\n    #\n    retdict = {}\n    with open(\"/proc/diskstats\", \"rt\") as f:\n        lines = f.readlines()\n    for line in lines:\n        # http://www.mjmwired.net/kernel/Documentation/iostats.txt\n        fields = line.split()\n        if len(fields) > 7:\n            _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \\\n                fields[:11]\n        else:\n            # from kernel 2.6.0 to 2.6.25\n            _, _, name, reads, rbytes, writes, wbytes = fields\n            rtime, wtime = 0, 0\n        if name in partitions:\n            rbytes = int(rbytes) * SECTOR_SIZE\n            wbytes = int(wbytes) * SECTOR_SIZE\n            reads = int(reads)\n            writes = int(writes)\n            rtime = int(rtime)\n            wtime = int(wtime)\n            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)\n    return retdict\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return mounted disk partitions as a list of namedtuples\"\"\"\n    phydevs = []\n    with open(\"/proc/filesystems\", \"r\") as f:\n        for line in f:\n            if not line.startswith(\"nodev\"):\n                phydevs.append(line.strip())\n\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if device == '' or fstype not in phydevs:\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndisk_usage = _psposix.disk_usage\n\n\n# --- decorators\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError and IOError exceptions\n    into NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except EnvironmentError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            # ENOENT (no such file or directory) gets raised on open().\n            # ESRCH (no such process) can get raised on read() if\n            # process is gone in the meantime.\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Linux process implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        fname = \"/proc/%s/stat\" % self.pid\n        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()\n        with open(fname, \"rt\", **kw) as f:\n            # XXX - gets changed later and probably needs refactoring\n            return f.read().split(' ')[1].replace('(', '').replace(')', '')\n\n    def exe(self):\n        try:\n            exe = os.readlink(\"/proc/%s/exe\" % self.pid)\n        except (OSError, IOError) as err:\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                # no such file error; might be raised also if 
the\n                # path actually exists for system processes with\n                # low pids (about 0-20)\n                if os.path.lexists(\"/proc/%s\" % self.pid):\n                    return \"\"\n                else:\n                    # ok, it is a process which has gone away\n                    raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n\n        # readlink() might return paths containing null bytes ('\\x00').\n        # Certain names have ' (deleted)' appended. Usually this is\n        # bogus as the file actually exists. Either way that's not\n        # important as we don't want to discriminate executables which\n        # have been deleted.\n        exe = exe.split('\\x00')[0]\n        if exe.endswith(' (deleted)') and not os.path.exists(exe):\n            exe = exe[:-10]\n        return exe\n\n    @wrap_exceptions\n    def cmdline(self):\n        fname = \"/proc/%s/cmdline\" % self.pid\n        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()\n        with open(fname, \"rt\", **kw) as f:\n            return [x for x in f.read().split('\\x00') if x]\n\n    @wrap_exceptions\n    def terminal(self):\n        tmap = _psposix._get_terminal_map()\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            tty_nr = int(f.read().split(b' ')[6])\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    if os.path.exists('/proc/%s/io' % os.getpid()):\n        @wrap_exceptions\n        def io_counters(self):\n            fname = \"/proc/%s/io\" % self.pid\n            with open(fname, 'rb') as f:\n                rcount = wcount = rbytes = wbytes = None\n                for line in f:\n                    if rcount is None and line.startswith(b\"syscr\"):\n                        rcount = int(line.split()[1])\n                    elif wcount is None and line.startswith(b\"syscw\"):\n                        wcount = int(line.split()[1])\n                    elif rbytes is None and line.startswith(b\"read_bytes\"):\n                        rbytes = int(line.split()[1])\n                    elif wbytes is None and line.startswith(b\"write_bytes\"):\n                        wbytes = int(line.split()[1])\n                for x in (rcount, wcount, rbytes, wbytes):\n                    if x is None:\n                        raise NotImplementedError(\n                            \"couldn't read all necessary info from %r\" % fname)\n                return _common.pio(rcount, wcount, rbytes, wbytes)\n    else:\n        def io_counters(self):\n            raise NotImplementedError(\"couldn't find /proc/%s/io (kernel \"\n                                      \"too old?)\" % self.pid)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            st = f.read().strip()\n        # ignore the first two values (\"pid (exe)\")\n        st = st[st.find(b')') + 2:]\n        values = st.split(b' ')\n        utime = float(values[11]) / CLOCK_TICKS\n        stime = float(values[12]) / CLOCK_TICKS\n        return _common.pcputimes(utime, stime)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise 
TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def create_time(self):\n        with open(\"/proc/%s/stat\" % self.pid, 'rb') as f:\n            st = f.read().strip()\n        # ignore the first two values (\"pid (exe)\")\n        st = st[st.rfind(b')') + 2:]\n        values = st.split(b' ')\n        # According to documentation, starttime is in field 21 and the\n        # unit is jiffies (clock ticks).\n        # We first divide it by clock ticks and then add the boot time,\n        # returning seconds since the epoch, in UTC.\n        # Also use the cached value if available.\n        bt = BOOT_TIME or boot_time()\n        return (float(values[19]) / CLOCK_TICKS) + bt\n\n    @wrap_exceptions\n    def memory_info(self):\n        with open(\"/proc/%s/statm\" % self.pid, 'rb') as f:\n            vms, rss = f.readline().split()[:2]\n            return _common.pmem(int(rss) * PAGESIZE,\n                                int(vms) * PAGESIZE)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        #  ============================================================\n        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |\n        #  ============================================================\n        # | rss    | resident set size                   |      | RES  |\n        # | vms    | total program size                  | size | VIRT |\n        # | shared | shared pages (from shared mappings) |      | SHR  |\n        # | text   | text ('code')                       | trs  | CODE |\n        # | lib    | library (unused in Linux 2.6)       | lrs  |      |\n        # | data   | data + stack                        | drs  | DATA |\n        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |\n        #  ============================================================\n        with open(\"/proc/%s/statm\" % self.pid, \"rb\") as f:\n            vms, rss, shared, text, lib, data, dirty = \\\n                [int(x) * PAGESIZE for x in f.readline().split()[:7]]\n        return pextmem(rss, vms, shared, text, lib, data, dirty)\n\n    if os.path.exists('/proc/%s/smaps' % os.getpid()):\n\n        @wrap_exceptions\n        def memory_maps(self):\n            \"\"\"Return process's mapped memory regions as a list of namedtuples.\n            Fields are explained in 'man proc'; here is an updated (Apr 2012)\n            version: http://goo.gl/fmebo\n            \"\"\"\n            with open(\"/proc/%s/smaps\" % self.pid, \"rt\") as f:\n                first_line = f.readline()\n                current_block = [first_line]\n\n                def get_blocks():\n                    data = {}\n                    for line in f:\n                        fields = line.split(None, 5)\n                        if not fields[0].endswith(':'):\n                            # new block section\n                            yield (current_block.pop(), data)\n                            current_block.append(line)\n                        else:\n                            try:\n                                data[fields[0]] = int(fields[1]) * 1024\n                            except ValueError:\n                                if fields[0].startswith('VmFlags:'):\n                                    # see issue #369\n                                    continue\n                                else:\n                                    raise ValueError(\"don't know how to inte\"\n                                                     \"rpret line %r\" % line)\n                    
yield (current_block.pop(), data)\n\n                ls = []\n                if first_line:  # smaps file can be empty\n                    for header, data in get_blocks():\n                        hfields = header.split(None, 5)\n                        try:\n                            addr, perms, offset, dev, inode, path = hfields\n                        except ValueError:\n                            addr, perms, offset, dev, inode, path = \\\n                                hfields + ['']\n                        if not path:\n                            path = '[anon]'\n                        else:\n                            path = path.strip()\n                        ls.append((\n                            addr, perms, path,\n                            data['Rss:'],\n                            data.get('Size:', 0),\n                            data.get('Pss:', 0),\n                            data.get('Shared_Clean:', 0),\n                            data.get('Shared_Dirty:', 0),\n                            data.get('Private_Clean:', 0),\n                            data.get('Private_Dirty:', 0),\n                            data.get('Referenced:', 0),\n                            data.get('Anonymous:', 0),\n                            data.get('Swap:', 0)\n                        ))\n            return ls\n\n    else:\n        def memory_maps(self):\n            msg = \"couldn't find /proc/%s/smaps; kernel < 2.6.14 or \"  \\\n                  \"CONFIG_MMU kernel configuration option is not enabled\" \\\n                  % self.pid\n            raise NotImplementedError(msg)\n\n    @wrap_exceptions\n    def cwd(self):\n        # readlink() might return paths containing null bytes causing\n        # problems when used with other fs-related functions (os.*,\n        # open(), ...)\n        path = os.readlink(\"/proc/%s/cwd\" % self.pid)\n        return path.replace('\\x00', '')\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        vol = unvol = None\n        with open(\"/proc/%s/status\" % self.pid, \"rb\") as f:\n            for line in f:\n                if line.startswith(b\"voluntary_ctxt_switches\"):\n                    vol = int(line.split()[1])\n                elif line.startswith(b\"nonvoluntary_ctxt_switches\"):\n                    unvol = int(line.split()[1])\n                if vol is not None and unvol is not None:\n                    return _common.pctxsw(vol, unvol)\n            raise NotImplementedError(\n                \"'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches' \"\n                \"fields were not found in /proc/%s/status; the kernel is \"\n                \"probably older than 2.6.23\" % self.pid)\n\n    @wrap_exceptions\n    def num_threads(self):\n        with open(\"/proc/%s/status\" % self.pid, \"rb\") as f:\n            for line in f:\n                if line.startswith(b\"Threads:\"):\n                    return int(line.split()[1])\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def threads(self):\n        thread_ids = os.listdir(\"/proc/%s/task\" % self.pid)\n        thread_ids.sort()\n        retlist = []\n        hit_enoent = False\n        for thread_id in thread_ids:\n            fname = \"/proc/%s/task/%s/stat\" % (self.pid, thread_id)\n            try:\n                with open(fname, 'rb') as f:\n                    st = f.read().strip()\n            except EnvironmentError as err:\n                if err.errno == errno.ENOENT:\n                    # no such file 
or directory; it means thread\n                    # disappeared on us\n                    hit_enoent = True\n                    continue\n                raise\n            # ignore the first two values (\"pid (exe)\")\n            st = st[st.find(b')') + 2:]\n            values = st.split(b' ')\n            utime = float(values[11]) / CLOCK_TICKS\n            stime = float(values[12]) / CLOCK_TICKS\n            ntuple = _common.pthread(int(thread_id), utime, stime)\n            retlist.append(ntuple)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def nice_get(self):\n        # with open('/proc/%s/stat' % self.pid, 'r') as f:\n        #   data = f.read()\n        #   return int(data.split()[18])\n\n        # Use C implementation\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        return cext.proc_cpu_affinity_get(self.pid)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, cpus):\n        try:\n            cext.proc_cpu_affinity_set(self.pid, cpus)\n        except OSError as err:\n            if err.errno == errno.EINVAL:\n                allcpus = tuple(range(len(per_cpu_times())))\n                for cpu in cpus:\n                    if cpu not in allcpus:\n                        raise ValueError(\"invalid CPU #%i (choose between %s)\"\n                                         % (cpu, allcpus))\n            raise\n\n    # only starting from kernel 2.6.13\n    if hasattr(cext, \"proc_ioprio_get\"):\n\n        @wrap_exceptions\n        def ionice_get(self):\n            ioclass, value = cext.proc_ioprio_get(self.pid)\n            return _common.pionice(ioclass, value)\n\n        @wrap_exceptions\n        def ionice_set(self, ioclass, value):\n            if ioclass in (IOPRIO_CLASS_NONE, None):\n                if value:\n                    msg = \"can't specify value with IOPRIO_CLASS_NONE\"\n                    raise ValueError(msg)\n                ioclass = IOPRIO_CLASS_NONE\n                value = 0\n            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):\n                if value is None:\n                    value = 4\n            elif ioclass == IOPRIO_CLASS_IDLE:\n                if value:\n                    msg = \"can't specify value with IOPRIO_CLASS_IDLE\"\n                    raise ValueError(msg)\n                value = 0\n            else:\n                value = 0\n            if not 0 <= value <= 8:\n                raise ValueError(\n                    \"value argument is expected to be in the range 0-8\")\n            return cext.proc_ioprio_set(self.pid, ioclass, value)\n\n    if HAS_PRLIMIT:\n        @wrap_exceptions\n        def rlimit(self, resource, limits=None):\n            # if pid is 0 prlimit() applies to the calling process and\n            # we don't want that\n            if self.pid == 0:\n                raise ValueError(\"can't use prlimit() against PID 0 process\")\n            if limits is None:\n                # get\n                return cext.linux_prlimit(self.pid, resource)\n            else:\n                # set\n                if len(limits) != 2:\n                    raise ValueError(\n                        \"second argument must be a (soft, hard) tuple\")\n                soft, hard = limits\n                
cext.linux_prlimit(self.pid, resource, soft, hard)\n\n    @wrap_exceptions\n    def status(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b\"State:\"):\n                    letter = line.split()[1]\n                    if PY3:\n                        letter = letter.decode()\n                    # XXX is '?' legit? (we're not supposed to return\n                    # it anyway)\n                    return PROC_STATUSES.get(letter, '?')\n\n    @wrap_exceptions\n    def open_files(self):\n        retlist = []\n        files = os.listdir(\"/proc/%s/fd\" % self.pid)\n        hit_enoent = False\n        for fd in files:\n            file = \"/proc/%s/fd/%s\" % (self.pid, fd)\n            try:\n                file = os.readlink(file)\n            except OSError as err:\n                # ENOENT == file which is gone in the meantime\n                if err.errno in (errno.ENOENT, errno.ESRCH):\n                    hit_enoent = True\n                    continue\n                elif err.errno == errno.EINVAL:\n                    # not a link\n                    continue\n                else:\n                    raise\n            else:\n                # If file is not an absolute path there's no way\n                # to tell whether it's a regular file or not,\n                # so we skip it. A regular file is always supposed\n                # to be absolutized though.\n                if file.startswith('/') and isfile_strict(file):\n                    ntuple = _common.popenfile(file, int(fd))\n                    retlist.append(ntuple)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        ret = _connections.retrieve(kind, self.pid)\n        # raise NSP if the process disappeared on us\n        os.stat('/proc/%s' % self.pid)\n        return ret\n\n    @wrap_exceptions\n    def num_fds(self):\n        return len(os.listdir(\"/proc/%s/fd\" % self.pid))\n\n    @wrap_exceptions\n    def ppid(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b\"PPid:\"):\n                    # PPid: nnnn\n                    return int(line.split()[1])\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def uids(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b'Uid:'):\n                    _, real, effective, saved, fs = line.split()\n                    return _common.puids(int(real), int(effective), int(saved))\n            raise NotImplementedError(\"line not found\")\n\n    @wrap_exceptions\n    def gids(self):\n        with open(\"/proc/%s/status\" % self.pid, 'rb') as f:\n            for line in f:\n                if line.startswith(b'Gid:'):\n                    _, real, effective, saved, fs = line.split()\n                    return _common.pgids(int(real), int(effective), int(saved))\n            raise NotImplementedError(\"line not found\")\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_psosx.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"OSX platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import conn_tmap, usage_percent, isfile_strict\nimport _psutil_osx as cext\nimport _psutil_posix\n\n\n__extra__all__ = []\n\n# --- constants\n\nPAGESIZE = os.sysconf(\"SC_PAGE_SIZE\")\n\n# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\nPROC_STATUSES = {\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n}\n\nscputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])\n\nsvmem = namedtuple(\n    'svmem', ['total', 'available', 'percent', 'used', 'free',\n              'active', 'inactive', 'wired'])\n\npextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])\n\npmmap_grouped = namedtuple(\n    'pmmap_grouped',\n    'path rss private swapped dirtied ref_count shadow_depth')\n\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n# --- functions\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    total, active, inactive, wired, free = cext.virtual_mem()\n    avail = inactive + free\n    used = active + inactive + wired\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free,\n                 active, inactive, wired)\n\n\ndef swap_memory():\n    \"\"\"Swap system memory as a (total, used, free, sin, sout) tuple.\"\"\"\n    total, used, free, sin, sout = cext.swap_mem()\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, sin, sout)\n\n\ndef cpu_times():\n    \"\"\"Return system CPU times as a namedtuple.\"\"\"\n    user, nice, system, idle = cext.cpu_times()\n    return scputimes(user, nice, system, idle)\n\n\ndef per_cpu_times():\n    \"\"\"Return system CPU times as a named tuple\"\"\"\n    ret = []\n    for cpu_t in cext.per_cpu_times():\n        user, nice, system, idle = cpu_t\n        item = scputimes(user, nice, system, idle)\n        ret.append(item)\n    return ret\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the 
epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef disk_partitions(all=False):\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            if not os.path.isabs(device) or not os.path.exists(device):\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef users():\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, tty, hostname, tstamp = item\n        if tty == '~':\n            continue  # reboot or shutdown\n        if not tstamp:\n            continue\n        nt = _common.suser(user, tty or None, hostname or None, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef net_connections(kind='inet'):\n    # Note: on OSX this will fail with AccessDenied unless\n    # the process is owned by root.\n    ret = []\n    for pid in pids():\n        try:\n            cons = Process(pid).connections(kind)\n        except NoSuchProcess:\n            continue\n        else:\n            if cons:\n                for c in cons:\n                    c = list(c) + [pid]\n                    ret.append(_common.sconn(*c))\n    return ret\n\n\npids = cext.pids\npid_exists = _psposix.pid_exists\ndisk_usage = _psposix.disk_usage\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError exceptions into\n    NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        return cext.proc_name(self.pid)\n\n    @wrap_exceptions\n    def exe(self):\n        return cext.proc_exe(self.pid)\n\n    @wrap_exceptions\n    def cmdline(self):\n        if not pid_exists(self.pid):\n            raise NoSuchProcess(self.pid, self._name)\n        return cext.proc_cmdline(self.pid)\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_ppid(self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        return cext.proc_cwd(self.pid)\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved = cext.proc_uids(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        real, effective, saved = cext.proc_gids(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def terminal(self):\n        tty_nr = cext.proc_tty_nr(self.pid)\n        tmap = _psposix._get_terminal_map()\n        try:\n            return tmap[tty_nr]\n        except KeyError:\n            return None\n\n    @wrap_exceptions\n  
  def memory_info(self):\n        rss, vms = cext.proc_memory_info(self.pid)[:2]\n        return _common.pmem(rss, vms)\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)\n        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_create_time(self.pid)\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def open_files(self):\n        if self.pid == 0:\n            return []\n        files = []\n        rawlist = cext.proc_open_files(self.pid)\n        for path, fd in rawlist:\n            if isfile_strict(path):\n                ntuple = _common.popenfile(path, fd)\n                files.append(ntuple)\n        return files\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        if kind not in conn_tmap:\n            raise ValueError(\"invalid %r kind argument; choose between %s\"\n                             % (kind, ', '.join([repr(x) for x in conn_tmap])))\n        families, types = conn_tmap[kind]\n        rawlist = cext.proc_connections(self.pid, families, types)\n        ret = []\n        for item in rawlist:\n            fd, fam, type, laddr, raddr, status = item\n            status = TCP_STATUSES[status]\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n            ret.append(nt)\n        return ret\n\n    @wrap_exceptions\n    def num_fds(self):\n        if self.pid == 0:\n            return 0\n        return cext.proc_num_fds(self.pid)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return _psutil_posix.getpriority(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_status(self.pid)\n        # XXX is '?' legit? (we're not supposed to return it anyway)\n        return PROC_STATUSES.get(code, '?')\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def memory_maps(self):\n        return cext.proc_memory_maps(self.pid)\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_psposix.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Routines common to all posix systems.\"\"\"\n\nimport errno\nimport glob\nimport os\nimport sys\nimport time\n\nfrom psutil._common import sdiskusage, usage_percent, memoize\nfrom psutil._compat import PY3, unicode\n\n\nclass TimeoutExpired(Exception):\n    pass\n\n\ndef pid_exists(pid):\n    \"\"\"Check whether pid exists in the current process table.\"\"\"\n    if pid == 0:\n        # According to \"man 2 kill\" PID 0 has a special meaning:\n        # it refers to <<every process in the process group of the\n        # calling process>> so we don't want to go any further.\n        # If we get here it means this UNIX platform *does* have\n        # a process with id 0.\n        return True\n    try:\n        os.kill(pid, 0)\n    except OSError as err:\n        if err.errno == errno.ESRCH:\n            # ESRCH == No such process\n            return False\n        elif err.errno == errno.EPERM:\n            # EPERM clearly means there's a process to deny access to\n            return True\n        else:\n            # According to \"man 2 kill\" possible error values are\n            # (EINVAL, EPERM, ESRCH) therefore we should never get\n            # here. If we do let's be explicit in considering this\n            # an error.\n            raise err\n    else:\n        return True\n\n\ndef wait_pid(pid, timeout=None):\n    \"\"\"Wait for process with pid 'pid' to terminate and return its\n    exit status code as an integer.\n\n    If pid is not a children of os.getpid() (current process) just\n    waits until the process disappears and return None.\n\n    If pid does not exist at all return None immediately.\n\n    Raise TimeoutExpired on timeout expired.\n    \"\"\"\n    def check_timeout(delay):\n        if timeout is not None:\n            if timer() >= stop_at:\n                raise TimeoutExpired()\n        time.sleep(delay)\n        return min(delay * 2, 0.04)\n\n    timer = getattr(time, 'monotonic', time.time)\n    if timeout is not None:\n        waitcall = lambda: os.waitpid(pid, os.WNOHANG)\n        stop_at = timer() + timeout\n    else:\n        waitcall = lambda: os.waitpid(pid, 0)\n\n    delay = 0.0001\n    while True:\n        try:\n            retpid, status = waitcall()\n        except OSError as err:\n            if err.errno == errno.EINTR:\n                delay = check_timeout(delay)\n                continue\n            elif err.errno == errno.ECHILD:\n                # This has two meanings:\n                # - pid is not a child of os.getpid() in which case\n                #   we keep polling until it's gone\n                # - pid never existed in the first place\n                # In both cases we'll eventually return None as we\n                # can't determine its exit status code.\n                while True:\n                    if pid_exists(pid):\n                        delay = check_timeout(delay)\n                    else:\n                        return\n            else:\n                raise\n        else:\n            if retpid == 0:\n                # WNOHANG was used, pid is still running\n                delay = check_timeout(delay)\n                continue\n            # process exited due to a signal; return the integer of\n            # that signal\n            if os.WIFSIGNALED(status):\n                return 
os.WTERMSIG(status)\n            # process exited via the exit(2) system call; return the\n            # integer exit(2) was called with\n            elif os.WIFEXITED(status):\n                return os.WEXITSTATUS(status)\n            else:\n                # should never happen\n                raise RuntimeError(\"unknown process exit status\")\n\n\ndef disk_usage(path):\n    \"\"\"Return disk usage associated with path.\"\"\"\n    try:\n        st = os.statvfs(path)\n    except UnicodeEncodeError:\n        if not PY3 and isinstance(path, unicode):\n            # this is a bug with os.statvfs() and unicode on\n            # Python 2, see:\n            # - https://github.com/giampaolo/psutil/issues/416\n            # - http://bugs.python.org/issue18695\n            try:\n                path = path.encode(sys.getfilesystemencoding())\n            except UnicodeEncodeError:\n                pass\n            st = os.statvfs(path)\n        else:\n            raise\n    free = (st.f_bavail * st.f_frsize)\n    total = (st.f_blocks * st.f_frsize)\n    used = (st.f_blocks - st.f_bfree) * st.f_frsize\n    percent = usage_percent(used, total, _round=1)\n    # NB: the percentage can be about 5% lower than what is shown by df\n    # due to reserved blocks that we are currently not considering:\n    # http://goo.gl/sWGbH\n    return sdiskusage(total, used, free, percent)\n\n\n@memoize\ndef _get_terminal_map():\n    ret = {}\n    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')\n    for name in ls:\n        assert name not in ret\n        try:\n            ret[os.stat(name).st_rdev] = name\n        except OSError as err:\n            if err.errno != errno.ENOENT:\n                raise\n    return ret\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_pssunos.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Sun OS Solaris platform implementation.\"\"\"\n\nimport errno\nimport os\nimport socket\nimport subprocess\nimport sys\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil import _psposix\nfrom psutil._common import usage_percent, isfile_strict\nfrom psutil._compat import PY3\nimport _psutil_posix\nimport _psutil_sunos as cext\n\n\n__extra__all__ = [\"CONN_IDLE\", \"CONN_BOUND\"]\n\nPAGE_SIZE = os.sysconf('SC_PAGE_SIZE')\n\nCONN_IDLE = \"IDLE\"\nCONN_BOUND = \"BOUND\"\n\nPROC_STATUSES = {\n    cext.SSLEEP: _common.STATUS_SLEEPING,\n    cext.SRUN: _common.STATUS_RUNNING,\n    cext.SZOMB: _common.STATUS_ZOMBIE,\n    cext.SSTOP: _common.STATUS_STOPPED,\n    cext.SIDL: _common.STATUS_IDLE,\n    cext.SONPROC: _common.STATUS_RUNNING,  # same as run\n    cext.SWAIT: _common.STATUS_WAITING,\n}\n\nTCP_STATUSES = {\n    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,\n    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,\n    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,\n    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,\n    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.TCPS_CLOSED: _common.CONN_CLOSE,\n    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.TCPS_LISTEN: _common.CONN_LISTEN,\n    cext.TCPS_CLOSING: _common.CONN_CLOSING,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific\n    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific\n}\n\nscputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])\nsvmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])\npextmem = namedtuple('pextmem', ['rss', 'vms'])\npmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n# --- functions\n\ndisk_io_counters = cext.disk_io_counters\nnet_io_counters = cext.net_io_counters\ndisk_usage = _psposix.disk_usage\n\n\ndef virtual_memory():\n    # we could have done this with kstat, but imho this is good enough\n    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE\n    # note: there's no difference on Solaris\n    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return svmem(total, avail, percent, used, free)\n\n\ndef swap_memory():\n    sin, sout = cext.swap_mem()\n    # XXX\n    # we are supposed to get total/free by doing so:\n    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/\n    #     usr/src/cmd/swap/swap.c\n    # ...nevertheless I can't manage to obtain the same numbers as 'swap'\n    # cmdline utility, so let's parse its output (sigh!)\n    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)\n    stdout, stderr = p.communicate()\n    if PY3:\n        stdout = stdout.decode(sys.stdout.encoding)\n    if p.returncode != 0:\n        raise RuntimeError(\"'swap -l -k' failed (retcode=%s)\" % p.returncode)\n\n    lines = stdout.strip().split('\\n')[1:]\n    if not lines:\n        raise RuntimeError('no swap device(s) configured')\n    total = free = 0\n    for line in 
lines:\n        line = line.split()\n        t, f = line[-2:]\n        t = t.replace('K', '')\n        f = f.replace('K', '')\n        total += int(int(t) * 1024)\n        free += int(int(f) * 1024)\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent,\n                         sin * PAGE_SIZE, sout * PAGE_SIZE)\n\n\ndef pids():\n    \"\"\"Returns a list of PIDs currently running on the system.\"\"\"\n    return [int(x) for x in os.listdir('/proc') if x.isdigit()]\n\n\ndef pid_exists(pid):\n    \"\"\"Check for the existence of a unix pid.\"\"\"\n    return _psposix.pid_exists(pid)\n\n\ndef cpu_times():\n    \"\"\"Return system-wide CPU times as a named tuple\"\"\"\n    ret = cext.per_cpu_times()\n    return scputimes(*[sum(x) for x in zip(*ret)])\n\n\ndef per_cpu_times():\n    \"\"\"Return system per-CPU times as a list of named tuples\"\"\"\n    ret = cext.per_cpu_times()\n    return [scputimes(*x) for x in ret]\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    try:\n        return os.sysconf(\"SC_NPROCESSORS_ONLN\")\n    except ValueError:\n        # mimic os.cpu_count() behavior\n        return None\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    localhost = (':0.0', ':0')\n    for item in rawlist:\n        user, tty, hostname, tstamp, user_process = item\n        # note: the underlying C function includes entries about\n        # system boot, run level and others.  We might want\n        # to use them in the future.\n        if not user_process:\n            continue\n        if hostname in localhost:\n            hostname = 'localhost'\n        nt = _common.suser(user, tty, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\ndef disk_partitions(all=False):\n    \"\"\"Return system disk partitions.\"\"\"\n    # TODO - the filtering logic should be better checked so that\n    # it tries to reflect 'df' as much as possible\n    retlist = []\n    partitions = cext.disk_partitions()\n    for partition in partitions:\n        device, mountpoint, fstype, opts = partition\n        if device == 'none':\n            device = ''\n        if not all:\n            # Differently from, say, Linux, we don't have a list of\n            # common fs types so the best we can do, AFAIK, is to\n            # filter by filesystem having a total size > 0.\n            if not disk_usage(mountpoint).total:\n                continue\n        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)\n        retlist.append(ntuple)\n    return retlist\n\n\ndef net_connections(kind, _pid=-1):\n    \"\"\"Return socket connections.  
If pid == -1 return system-wide\n    connections (as opposed to connections opened by one process only).\n    Only INET sockets are returned (UNIX are not).\n    \"\"\"\n    cmap = _common.conn_tmap.copy()\n    if _pid == -1:\n        cmap.pop('unix', 0)\n    if kind not in cmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in cmap])))\n    families, types = _common.conn_tmap[kind]\n    rawlist = cext.net_connections(_pid, families, types)\n    ret = []\n    for item in rawlist:\n        fd, fam, type_, laddr, raddr, status, pid = item\n        if fam not in families:\n            continue\n        if type_ not in types:\n            continue\n        status = TCP_STATUSES[status]\n        if _pid == -1:\n            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)\n        else:\n            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)\n        ret.append(nt)\n    return ret\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Call the callable inside a try/except clause and translate ENOENT,\n    EACCES and EPERM into NoSuchProcess or AccessDenied exceptions.\n    \"\"\"\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except EnvironmentError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            # ENOENT (no such file or directory) gets raised on open().\n            # ESRCH (no such process) can get raised on read() if\n            # process is gone in meantime.\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                raise NoSuchProcess(self.pid, self._name)\n            if err.errno in (errno.EPERM, errno.EACCES):\n                raise AccessDenied(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        # note: max len == 15\n        return cext.proc_name_and_args(self.pid)[0]\n\n    @wrap_exceptions\n    def exe(self):\n        # Will be guessed later from cmdline but we want to explicitly\n        # invoke cmdline here in order to get an AccessDenied\n        # exception if the user does not have enough privileges.\n        self.cmdline()\n        return \"\"\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_name_and_args(self.pid)[1].split(' ')\n\n    @wrap_exceptions\n    def create_time(self):\n        return cext.proc_basic_info(self.pid)[3]\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_basic_info(self.pid)[5]\n\n    @wrap_exceptions\n    def nice_get(self):\n        # For some reason getpriority(3) returns ESRCH (no such process)\n        # for certain low-pid processes, no matter what (even as root).\n        # The process actually exists though, as it has a name,\n        # creation time, etc.\n        # The best thing we can do here appears to be raising AD.\n        # Note: tested on Solaris 11; on Open Solaris 5 everything is\n        # fine.\n        try:\n            return _psutil_posix.getpriority(self.pid)\n        except EnvironmentError as err:\n            if err.errno in (errno.ENOENT, errno.ESRCH):\n                if pid_exists(self.pid):\n                    raise 
AccessDenied(self.pid, self._name)\n            raise\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        if self.pid in (2, 3):\n            # Special case PIDs: internally setpriority(3) returns ESRCH\n            # (no such process), no matter what.\n            # The process actually exists though, as it has a name,\n            # creation time, etc.\n            raise AccessDenied(self.pid, self._name)\n        return _psutil_posix.setpriority(self.pid, value)\n\n    @wrap_exceptions\n    def ppid(self):\n        return cext.proc_basic_info(self.pid)[0]\n\n    @wrap_exceptions\n    def uids(self):\n        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)\n        return _common.puids(real, effective, saved)\n\n    @wrap_exceptions\n    def gids(self):\n        _, _, _, real, effective, saved = cext.proc_cred(self.pid)\n        return _common.pgids(real, effective, saved)\n\n    @wrap_exceptions\n    def cpu_times(self):\n        user, system = cext.proc_cpu_times(self.pid)\n        return _common.pcputimes(user, system)\n\n    @wrap_exceptions\n    def terminal(self):\n        hit_enoent = False\n        tty = wrap_exceptions(\n            cext.proc_basic_info(self.pid)[0])\n        if tty != cext.PRNODEV:\n            for x in (0, 1, 2, 255):\n                try:\n                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))\n                except OSError as err:\n                    if err.errno == errno.ENOENT:\n                        hit_enoent = True\n                        continue\n                    raise\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        # /proc/PID/path/cwd may not be resolved by readlink() even if\n        # it exists (ls shows it). If that's the case and the process\n        # is still alive return None (we can return None also on BSD).\n        # Reference: http://goo.gl/55XgO\n        try:\n            return os.readlink(\"/proc/%s/path/cwd\" % self.pid)\n        except OSError as err:\n            if err.errno == errno.ENOENT:\n                os.stat(\"/proc/%s\" % self.pid)\n                return None\n            raise\n\n    @wrap_exceptions\n    def memory_info(self):\n        ret = cext.proc_basic_info(self.pid)\n        rss, vms = ret[1] * 1024, ret[2] * 1024\n        return _common.pmem(rss, vms)\n\n    # it seems Solaris uses rss and vms only\n    memory_info_ex = memory_info\n\n    @wrap_exceptions\n    def status(self):\n        code = cext.proc_basic_info(self.pid)[6]\n        # XXX is '?' legit? 
(we're not supposed to return it anyway)\n        return PROC_STATUSES.get(code, '?')\n\n    @wrap_exceptions\n    def threads(self):\n        ret = []\n        tids = os.listdir('/proc/%d/lwp' % self.pid)\n        hit_enoent = False\n        for tid in tids:\n            tid = int(tid)\n            try:\n                utime, stime = cext.query_process_thread(\n                    self.pid, tid)\n            except EnvironmentError as err:\n                # ENOENT == thread gone in meantime\n                if err.errno == errno.ENOENT:\n                    hit_enoent = True\n                    continue\n                raise\n            else:\n                nt = _common.pthread(tid, utime, stime)\n                ret.append(nt)\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return ret\n\n    @wrap_exceptions\n    def open_files(self):\n        retlist = []\n        hit_enoent = False\n        pathdir = '/proc/%d/path' % self.pid\n        for fd in os.listdir('/proc/%d/fd' % self.pid):\n            path = os.path.join(pathdir, fd)\n            if os.path.islink(path):\n                try:\n                    file = os.readlink(path)\n                except OSError as err:\n                    # ENOENT == file which is gone in the meantime\n                    if err.errno == errno.ENOENT:\n                        hit_enoent = True\n                        continue\n                    raise\n                else:\n                    if isfile_strict(file):\n                        retlist.append(_common.popenfile(file, int(fd)))\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    def _get_unix_sockets(self, pid):\n        \"\"\"Get UNIX sockets used by process by parsing 'pfiles' output.\"\"\"\n        # TODO: rewrite this in C (...but the damn netstat source code\n        # does not include this part! Argh!!)\n        cmd = \"pfiles %s\" % pid\n        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE)\n        stdout, stderr = p.communicate()\n        if PY3:\n            stdout, stderr = [x.decode(sys.stdout.encoding)\n                              for x in (stdout, stderr)]\n        if p.returncode != 0:\n            if 'permission denied' in stderr.lower():\n                raise AccessDenied(self.pid, self._name)\n            if 'no such process' in stderr.lower():\n                raise NoSuchProcess(self.pid, self._name)\n            raise RuntimeError(\"%r command error\\n%s\" % (cmd, stderr))\n\n        lines = stdout.split('\\n')[2:]\n        for i, line in enumerate(lines):\n            line = line.lstrip()\n            if line.startswith('sockname: AF_UNIX'):\n                path = line.split(' ', 2)[2]\n                type = lines[i - 2].strip()\n                if type == 'SOCK_STREAM':\n                    type = socket.SOCK_STREAM\n                elif type == 'SOCK_DGRAM':\n                    type = socket.SOCK_DGRAM\n                else:\n                    type = -1\n                yield (-1, socket.AF_UNIX, type, path, \"\", _common.CONN_NONE)\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        ret = net_connections(kind, _pid=self.pid)\n        # The underlying C implementation retrieves all OS connections\n        # and filters them by PID.  
At this point we can't tell whether\n        # an empty list means there were no connections for the process\n        # or the process is no longer active, so we force NSP in case\n        # the PID is no longer there.\n        if not ret:\n            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone\n\n        # UNIX sockets\n        if kind in ('all', 'unix'):\n            ret.extend([_common.pconn(*conn) for conn in\n                        self._get_unix_sockets(self.pid)])\n        return ret\n\n    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')\n    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')\n\n    @wrap_exceptions\n    def memory_maps(self):\n        def toaddr(start, end):\n            return '%s-%s' % (hex(start)[2:].strip('L'),\n                              hex(end)[2:].strip('L'))\n\n        retlist = []\n        rawlist = cext.proc_memory_maps(self.pid)\n        hit_enoent = False\n        for item in rawlist:\n            addr, addrsize, perm, name, rss, anon, locked = item\n            addr = toaddr(addr, addrsize)\n            if not name.startswith('['):\n                try:\n                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))\n                except OSError as err:\n                    if err.errno == errno.ENOENT:\n                        # sometimes the link may not be resolved by\n                        # readlink() even if it exists (ls shows it).\n                        # If that's the case we just return the\n                        # unresolved link path.\n                        # This seems to be an inconsistency with /proc\n                        # similar to: http://goo.gl/55XgO\n                        name = '/proc/%s/path/%s' % (self.pid, name)\n                        hit_enoent = True\n                    else:\n                        raise\n            retlist.append((addr, perm, name, rss, anon, locked))\n        if hit_enoent:\n            # raise NSP if the process disappeared on us\n            os.stat('/proc/%s' % self.pid)\n        return retlist\n\n    @wrap_exceptions\n    def num_fds(self):\n        return len(os.listdir(\"/proc/%s/fd\" % self.pid))\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        try:\n            return _psposix.wait_pid(self.pid, timeout)\n        except _psposix.TimeoutExpired:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise\n            raise TimeoutExpired(timeout, self.pid, self._name)\n"
  },
  {
    "path": "Common/libpsutil/py2.7-glibc-2.12+/psutil/_pswindows.py",
    "content": "#!/usr/bin/env python\n\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Windows platform implementation.\"\"\"\n\nimport errno\nimport functools\nimport os\nfrom collections import namedtuple\n\nfrom psutil import _common\nfrom psutil._common import conn_tmap, usage_percent, isfile_strict\nfrom psutil._compat import PY3, xrange, lru_cache\nimport _psutil_windows as cext\n\n# process priority constants, import from __init__.py:\n# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx\n__extra__all__ = [\"ABOVE_NORMAL_PRIORITY_CLASS\", \"BELOW_NORMAL_PRIORITY_CLASS\",\n                  \"HIGH_PRIORITY_CLASS\", \"IDLE_PRIORITY_CLASS\",\n                  \"NORMAL_PRIORITY_CLASS\", \"REALTIME_PRIORITY_CLASS\",\n                  #\n                  \"CONN_DELETE_TCB\",\n                  ]\n\n# --- module level constants (gets pushed up to psutil module)\n\nCONN_DELETE_TCB = \"DELETE_TCB\"\nWAIT_TIMEOUT = 0x00000102  # 258 in decimal\nACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,\n                               cext.ERROR_ACCESS_DENIED])\n\nTCP_STATUSES = {\n    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,\n    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,\n    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,\n    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,\n    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,\n    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,\n    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,\n    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,\n    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,\n    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,\n    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,\n    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,\n    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,\n}\n\n\nscputimes = namedtuple('scputimes', ['user', 'system', 'idle'])\nsvmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])\npextmem = namedtuple(\n    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',\n                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',\n                'pagefile', 'peak_pagefile', 'private'])\npmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])\npmmap_ext = namedtuple(\n    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))\n\n# set later from __init__.py\nNoSuchProcess = None\nAccessDenied = None\nTimeoutExpired = None\n\n\n@lru_cache(maxsize=512)\ndef _win32_QueryDosDevice(s):\n    return cext.win32_QueryDosDevice(s)\n\n\ndef _convert_raw_path(s):\n    # convert paths using native DOS format like:\n    # \"\\Device\\HarddiskVolume1\\Windows\\systemew\\file.txt\"\n    # into: \"C:\\Windows\\systemew\\file.txt\"\n    if PY3 and not isinstance(s, str):\n        s = s.decode('utf8')\n    rawdrive = '\\\\'.join(s.split('\\\\')[:3])\n    driveletter = _win32_QueryDosDevice(rawdrive)\n    return os.path.join(driveletter, s[len(rawdrive):])\n\n\n# --- public functions\n\n\ndef virtual_memory():\n    \"\"\"System virtual memory as a namedtuple.\"\"\"\n    mem = cext.virtual_mem()\n    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem\n    #\n    total = totphys\n    avail = availphys\n    free = availphys\n    used = total - avail\n    percent = usage_percent((total - avail), total, _round=1)\n    return svmem(total, avail, percent, used, free)\n\n\ndef 
swap_memory():\n    \"\"\"Swap system memory as a (total, used, free, sin, sout) tuple.\"\"\"\n    mem = cext.virtual_mem()\n    total = mem[2]\n    free = mem[3]\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sswap(total, used, free, percent, 0, 0)\n\n\ndef disk_usage(path):\n    \"\"\"Return disk usage associated with path.\"\"\"\n    try:\n        total, free = cext.disk_usage(path)\n    except WindowsError:\n        if not os.path.exists(path):\n            msg = \"No such file or directory: '%s'\" % path\n            raise OSError(errno.ENOENT, msg)\n        raise\n    used = total - free\n    percent = usage_percent(used, total, _round=1)\n    return _common.sdiskusage(total, used, free, percent)\n\n\ndef disk_partitions(all):\n    \"\"\"Return disk partitions.\"\"\"\n    rawlist = cext.disk_partitions(all)\n    return [_common.sdiskpart(*x) for x in rawlist]\n\n\ndef cpu_times():\n    \"\"\"Return system CPU times as a named tuple.\"\"\"\n    user, system, idle = cext.cpu_times()\n    return scputimes(user, system, idle)\n\n\ndef per_cpu_times():\n    \"\"\"Return system per-CPU times as a list of named tuples.\"\"\"\n    ret = []\n    for cpu_t in cext.per_cpu_times():\n        user, system, idle = cpu_t\n        item = scputimes(user, system, idle)\n        ret.append(item)\n    return ret\n\n\ndef cpu_count_logical():\n    \"\"\"Return the number of logical CPUs in the system.\"\"\"\n    return cext.cpu_count_logical()\n\n\ndef cpu_count_physical():\n    \"\"\"Return the number of physical CPUs in the system.\"\"\"\n    return cext.cpu_count_phys()\n\n\ndef boot_time():\n    \"\"\"The system boot time expressed in seconds since the epoch.\"\"\"\n    return cext.boot_time()\n\n\ndef net_connections(kind, _pid=-1):\n    \"\"\"Return socket connections.  
If pid == -1 return system-wide\n    connections (as opposed to connections opened by one process only).\n    \"\"\"\n    if kind not in conn_tmap:\n        raise ValueError(\"invalid %r kind argument; choose between %s\"\n                         % (kind, ', '.join([repr(x) for x in conn_tmap])))\n    families, types = conn_tmap[kind]\n    rawlist = cext.net_connections(_pid, families, types)\n    ret = []\n    for item in rawlist:\n        fd, fam, type, laddr, raddr, status, pid = item\n        status = TCP_STATUSES[status]\n        if _pid == -1:\n            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)\n        else:\n            nt = _common.pconn(fd, fam, type, laddr, raddr, status)\n        ret.append(nt)\n    return ret\n\n\ndef users():\n    \"\"\"Return currently connected users as a list of namedtuples.\"\"\"\n    retlist = []\n    rawlist = cext.users()\n    for item in rawlist:\n        user, hostname, tstamp = item\n        nt = _common.suser(user, None, hostname, tstamp)\n        retlist.append(nt)\n    return retlist\n\n\npids = cext.pids\npid_exists = cext.pid_exists\nnet_io_counters = cext.net_io_counters\ndisk_io_counters = cext.disk_io_counters\nppid_map = cext.ppid_map  # not meant to be public\n\n\ndef wrap_exceptions(fun):\n    \"\"\"Decorator which translates bare OSError and WindowsError\n    exceptions into NoSuchProcess and AccessDenied.\n    \"\"\"\n    @functools.wraps(fun)\n    def wrapper(self, *args, **kwargs):\n        try:\n            return fun(self, *args, **kwargs)\n        except OSError as err:\n            # support for private module import\n            if NoSuchProcess is None or AccessDenied is None:\n                raise\n            if err.errno in ACCESS_DENIED_SET:\n                raise AccessDenied(self.pid, self._name)\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            raise\n    return wrapper\n\n\nclass Process(object):\n    \"\"\"Wrapper class around underlying C implementation.\"\"\"\n\n    __slots__ = [\"pid\", \"_name\"]\n\n    def __init__(self, pid):\n        self.pid = pid\n        self._name = None\n\n    @wrap_exceptions\n    def name(self):\n        \"\"\"Return process name, which on Windows is always the final\n        part of the executable.\n        \"\"\"\n        # This is how PIDs 0 and 4 are always represented in taskmgr\n        # and process-hacker.\n        if self.pid == 0:\n            return \"System Idle Process\"\n        elif self.pid == 4:\n            return \"System\"\n        else:\n            return os.path.basename(self.exe())\n\n    @wrap_exceptions\n    def exe(self):\n        # Note: os.path.exists(path) may return False even if the file\n        # is there, see:\n        # http://stackoverflow.com/questions/3112546/os-path-exists-lies\n\n        # see https://github.com/giampaolo/psutil/issues/414\n        # see https://github.com/giampaolo/psutil/issues/528\n        if self.pid in (0, 4):\n            raise AccessDenied(self.pid, self._name)\n        return _convert_raw_path(cext.proc_exe(self.pid))\n\n    @wrap_exceptions\n    def cmdline(self):\n        return cext.proc_cmdline(self.pid)\n\n    def ppid(self):\n        try:\n            return ppid_map()[self.pid]\n        except KeyError:\n            raise NoSuchProcess(self.pid, self._name)\n\n    def _get_raw_meminfo(self):\n        try:\n            return cext.proc_memory_info(self.pid)\n        except OSError as err:\n            if err.errno in 
ACCESS_DENIED_SET:\n                return cext.proc_memory_info_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def memory_info(self):\n        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage\n        # fields of PROCESS_MEMORY_COUNTERS struct:\n        # http://msdn.microsoft.com/en-us/library/windows/desktop/\n        #     ms684877(v=vs.85).aspx\n        t = self._get_raw_meminfo()\n        return _common.pmem(t[2], t[7])\n\n    @wrap_exceptions\n    def memory_info_ex(self):\n        return pextmem(*self._get_raw_meminfo())\n\n    def memory_maps(self):\n        try:\n            raw = cext.proc_memory_maps(self.pid)\n        except OSError as err:\n            # XXX - can't use wrap_exceptions decorator as we're\n            # returning a generator; probably needs refactoring.\n            if err.errno in ACCESS_DENIED_SET:\n                raise AccessDenied(self.pid, self._name)\n            if err.errno == errno.ESRCH:\n                raise NoSuchProcess(self.pid, self._name)\n            raise\n        else:\n            for addr, perm, path, rss in raw:\n                path = _convert_raw_path(path)\n                addr = hex(addr)\n                yield (addr, perm, path, rss)\n\n    @wrap_exceptions\n    def kill(self):\n        return cext.proc_kill(self.pid)\n\n    @wrap_exceptions\n    def wait(self, timeout=None):\n        if timeout is None:\n            timeout = cext.INFINITE\n        else:\n            # WaitForSingleObject() expects time in milliseconds\n            timeout = int(timeout * 1000)\n        ret = cext.proc_wait(self.pid, timeout)\n        if ret == WAIT_TIMEOUT:\n            # support for private module import\n            if TimeoutExpired is None:\n                raise RuntimeError(\"timeout expired\")\n            raise TimeoutExpired(timeout, self.pid, self._name)\n        return ret\n\n    @wrap_exceptions\n    def username(self):\n        if self.pid in (0, 4):\n            return 'NT AUTHORITY\\\\SYSTEM'\n        return cext.proc_username(self.pid)\n\n    @wrap_exceptions\n    def create_time(self):\n        # special case for kernel process PIDs; return system boot time\n        if self.pid in (0, 4):\n            return boot_time()\n        try:\n            return cext.proc_create_time(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                return cext.proc_create_time_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def num_threads(self):\n        return cext.proc_num_threads(self.pid)\n\n    @wrap_exceptions\n    def threads(self):\n        rawlist = cext.proc_threads(self.pid)\n        retlist = []\n        for thread_id, utime, stime in rawlist:\n            ntuple = _common.pthread(thread_id, utime, stime)\n            retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def cpu_times(self):\n        try:\n            ret = cext.proc_cpu_times(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                ret = cext.proc_cpu_times_2(self.pid)\n            else:\n                raise\n        return _common.pcputimes(*ret)\n\n    @wrap_exceptions\n    def suspend(self):\n        return cext.proc_suspend(self.pid)\n\n    @wrap_exceptions\n    def resume(self):\n        return cext.proc_resume(self.pid)\n\n    @wrap_exceptions\n    def cwd(self):\n        if self.pid in (0, 4):\n            raise AccessDenied(self.pid, self._name)\n        # return a normalized pathname since the native 
C function appends\n        # \"\\\\\" at the end of the path\n        path = cext.proc_cwd(self.pid)\n        return os.path.normpath(path)\n\n    @wrap_exceptions\n    def open_files(self):\n        if self.pid in (0, 4):\n            return []\n        retlist = []\n        # Filenames come in native format like:\n        # \"\\Device\\HarddiskVolume1\\Windows\\systemew\\file.txt\"\n        # Convert the first part into the corresponding drive letter\n        # (e.g. \"C:\\\") by using Windows's QueryDosDevice()\n        raw_file_names = cext.proc_open_files(self.pid)\n        for file in raw_file_names:\n            file = _convert_raw_path(file)\n            if isfile_strict(file) and file not in retlist:\n                ntuple = _common.popenfile(file, -1)\n                retlist.append(ntuple)\n        return retlist\n\n    @wrap_exceptions\n    def connections(self, kind='inet'):\n        return net_connections(kind, _pid=self.pid)\n\n    @wrap_exceptions\n    def nice_get(self):\n        return cext.proc_priority_get(self.pid)\n\n    @wrap_exceptions\n    def nice_set(self, value):\n        return cext.proc_priority_set(self.pid, value)\n\n    # available on Windows >= Vista\n    if hasattr(cext, \"proc_io_priority_get\"):\n        @wrap_exceptions\n        def ionice_get(self):\n            return cext.proc_io_priority_get(self.pid)\n\n        @wrap_exceptions\n        def ionice_set(self, value, _):\n            if _:\n                raise TypeError(\"set_proc_ionice() on Windows takes only \"\n                                \"1 argument (2 given)\")\n            if value not in (2, 1, 0):\n                raise ValueError(\"value must be 2 (normal), 1 (low) or 0 \"\n                                 \"(very low); got %r\" % value)\n            return cext.proc_io_priority_set(self.pid, value)\n\n    @wrap_exceptions\n    def io_counters(self):\n        try:\n            ret = cext.proc_io_counters(self.pid)\n        except OSError as err:\n            if err.errno in ACCESS_DENIED_SET:\n                ret = cext.proc_io_counters_2(self.pid)\n            else:\n                raise\n        return _common.pio(*ret)\n\n    @wrap_exceptions\n    def status(self):\n        suspended = cext.proc_is_suspended(self.pid)\n        if suspended:\n            return _common.STATUS_STOPPED\n        else:\n            return _common.STATUS_RUNNING\n\n    @wrap_exceptions\n    def cpu_affinity_get(self):\n        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]\n        bitmask = cext.proc_cpu_affinity_get(self.pid)\n        return from_bitmask(bitmask)\n\n    @wrap_exceptions\n    def cpu_affinity_set(self, value):\n        def to_bitmask(l):\n            if not l:\n                raise ValueError(\"invalid argument %r\" % l)\n            out = 0\n            for b in l:\n                out |= 2 ** b\n            return out\n\n        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER\n        # is returned for an invalid CPU but this seems not to be true,\n        # therefore we check CPUs validity beforehand.\n        allcpus = list(range(len(per_cpu_times())))\n        for cpu in value:\n            if cpu not in allcpus:\n                raise ValueError(\"invalid CPU %r\" % cpu)\n\n        bitmask = to_bitmask(value)\n        cext.proc_cpu_affinity_set(self.pid, bitmask)\n\n    @wrap_exceptions\n    def num_handles(self):\n        try:\n            return cext.proc_num_handles(self.pid)\n        except OSError as err:\n            if err.errno in 
ACCESS_DENIED_SET:\n                return cext.proc_num_handles_2(self.pid)\n            raise\n\n    @wrap_exceptions\n    def num_ctx_switches(self):\n        tupl = cext.proc_num_ctx_switches(self.pid)\n        return _common.pctxsw(*tupl)\n"
  },
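Note: the `cpu_affinity_get`/`cpu_affinity_set` methods in the psutil Windows module above convert between a list of CPU indices and a Windows affinity bitmask. A minimal standalone sketch of that round-trip (the function names are illustrative, lifted out of the methods rather than part of the module's public API):

```python
def to_bitmask(cpus):
    # Mirrors the helper inside cpu_affinity_set: [0, 2] -> 0b101 == 5.
    if not cpus:
        raise ValueError("invalid argument %r" % cpus)
    mask = 0
    for cpu in cpus:
        mask |= 1 << cpu
    return mask

def from_bitmask(mask):
    # Mirrors the lambda inside cpu_affinity_get: 5 -> [0, 2].
    return [i for i in range(64) if (1 << i) & mask]

assert to_bitmask([0, 2]) == 5
assert from_bitmask(5) == [0, 2]
```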
  {
    "path": "Common/waagentloader.py",
    "content": "# Wrapper module for waagent\n#\n# waagent is not written as a module. This wrapper module is created \n# to use the waagent code as a module.\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\ndef load_waagent(path=None):\n    if path is None:\n        pwd = os.path.dirname(os.path.abspath(__file__))\n        path = os.path.join(pwd, 'waagent')\n    waagent = None\n    if sys.version_info >= (3, 12):\n        import importlib.util\n        spec = importlib.util.spec_from_file_location('waagent', path)\n        waagent = importlib.util.module_from_spec(spec)\n        spec.loader.exec_module(waagent)\n    else:\n        import imp\n        waagent = imp.load_source('waagent', path)\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.MyDistro = waagent.GetMyDistro()\n    waagent.Config = waagent.ConfigurationProvider(None)\n    return waagent\n\n"
  },
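A minimal usage sketch for the loader above; it assumes a `waagent` script sits next to `waagentloader.py`, which is what the default `path=None` logic expects. `load_waagent()` itself runs `LoggerInit` and populates `MyDistro` and `Config`:

```python
from waagentloader import load_waagent

# Load ./waagent as a module (requires a real waagent file and write
# access to /var/log/waagent.log for LoggerInit).
waagent = load_waagent()

# The attributes set by the loader are now available:
print(type(waagent.MyDistro).__name__)
print(waagent.Config is not None)
```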
  {
    "path": "CustomScript/CHANGELOG.md",
    "content": "## vNext (yyyy-mm-dd)\n- Error message misleading [#150]\n- Fix for internal DNS check [#98]\n\n## 1.5.2.0 (2016-04-11)\n- Fix state machine for status transitions. [#119]\n\n## 1.5.1.0 (2016-04-05)\n- Atomically write the status file. [#117]\n\n## 1.5.0.0 (2016-03-23)\n- Refactor CustomScript and add LogUtil & ScriptUtil\n- Refine MDS enents to log which file the extension fails to download\n- Do not log `commandToExecute` to `extension.log` if it's passed by protectedSettings\n\n## 1.4.1.0 (2015-12-21)\n- Move downloading scripts and internal DNS check into the daemon process\n- Provide an option to disable internal DNS check\n- Add a timeout to urllib2.urlopen()\n\n## 1.4.0.0 (2015-11-19)\n- Protect sensitive data in `commandToExecute`\n"
  },
  {
    "path": "CustomScript/HandlerManifest.json",
    "content": "[\n  {\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"disableCommand\": \"shim.sh -disable\",\n      \"enableCommand\": \"shim.sh -enable\",\n      \"installCommand\": \"shim.sh -install\",\n      \"uninstallCommand\": \"shim.sh -uninstall\",\n      \"updateCommand\": \"shim.sh -update\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false\n    }\n  }\n]\n"
  },
  {
    "path": "CustomScript/README.md",
    "content": "# CustomScript Extension\n\nAllow the owner of the Azure Virtual Machines to run customized scripts in the VM.\n\n\n# :warning: New Version Notice :warning:\n\nA new version of **Custom Script Extension** is available at https://github.com/Azure/custom-script-extension-linux. The new `v2.0` version offers better reliability and wider Linux distro support. \n\nPlease consider switching your new deployments to use the new version (`Microsoft.Azure.Extensions.CustomScript`) instead. The new version is intended to be a drop-in replacement. Therefore, the migration is as easy as changing the name and version, you do not need to change your extension configuration.\n\n-----------------------------\n\nThis user guide is for `Microsoft.OSTCExtensions.CustomScript` extension.\n\nYou can read the User Guide below.\n* [Automate Linux VM Customization Tasks Using CustomScript Extension (outdated, needs to update)](https://azure.microsoft.com/en-us/blog/automate-linux-vm-customization-tasks-using-customscript-extension/)\n\nCustomScript Extension can:\n* If provided, download the customized scripts from Azure Storage or external public storage (e.g. Github)\n* Run the entrypoint script\n* Support inline command\n* Convert Windows style newline in Shell and Python scripts automatically\n* Remove BOM in Shell and Python scripts automatically\n* Protect sensitive data in `commandToExecute`\n\n**Note:** The timeout for script download is 200 seconds. There is no timeout period for script execution.\n\n# User Guide\n\n## 1. Configuration schema\n\n### 1.1. Public configuration\n\nSchema for the public configuration file looks like this:\n\n* `fileUris`: (optional, string array) the uri list of the scripts\n* `commandToExecute`: (required, string) the entrypoint script to execute\n* `enableInternalDNSCheck`: (optional, bool) default is True, set to False to disable DNS check.\n \n```json\n{\n  \"fileUris\": [\"<url>\"],\n  \"commandToExecute\": \"<command-to-execute>\"\n}\n```\n\n### 1.2. Protected configuration\nSchema for the protected configuration file looks like this:\n\n* `commandToExecute`: (optional, string) the entrypoint script to execute\n* `storageAccountName`: (optional, string) the name of storage account\n* `storageAccountKey`: (optional, string) the access key of storage account\n\n```json\n{\n  \"commandToExecute\": \"<command-to-execute>\",\n  \"storageAccountName\": \"<storage-account-name>\",\n  \"storageAccountKey\": \"<storage-account-key>\"\n}\n```\n\n**NOTE:**\n\n1. The storage account here is to store the scripts in `fileUris`.\nIf the scripts are stored in the private Azure Storage, you should provide\n`storageAccountName` and `storageAccountKey`. You can get these two values from Azure Portal.\n*Currently only general purpose storage accounts are supported. We intend to add support for the new [Azure Cool Blob Storage](https://azure.microsoft.com/en-us/blog/introducing-azure-cool-storage/) in the near future. See #161*\n2. `commandToExecute` in protected settings can protect your sensitive data.\nBut `commandToExecute` should not be specified both in public and protected configurations.\n\n## 2. Deploying the Extension to a VM\n\nYou can deploy it using Azure CLI, Azure Powershell and ARM template.\n\n**NOTE:**\n\nCreating VM in Azure has two deployment model: Classic and [Resource Manager][arm-overview].\nIn different models, the deploy commands have different syntaxes. Please select the right\none in section 2.1 and 2.2 below.\n \n### 2.1. 
Using [**Azure CLI**][azure-cli]\nBefore deploying CustomScript Extension, you should configure your \`public.json\` and \`protected.json\`\n(in sections 1.1 and 1.2 above).\n\n#### 2.1.1 Classic\nThe Classic mode is also called Azure Service Management mode. You can change to it by running:\n\`\`\`\n$ azure config mode asm\n\`\`\`\n\nYou can deploy CustomScript Extension by running:\n\`\`\`\n$ azure vm extension set <vm-name> \\\nCustomScriptForLinux Microsoft.OSTCExtensions <version> \\\n--public-config-path public.json  \\\n--private-config-path protected.json\n\`\`\`\n\nIn the command above, you can replace \`<version>\` with \`'*'\` to use the latest\nversion available, or \`'1.*'\` to get the newest version that does not introduce breaking schema changes. To find the latest version available, run:\n\`\`\`\n$ azure vm extension list\n\`\`\`\nYou can also omit \`--private-config-path\` if you do not want to configure those settings.\n\n#### 2.1.2 Resource Manager\nYou can change to Azure Resource Manager mode by running:\n\`\`\`\n$ azure config mode arm\n\`\`\`\n\nYou can deploy CustomScript Extension by running:\n\`\`\`\n$ azure vm extension set <resource-group> <vm-name> \\\nCustomScriptForLinux Microsoft.OSTCExtensions <version> \\\n--public-config-path public.json  \\\n--private-config-path protected.json\n\`\`\`\n\n> **NOTE:** In ARM mode, \`azure vm extension list\` is not available for now.\n\n\n### 2.2. Using [**Azure PowerShell**][azure-powershell]\n\n#### 2.2.1 Classic\n\nYou can log in to your Azure account (Azure Service Management mode) by running:\n\n\`\`\`powershell\nAdd-AzureAccount\n\`\`\`\n\nYou can deploy CustomScript Extension by running:\n\n\`\`\`powershell\n$VmName = '<vm-name>'\n$vm = Get-AzureVM -ServiceName $VmName -Name $VmName\n\n$ExtensionName = 'CustomScriptForLinux'\n$Publisher = 'Microsoft.OSTCExtensions'\n$Version = '<version>'\n\n$PublicConf = '{\n    \"fileUris\": [\"<url>\"],\n    \"commandToExecute\": \"<command>\"\n}'\n$PrivateConf = '{\n    \"storageAccountName\": \"<storage-account-name>\",\n    \"storageAccountKey\": \"<storage-account-key>\"\n}'\n\nSet-AzureVMExtension -ExtensionName $ExtensionName -VM $vm \`\n  -Publisher $Publisher -Version $Version \`\n  -PrivateConfiguration $PrivateConf -PublicConfiguration $PublicConf |\n  Update-AzureVM\n\`\`\`\n\n#### 2.2.2 Resource Manager\n\nYou can log in to your Azure account (Azure Resource Manager mode) by running:\n\n\`\`\`powershell\nLogin-AzureRmAccount\n\`\`\`\n\nClick [**HERE**](https://azure.microsoft.com/en-us/documentation/articles/powershell-azure-resource-manager/) to learn more about how to use Azure PowerShell with Azure Resource Manager.\n\nYou can deploy CustomScript Extension by running:\n\n\`\`\`powershell\n$RGName = '<resource-group-name>'\n$VmName = '<vm-name>'\n$Location = '<location>'\n\n$ExtensionName = 'CustomScriptForLinux'\n$Publisher = 'Microsoft.OSTCExtensions'\n$Version = '<version>'\n\n$PublicConf = '{\n    \"fileUris\": [\"<url>\"],\n    \"commandToExecute\": \"<command>\"\n}'\n$PrivateConf = '{\n    \"storageAccountName\": \"<storage-account-name>\",\n    \"storageAccountKey\": \"<storage-account-key>\"\n}'\n\nSet-AzureRmVMExtension -ResourceGroupName $RGName -VMName $VmName -Location $Location \`\n  -Name $ExtensionName -Publisher $Publisher \`\n  -ExtensionType $ExtensionName -TypeHandlerVersion $Version \`\n  -SettingString $PublicConf -ProtectedSettingString $PrivateConf\n\`\`\`\n\n### 2.3. 
Using [**ARM Template**][arm-template]\n\n\`\`\`json\n{\n  \"type\": \"Microsoft.Compute/virtualMachines/extensions\",\n  \"name\": \"<extension-deployment-name>\",\n  \"apiVersion\": \"<api-version>\",\n  \"location\": \"<location>\",\n  \"dependsOn\": [\n    \"[concat('Microsoft.Compute/virtualMachines/', <vm-name>)]\"\n  ],\n  \"properties\": {\n    \"publisher\": \"Microsoft.OSTCExtensions\",\n    \"type\": \"CustomScriptForLinux\",\n    \"typeHandlerVersion\": \"1.5\",\n    \"autoUpgradeMinorVersion\": true,\n    \"settings\": {\n      \"fileUris\": [\n        \"<url>\"\n      ],\n      \"commandToExecute\": \"<command>\"\n    },\n    \"protectedSettings\": {\n      \"storageAccountName\": \"<storage-account-name>\",\n      \"storageAccountKey\": \"<storage-account-key>\"\n    }\n  }\n}\n\`\`\`\n\nThere are two sample templates in [Azure/azure-quickstart-templates](https://github.com/Azure/azure-quickstart-templates).\n\n* [201-customscript-extension-public-storage-on-ubuntu](https://github.com/Azure/azure-quickstart-templates/tree/master/201-customscript-extension-public-storage-on-ubuntu)\n* [201-customscript-extension-azure-storage-on-ubuntu](https://github.com/Azure/azure-quickstart-templates/tree/master/201-customscript-extension-azure-storage-on-ubuntu)\n\nFor more details about ARM templates, please visit [Authoring Azure Resource Manager templates](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/).\n\n## 3. Scenarios\n\n### 3.1 Run scripts stored in Azure Storage\n\n* Public configuration\n\n  \`\`\`json\n  {\n    \"fileUris\": [\"http://MyAccount.blob.core.windows.net/vhds/MyShellScript.sh\"],\n    \"commandToExecute\": \"sh MyShellScript.sh\"\n  }\n  \`\`\`\n\n* Protected configuration\n\n  \`\`\`json\n  {\n    \"storageAccountName\": \"MyAccount\",\n    \"storageAccountKey\": \"Mykey\"\n  }\n  \`\`\`\n\n### 3.2 Run scripts stored in GitHub\n\n* Public configuration\n\n  \`\`\`json\n  {\n    \"fileUris\": [\"https://github.com/MyProject/Archive/MyPythonScript.py\"],\n    \"commandToExecute\": \"python MyPythonScript.py\"\n  }\n  \`\`\`\n\nThere is no need to provide protected settings.\n\n### 3.3 Run inline scripts\n\n* Public configuration (either of the following)\n\n  \`\`\`json\n  { \"commandToExecute\": \"echo Hello\" }\n  { \"commandToExecute\": \"python -c \\\"print 1.4\\\"\" }\n  \`\`\`\n\n### 3.4 Run scripts with unchanged configurations\n\nRunning scripts with exactly the same configuration is not accepted in the current design.\nIf you need to run scripts repeatedly, you can add a timestamp.\n\n* Public configuration\n\n  \`\`\`json\n  {\n    \"fileUris\": [\"<url>\"],\n    \"commandToExecute\": \"<command>\",\n    \"timestamp\": 123456789\n  }\n  \`\`\`\n\n### 3.5 Run scripts with sensitive data\n\n* Public configuration\n\n  \`\`\`json\n  {\n    \"fileUris\": [\"https://github.com/MyProject/Archive/MyPythonScript.py\"]\n  }\n  \`\`\`\n\n* Protected configuration\n\n  \`\`\`json\n  {\n    \"commandToExecute\": \"python MyPythonScript.py <my-password>\"\n  }\n  \`\`\`\n\n## Supported Linux Distributions\n- CentOS 6.5 and higher\n- Debian 8 and higher\n    - Debian 8.7 does not ship with Python 2 in the latest images, which breaks CustomScriptForLinux.\n- FreeBSD\n- OpenSUSE 13.1 and higher\n- Oracle Linux 6.4 and higher\n- SUSE Linux Enterprise Server 11 SP3 and higher\n- Ubuntu 12.04 and higher\n\n## Debug\n\n* The status of the extension is reported back to Azure so that the user can\nsee the status on the Azure Portal\n* All the execution output and errors of the scripts are logged into\nthe download directory of the 
scripts\n\`/var/lib/waagent/<extension-name-and-version>/download/<seq>/\`,\nand the tail of the output is logged into the log directory specified\nin HandlerEnvironment.json and reported back to Azure\n* The operation log of the extension is the \`/var/log/azure/<extension-name>/<version>/extension.log\` file.\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/ \n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n"
  },
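Section 3.4 of the README above forces re-execution by changing a `timestamp` field in the public configuration. A minimal sketch (file name and URL are illustrative) that regenerates `public.json` with a fresh timestamp on every run:

```python
import json
import time

public = {
    "fileUris": ["https://example.com/MyShellScript.sh"],  # illustrative URL
    "commandToExecute": "sh MyShellScript.sh",
    # New value on every run, so the extension sees a changed
    # configuration and re-executes the script (see section 3.4).
    "timestamp": int(time.time()),
}

with open("public.json", "w") as f:
    json.dump(public, f, indent=2)
```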
  {
    "path": "CustomScript/azure/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport base64\nimport hashlib\nimport hmac\nimport sys\nimport types\nimport warnings\nimport inspect\nif sys.version_info < (3,):\n    from urllib2 import quote as url_quote\n    from urllib2 import unquote as url_unquote\n    _strtype = basestring\nelse:\n    from urllib.parse import quote as url_quote\n    from urllib.parse import unquote as url_unquote\n    _strtype = str\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom xml.sax.saxutils import escape as xml_escape\n\n#--------------------------------------------------------------------------\n# constants\n\n__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'\n__version__ = '0.8.4'\n\n# Live ServiceClient URLs\nBLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'\nQUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'\nTABLE_SERVICE_HOST_BASE = '.table.core.windows.net'\nSERVICE_BUS_HOST_BASE = '.servicebus.windows.net'\nMANAGEMENT_HOST = 'management.core.windows.net'\n\n# Development ServiceClient URLs\nDEV_BLOB_HOST = '127.0.0.1:10000'\nDEV_QUEUE_HOST = '127.0.0.1:10001'\nDEV_TABLE_HOST = '127.0.0.1:10002'\n\n# Default credentials for Development Storage Service\nDEV_ACCOUNT_NAME = 'devstoreaccount1'\nDEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='\n\n# All of our error messages\n_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'\n_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'\n_ERROR_INCORRECT_TABLE_IN_BATCH = \\\n    'Table should be the same in a batch operations'\n_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \\\n    'Partition Key should be the same in a batch operations'\n_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \\\n    'Row Keys should not be the same in a batch operations'\n_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \\\n    'Message is not peek locked and cannot be deleted.'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \\\n    'Message is not peek locked and cannot be unlocked.'\n_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'\n_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'\n_ERROR_CONFLICT = 'Conflict ({0})'\n_ERROR_NOT_FOUND = 'Not found ({0})'\n_ERROR_UNKNOWN = 'Unknown error ({0})'\n_ERROR_SERVICEBUS_MISSING_INFO = \\\n    'You need to provide servicebus namespace, access key and Issuer'\n_ERROR_STORAGE_MISSING_INFO = \\\n    'You need to provide both account name and access key'\n_ERROR_ACCESS_POLICY = \\\n    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \\\n    'instance'\n_WARNING_VALUE_SHOULD_BE_BYTES = \\\n    'Warning: {0} must be bytes data type. 
It will be converted ' + \\\n    'automatically, with utf-8 text encoding.'\n_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'\n_ERROR_VALUE_NONE = '{0} should not be None.'\n_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'\n_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \\\n    'Cannot serialize the specified value ({0}) to an entity.  Please use ' + \\\n    'an EntityProperty (which can specify custom types), int, str, bool, ' + \\\n    'or datetime.'\n_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \\\n    'Invalid page blob size: {0}. ' + \\\n    'The size must be aligned to a 512-byte boundary.'\n\n_USER_AGENT_STRING = 'pyazure/' + __version__\n\nMETADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'\n\n\nclass WindowsAzureData(object):\n\n    ''' This is the base of the data classes.\n    It is only used to check whether an object is an instance of one. '''\n    pass\n\n\nclass WindowsAzureError(Exception):\n\n    ''' WindowsAzure Exception base class. '''\n\n    def __init__(self, message):\n        super(WindowsAzureError, self).__init__(message)\n\n\nclass WindowsAzureConflictError(WindowsAzureError):\n\n    '''Indicates that the resource could not be created because it already\n    exists'''\n\n    def __init__(self, message):\n        super(WindowsAzureConflictError, self).__init__(message)\n\n\nclass WindowsAzureMissingResourceError(WindowsAzureError):\n\n    '''Indicates that a request for a resource (queue, table,\n    container, etc...) failed because the specified resource does not exist'''\n\n    def __init__(self, message):\n        super(WindowsAzureMissingResourceError, self).__init__(message)\n\n\nclass WindowsAzureBatchOperationError(WindowsAzureError):\n\n    '''Indicates that a batch operation failed'''\n\n    def __init__(self, message, code):\n        super(WindowsAzureBatchOperationError, self).__init__(message)\n        self.code = code\n\n\nclass Feed(object):\n    pass\n\n\nclass _Base64String(str):\n    pass\n\n\nclass HeaderDict(dict):\n\n    def __getitem__(self, index):\n        return super(HeaderDict, self).__getitem__(index.lower())\n\n\ndef _encode_base64(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    encoded = base64.b64encode(data)\n    return encoded.decode('utf-8')\n\n\ndef _decode_base64_to_bytes(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    return base64.b64decode(data)\n\n\ndef _decode_base64_to_text(data):\n    decoded_bytes = _decode_base64_to_bytes(data)\n    return decoded_bytes.decode('utf-8')\n\n\ndef _get_readable_id(id_name, id_prefix_to_skip):\n    \"\"\"simplifies an id to be more friendly for people\"\"\"\n    # id_name is in the form 'https://namespace.host.suffix/name'\n    # where name may contain a forward slash!\n    pos = id_name.find('//')\n    if pos != -1:\n        pos += 2\n        if id_prefix_to_skip:\n            pos = id_name.find(id_prefix_to_skip, pos)\n            if pos != -1:\n                pos += len(id_prefix_to_skip)\n        pos = id_name.find('/', pos)\n        if pos != -1:\n            return id_name[pos + 1:]\n    return id_name\n\n\ndef _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):\n    ''' get properties from entry xml '''\n    properties = {}\n\n    etag = entry.getAttributeNS(METADATA_NS, 'etag')\n    if etag:\n        properties['etag'] = etag\n    for updated in _get_child_nodes(entry, 'updated'):\n        properties['updated'] = 
updated.firstChild.nodeValue\n    for name in _get_children_from_path(entry, 'author', 'name'):\n        if name.firstChild is not None:\n            properties['author'] = name.firstChild.nodeValue\n\n    if include_id:\n        if use_title_as_id:\n            for title in _get_child_nodes(entry, 'title'):\n                properties['name'] = title.firstChild.nodeValue\n        else:\n            for id in _get_child_nodes(entry, 'id'):\n                properties['name'] = _get_readable_id(\n                    id.firstChild.nodeValue, id_prefix_to_skip)\n\n    return properties\n\n\ndef _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):\n    ''' get properties from entry xml '''\n    xmldoc = minidom.parseString(xmlstr)\n    properties = {}\n\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        properties.update(_get_entry_properties_from_node(entry, include_id, id_prefix_to_skip))\n\n    return properties\n\n\ndef _get_first_child_node_value(parent_node, node_name):\n    xml_attrs = _get_child_nodes(parent_node, node_name)\n    if xml_attrs:\n        xml_attr = xml_attrs[0]\n        if xml_attr.firstChild:\n            value = xml_attr.firstChild.nodeValue\n            return value\n\n\ndef _get_child_nodes(node, tagName):\n    return [childNode for childNode in node.getElementsByTagName(tagName)\n            if childNode.parentNode == node]\n\n\ndef _get_children_from_path(node, *path):\n    '''descends through a hierarchy of nodes returning the list of children\n    at the innermost level.  Only returns children that share a common parent,\n    not cousins.'''\n    cur = node\n    for index, child in enumerate(path):\n        if isinstance(child, _strtype):\n            next = _get_child_nodes(cur, child)\n        else:\n            next = _get_child_nodesNS(cur, *child)\n        if index == len(path) - 1:\n            return next\n        elif not next:\n            break\n\n        cur = next[0]\n    return []\n\n\ndef _get_child_nodesNS(node, ns, tagName):\n    return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)\n            if childNode.parentNode == node]\n\n\ndef _create_entry(entry_body):\n    ''' Adds the common part of an entry to a given entry body and returns the whole\n    xml. 
'''\n    updated_str = datetime.utcnow().isoformat()\n    if datetime.utcnow().utcoffset() is None:\n        updated_str += '+00:00'\n\n    entry_start = '''<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\" >\n<title /><updated>{updated}</updated><author><name /></author><id />\n<content type=\"application/xml\">\n    {body}</content></entry>'''\n    return entry_start.format(updated=updated_str, body=entry_body)\n\n\ndef _to_datetime(strtime):\n    return datetime.strptime(strtime, \"%Y-%m-%dT%H:%M:%S.%f\")\n\n_KNOWN_SERIALIZATION_XFORMS = {\n    'include_apis': 'IncludeAPIs',\n    'message_id': 'MessageId',\n    'content_md5': 'Content-MD5',\n    'last_modified': 'Last-Modified',\n    'cache_control': 'Cache-Control',\n    'account_admin_live_email_id': 'AccountAdminLiveEmailId',\n    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',\n    'subscription_id': 'SubscriptionID',\n    'fqdn': 'FQDN',\n    'private_id': 'PrivateID',\n    'os_virtual_hard_disk': 'OSVirtualHardDisk',\n    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',\n    'logical_size_in_gb': 'LogicalSizeInGB',\n    'os': 'OS',\n    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',\n    'copy_id': 'CopyId',\n    }\n\n\ndef _get_serialization_name(element_name):\n    \"\"\"converts a Python name into a serializable name\"\"\"\n    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)\n    if known is not None:\n        return known\n\n    if element_name.startswith('x_ms_'):\n        return element_name.replace('_', '-')\n    if element_name.endswith('_id'):\n        element_name = element_name.replace('_id', 'ID')\n    for name in ['content_', 'last_modified', 'if_', 'cache_control']:\n        if element_name.startswith(name):\n            element_name = element_name.replace('_', '-_')\n\n    return ''.join(name.capitalize() for name in element_name.split('_'))\n\nif sys.version_info < (3,):\n    _unicode_type = unicode\n\n    def _str(value):\n        if isinstance(value, unicode):\n            return value.encode('utf-8')\n\n        return str(value)\nelse:\n    _str = str\n    _unicode_type = str\n\n\ndef _str_or_none(value):\n    if value is None:\n        return None\n\n    return _str(value)\n\n\ndef _int_or_none(value):\n    if value is None:\n        return None\n\n    return str(int(value))\n\n\ndef _bool_or_none(value):\n    if value is None:\n        return None\n\n    if isinstance(value, bool):\n        if value:\n            return 'true'\n        else:\n            return 'false'\n\n    return str(value)\n\n\ndef _convert_class_to_xml(source, xml_prefix=True):\n    if source is None:\n        return ''\n\n    xmlstr = ''\n    if xml_prefix:\n        xmlstr = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n\n    if isinstance(source, list):\n        for value in source:\n            xmlstr += _convert_class_to_xml(value, False)\n    elif isinstance(source, WindowsAzureData):\n        class_name = source.__class__.__name__\n        xmlstr += '<' + class_name + '>'\n        for name, value in vars(source).items():\n            if value is not None:\n                if isinstance(value, list) or \\\n                    isinstance(value, WindowsAzureData):\n                    xmlstr += _convert_class_to_xml(value, False)\n                else:\n                    xmlstr += ('<' + 
_get_serialization_name(name) + '>' +\n                               xml_escape(str(value)) + '</' +\n                               _get_serialization_name(name) + '>')\n        xmlstr += '</' + class_name + '>'\n    return xmlstr\n\n\ndef _find_namespaces_from_child(parent, child, namespaces):\n    \"\"\"Recursively searches from the parent to the child,\n    gathering all the applicable namespaces along the way\"\"\"\n    for cur_child in parent.childNodes:\n        if cur_child is child:\n            return True\n        if _find_namespaces_from_child(cur_child, child, namespaces):\n            # we are the parent node\n            for key in cur_child.attributes.keys():\n                if key.startswith('xmlns:') or key == 'xmlns':\n                    namespaces[key] = cur_child.attributes[key]\n            break\n    return False\n\n\ndef _find_namespaces(parent, child):\n    res = {}\n    for key in parent.documentElement.attributes.keys():\n        if key.startswith('xmlns:') or key == 'xmlns':\n            res[key] = parent.documentElement.attributes[key]\n    _find_namespaces_from_child(parent, child, res)\n    return res\n\n\ndef _clone_node_with_namespaces(node_to_clone, original_doc):\n    clone = node_to_clone.cloneNode(True)\n\n    for key, value in _find_namespaces(original_doc, node_to_clone).items():\n        clone.attributes[key] = value\n\n    return clone\n\n\ndef _convert_response_to_feeds(response, convert_callback):\n    if response is None:\n        return None\n\n    feeds = _list_of(Feed)\n\n    x_ms_continuation = HeaderDict()\n    for name, value in response.headers:\n        if 'x-ms-continuation' in name:\n            x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value\n    if x_ms_continuation:\n        setattr(feeds, 'x_ms_continuation', x_ms_continuation)\n\n    xmldoc = minidom.parseString(response.body)\n    xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')\n    if not xml_entries:\n        # in some cases, response contains only entry but no feed\n        xml_entries = _get_children_from_path(xmldoc, 'entry')\n    if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):\n        for xml_entry in xml_entries:\n            return_obj = convert_callback()\n            for node in _get_children_from_path(xml_entry,\n                                                'content',\n                                                convert_callback.__name__):\n                _fill_data_to_return_object(node, return_obj)\n            for name, value in _get_entry_properties_from_node(xml_entry,\n                                                               include_id=True,\n                                                               use_title_as_id=True).items():\n                setattr(return_obj, name, value)\n            feeds.append(return_obj)\n    else:\n        for xml_entry in xml_entries:\n            new_node = _clone_node_with_namespaces(xml_entry, xmldoc)\n            feeds.append(convert_callback(new_node.toxml('utf-8')))\n\n    return feeds\n\n\ndef _validate_type_bytes(param_name, param):\n    if not isinstance(param, bytes):\n        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _validate_not_none(param_name, param):\n    if param is None:\n        raise TypeError(_ERROR_VALUE_NONE.format(param_name))\n\n\ndef _fill_list_of(xmldoc, element_type, xml_element_name):\n    xmlelements = _get_child_nodes(xmldoc, xml_element_name)\n    return 
[_parse_response_body_from_xml_node(xmlelement, element_type) \\\n        for xmlelement in xmlelements]\n\n\ndef _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,\n                         xml_element_name):\n    '''Converts an xml fragment into a list of scalar types.  The parent xml\n    element contains a flat list of xml elements which are converted into the\n    specified scalar type and added to the list.\n    Example:\n    xmldoc=\n<Endpoints>\n    <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>\n</Endpoints>\n    element_type=str\n    parent_xml_element_name='Endpoints'\n    xml_element_name='Endpoint'\n    '''\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)\n        return [_get_node_value(xmlelement, element_type) \\\n            for xmlelement in xmlelements]\n\n\ndef _fill_dict(xmldoc, element_name):\n    xmlelements = _get_child_nodes(xmldoc, element_name)\n    if xmlelements:\n        return_obj = {}\n        for child in xmlelements[0].childNodes:\n            if child.firstChild:\n                return_obj[child.nodeName] = child.firstChild.nodeValue\n        return return_obj\n\n\ndef _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,\n                  key_xml_element_name, value_xml_element_name):\n    '''Converts an xml fragment into a dictionary. The parent xml element\n    contains a list of xml elements where each element has a child element for\n    the key, and another for the value.\n    Example:\n    xmldoc=\n<ExtendedProperties>\n    <ExtendedProperty>\n        <Name>Ext1</Name>\n        <Value>Val1</Value>\n    </ExtendedProperty>\n    <ExtendedProperty>\n        <Name>Ext2</Name>\n        <Value>Val2</Value>\n    </ExtendedProperty>\n</ExtendedProperties>\n    element_type=str\n    parent_xml_element_name='ExtendedProperties'\n    pair_xml_element_name='ExtendedProperty'\n    key_xml_element_name='Name'\n    value_xml_element_name='Value'\n    '''\n    return_obj = {}\n\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)\n        for pair in xmlelements:\n            keys = _get_child_nodes(pair, key_xml_element_name)\n            values = _get_child_nodes(pair, value_xml_element_name)\n            if keys and values:\n                key = keys[0].firstChild.nodeValue\n                value = values[0].firstChild.nodeValue\n                return_obj[key] = value\n\n    return return_obj\n\n\ndef _fill_instance_child(xmldoc, element_name, return_type):\n    '''Converts a child of the current dom element to the specified type.\n    '''\n    xmlelements = _get_child_nodes(\n        xmldoc, _get_serialization_name(element_name))\n\n    if not xmlelements:\n        return None\n\n    return_obj = return_type()\n    _fill_data_to_return_object(xmlelements[0], return_obj)\n\n    return return_obj\n\n\ndef _fill_instance_element(element, return_type):\n    \"\"\"Converts a DOM element into the specified object\"\"\"\n    return _parse_response_body_from_xml_node(element, return_type)\n\n\ndef _fill_data_minidom(xmldoc, element_name, data_member):\n    xmlelements = _get_child_nodes(\n        xmldoc, 
_get_serialization_name(element_name))\n\n    if not xmlelements or not xmlelements[0].childNodes:\n        return None\n\n    value = xmlelements[0].firstChild.nodeValue\n\n    if data_member is None:\n        return value\n    elif isinstance(data_member, datetime):\n        return _to_datetime(value)\n    elif type(data_member) is bool:\n        return value.lower() != 'false'\n    else:\n        return type(data_member)(value)\n\n\ndef _get_node_value(xmlelement, data_type):\n    value = xmlelement.firstChild.nodeValue\n    if data_type is datetime:\n        return _to_datetime(value)\n    elif data_type is bool:\n        return value.lower() != 'false'\n    else:\n        return data_type(value)\n\n\ndef _get_request_body_bytes_only(param_name, param_value):\n    '''Validates the request body passed in and converts it to bytes\n    if our policy allows it.'''\n    if param_value is None:\n        return b''\n\n    if isinstance(param_value, bytes):\n        return param_value\n\n    # Previous versions of the SDK allowed data types other than bytes to be\n    # passed in, and they would be auto-converted to bytes.  We preserve this\n    # behavior when running under 2.7, but issue a warning.\n    # Python 3 support is new, so we reject anything that's not bytes.\n    if sys.version_info < (3,):\n        warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))\n        return _get_request_body(param_value)\n\n    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _get_request_body(request_body):\n    '''Converts an object into a request body.  If it's None\n    we'll return an empty string, if it's one of our objects it'll\n    convert it to XML and return it.  Otherwise we just use the object\n    directly'''\n    if request_body is None:\n        return b''\n\n    if isinstance(request_body, WindowsAzureData):\n        request_body = _convert_class_to_xml(request_body)\n\n    if isinstance(request_body, bytes):\n        return request_body\n\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    request_body = str(request_body)\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    return request_body\n\n\ndef _parse_enum_results_list(response, return_type, resp_type, item_type):\n    \"\"\"resp_body is the XML we received\nresp_type is a string, such as Containers,\nreturn_type is the type we're constructing, such as ContainerEnumResults\nitem_type is the type object of the item to be created, such as Container\n\nThis function then returns a ContainerEnumResults object with the\ncontainers member populated with the results.\n\"\"\"\n\n    # parsing something like:\n    # <EnumerationResults ... 
>\n    #   <Queues>\n    #       <Queue>\n    #           <Something />\n    #           <SomethingElse />\n    #       </Queue>\n    #   </Queues>\n    # </EnumerationResults>\n    respbody = response.body\n    return_obj = return_type()\n    doc = minidom.parseString(respbody)\n\n    items = []\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        # path is something like Queues, Queue\n        for child in _get_children_from_path(enum_results,\n                                             resp_type,\n                                             resp_type[:-1]):\n            items.append(_fill_instance_element(child, item_type))\n\n        for name, value in vars(return_obj).items():\n            # queues, Queues, this is the list its self which we populated\n            # above\n            if name == resp_type.lower():\n                # the list its self.\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    setattr(return_obj, resp_type.lower(), items)\n    return return_obj\n\n\ndef _parse_simple_list(response, type, item_type, list_name):\n    respbody = response.body\n    res = type()\n    res_items = []\n    doc = minidom.parseString(respbody)\n    type_name = type.__name__\n    item_name = item_type.__name__\n    for item in _get_children_from_path(doc, type_name, item_name):\n        res_items.append(_fill_instance_element(item, item_type))\n\n    setattr(res, list_name, res_items)\n    return res\n\n\ndef _parse_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_xml_text(response.body, return_type)\n\ndef _parse_service_resources_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_service_resources_xml_text(response.body, return_type)\n\n\ndef _fill_data_to_return_object(node, return_obj):\n    members = dict(vars(return_obj))\n    for name, value in members.items():\n        if isinstance(value, _list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_list_of(node,\n                                  value.list_type,\n                                  value.xml_element_name))\n        elif isinstance(value, _scalar_list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_scalar_list_of(node,\n                                         value.list_type,\n                                         _get_serialization_name(name),\n                                         value.xml_element_name))\n        elif isinstance(value, _dict_of):\n            setattr(return_obj,\n                    name,\n                    _fill_dict_of(node,\n                                  _get_serialization_name(name),\n                                  value.pair_xml_element_name,\n                                  value.key_xml_element_name,\n                                  value.value_xml_element_name))\n        elif isinstance(value, _xml_attribute):\n            real_value = None\n            if node.hasAttribute(value.xml_element_name):\n                real_value = node.getAttribute(value.xml_element_name)\n            if real_value is not None:\n                setattr(return_obj, name, real_value)\n        
elif isinstance(value, WindowsAzureData):\n            setattr(return_obj,\n                    name,\n                    _fill_instance_child(node, name, value.__class__))\n        elif isinstance(value, dict):\n            setattr(return_obj,\n                    name,\n                    _fill_dict(node, _get_serialization_name(name)))\n        elif isinstance(value, _Base64String):\n            value = _fill_data_minidom(node, name, '')\n            if value is not None:\n                value = _decode_base64_to_text(value)\n            # always set the attribute, so we don't end up returning an object\n            # with type _Base64String\n            setattr(return_obj, name, value)\n        else:\n            value = _fill_data_minidom(node, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n\ndef _parse_response_body_from_xml_node(node, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    return_obj = return_type()\n    _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\n\ndef _parse_response_body_from_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = return_type()\n    xml_name = return_type._xml_name if hasattr(return_type, '_xml_name') else return_type.__name__ \n    for node in _get_child_nodes(doc, xml_name):\n        _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\ndef _parse_response_body_from_service_resources_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = _list_of(return_type)\n    for node in _get_children_from_path(doc, \"ServiceResources\", \"ServiceResource\"):\n        local_obj = return_type()\n        _fill_data_to_return_object(node, local_obj)\n        return_obj.append(local_obj)\n\n    return return_obj\n\nclass _dict_of(dict):\n\n    \"\"\"a dict which carries with it the xml element names for key,val.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, pair_xml_element_name, key_xml_element_name,\n                 value_xml_element_name):\n        self.pair_xml_element_name = pair_xml_element_name\n        self.key_xml_element_name = key_xml_element_name\n        self.value_xml_element_name = value_xml_element_name\n        super(_dict_of, self).__init__()\n\n\nclass _list_of(list):\n\n    \"\"\"a list which carries with it the type that's expected to go in it.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name=None):\n        self.list_type = list_type\n        if xml_element_name is None:\n            self.xml_element_name = list_type.__name__\n        else:\n            self.xml_element_name = xml_element_name\n        super(_list_of, self).__init__()\n\n\nclass _scalar_list_of(list):\n\n    \"\"\"a list of scalar types which carries with it the type that's\n    expected to go in it along with its xml element name.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name):\n        self.list_type = list_type\n        self.xml_element_name = xml_element_name\n        super(_scalar_list_of, self).__init__()\n        \nclass _xml_attribute:\n    \n    \"\"\"An accessor for XML attributes,\n 
   identified by the xml attribute name to read.\n    Used for deserialization and construction\"\"\"\n    \n    def __init__(self, xml_element_name):\n        self.xml_element_name = xml_element_name\n\n\ndef _update_request_uri_query_local_storage(request, use_local_storage):\n    ''' create correct uri and query for the request '''\n    uri, query = _update_request_uri_query(request)\n    if use_local_storage:\n        return '/' + DEV_ACCOUNT_NAME + uri, query\n    return uri, query\n\n\ndef _update_request_uri_query(request):\n    '''pulls the query string out of the URI and moves it into\n    the query portion of the request object.  If there are already\n    query parameters on the request the parameters in the URI will\n    appear after the existing parameters'''\n\n    if '?' in request.path:\n        request.path, _, query_string = request.path.partition('?')\n        if query_string:\n            query_params = query_string.split('&')\n            for query in query_params:\n                if '=' in query:\n                    name, _, value = query.partition('=')\n                    request.query.append((name, value))\n\n    request.path = url_quote(request.path, '/()$=\\',')\n\n    # add encoded queries to request.path.\n    if request.query:\n        request.path += '?'\n        for name, value in request.query:\n            if value is not None:\n                request.path += name + '=' + url_quote(value, '/()$=\\',') + '&'\n        request.path = request.path[:-1]\n\n    return request.path, request.query\n\n\ndef _dont_fail_on_exist(error):\n    ''' don't throw exception if the resource exists.\n    This is called by create_* APIs with fail_on_exist=False'''\n    if isinstance(error, WindowsAzureConflictError):\n        return False\n    else:\n        raise error\n\n\ndef _dont_fail_not_exist(error):\n    ''' don't throw exception if the resource doesn't exist.\n    This is called by delete_* APIs with fail_not_exist=False'''\n    if isinstance(error, WindowsAzureMissingResourceError):\n        return False\n    else:\n        raise error\n\n\ndef _general_error_handler(http_error):\n    ''' Simple error handler for azure.'''\n    if http_error.status == 409:\n        raise WindowsAzureConflictError(\n            _ERROR_CONFLICT.format(str(http_error)))\n    elif http_error.status == 404:\n        raise WindowsAzureMissingResourceError(\n            _ERROR_NOT_FOUND.format(str(http_error)))\n    else:\n        if http_error.respbody is not None:\n            raise WindowsAzureError(\n                _ERROR_UNKNOWN.format(str(http_error)) + '\\n' + \\\n                    http_error.respbody.decode('utf-8'))\n        else:\n            raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))\n\n\ndef _parse_response_for_dict(response):\n    ''' Extracts name-values from response header. Filter out the standard\n    http headers.'''\n\n    if response is None:\n        return None\n    http_headers = ['server', 'date', 'location', 'host',\n                    'via', 'proxy-connection', 'connection']\n    return_dict = HeaderDict()\n    if response.headers:\n        for name, value in response.headers:\n            if not name.lower() in http_headers:\n                return_dict[name] = value\n\n    return return_dict\n\n\ndef _parse_response_for_dict_prefix(response, prefixes):\n    ''' Extracts name-values for names starting with prefix from response\n    header. 
Filter out the standard http headers.'''\n\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            for prefix_value in prefixes:\n                if name.lower().startswith(prefix_value.lower()):\n                    return_dict[name] = value\n                    break\n        return return_dict\n    else:\n        return None\n\n\ndef _parse_response_for_dict_filter(response, filter):\n    ''' Extracts name-values for names in filter from response header. Filter\n    out the standard http headers.'''\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            if name.lower() in filter:\n                return_dict[name] = value\n        return return_dict\n    else:\n        return None\n\n\ndef _sign_string(key, string_to_sign, key_is_base64=True):\n    if key_is_base64:\n        key = _decode_base64_to_bytes(key)\n    else:\n        if isinstance(key, _unicode_type):\n            key = key.encode('utf-8')\n    if isinstance(string_to_sign, _unicode_type):\n        string_to_sign = string_to_sign.encode('utf-8')\n    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)\n    digest = signed_hmac_sha256.digest()\n    encoded_digest = _encode_base64(digest)\n    return encoded_digest\n"
  },
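`_sign_string` above implements the Shared Key signing steps used by the storage services: base64-decode the account key, HMAC-SHA256 the string to sign, and base64-encode the digest. An equivalent standalone sketch using only the stdlib (the key shown is the published development-storage key from this module, not a secret; the string to sign is illustrative):

```python
import base64
import hashlib
import hmac

def sign_string(key_b64, string_to_sign):
    # Same steps as _sign_string with key_is_base64=True.
    key = base64.b64decode(key_b64)
    digest = hmac.new(key, string_to_sign.encode('utf-8'),
                      hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

DEV_ACCOUNT_KEY = ('Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6'
                   'IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')
print(sign_string(DEV_ACCOUNT_KEY, 'GET\n\ncanonicalized-headers'))
```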
  {
    "path": "CustomScript/azure/azure.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>{25b2c65a-0553-4452-8907-8b5b17544e68}</ProjectGuid>\n    <ProjectHome>\n    </ProjectHome>\n    <StartupFile>storage\\blobservice.py</StartupFile>\n    <SearchPath>..</SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>azure</Name>\n    <RootNamespace>azure</RootNamespace>\n    <IsWindowsApplication>False</IsWindowsApplication>\n    <LaunchProvider>Standard Python launcher</LaunchProvider>\n    <CommandLineArguments />\n    <InterpreterPath />\n    <InterpreterArguments />\n    <InterpreterId>{9a7a9026-48c1-4688-9d5d-e5699d47d074}</InterpreterId>\n    <InterpreterVersion>3.4</InterpreterVersion>\n    <SccProjectName>SAK</SccProjectName>\n    <SccProvider>SAK</SccProvider>\n    <SccAuxPath>SAK</SccAuxPath>\n    <SccLocalPath>SAK</SccLocalPath>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <ItemGroup>\n    <Compile Include=\"http\\batchclient.py\" />\n    <Compile Include=\"http\\httpclient.py\" />\n    <Compile Include=\"http\\winhttp.py\" />\n    <Compile Include=\"http\\__init__.py\" />\n    <Compile Include=\"servicemanagement\\servicebusmanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementclient.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\sqldatabasemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\websitemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\__init__.py\" />\n    <Compile Include=\"servicebus\\servicebusservice.py\" />\n    <Compile Include=\"storage\\blobservice.py\" />\n    <Compile Include=\"storage\\queueservice.py\" />\n    <Compile Include=\"storage\\cloudstorageaccount.py\" />\n    <Compile Include=\"storage\\tableservice.py\" />\n    <Compile Include=\"storage\\sharedaccesssignature.py\" />\n    <Compile Include=\"__init__.py\" />\n    <Compile Include=\"servicebus\\__init__.py\" />\n    <Compile Include=\"storage\\storageclient.py\" />\n    <Compile Include=\"storage\\__init__.py\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Folder Include=\"http\" />\n    <Folder Include=\"servicemanagement\" />\n    <Folder Include=\"servicebus\\\" />\n    <Folder Include=\"storage\" />\n  </ItemGroup>\n  <ItemGroup>\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.6\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.7\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.3\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.4\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\2.7\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.3\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.4\" 
/>\n  </ItemGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <VSToolsPath Condition=\"'$(VSToolsPath)' == ''\">$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)</VSToolsPath>\n    <PtvsTargetsFile>$(VSToolsPath)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <Import Condition=\"Exists($(PtvsTargetsFile))\" Project=\"$(PtvsTargetsFile)\" />\n  <Import Condition=\"!Exists($(PtvsTargetsFile))\" Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" />\n</Project>"
  },
  {
    "path": "CustomScript/azure/http/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\n\nHTTP_RESPONSE_NO_CONTENT = 204\n\n\nclass HTTPError(Exception):\n\n    ''' HTTP Exception when response status code >= 300 '''\n\n    def __init__(self, status, message, respheader, respbody):\n        '''Creates a new HTTPError with the specified status, message,\n        response headers and body'''\n        self.status = status\n        self.respheader = respheader\n        self.respbody = respbody\n        Exception.__init__(self, message)\n\n\nclass HTTPResponse(object):\n\n    \"\"\"Represents a response from an HTTP request.  An HTTPResponse has the\n    following attributes:\n\n    status: the status code of the response\n    message: the message\n    headers: the returned headers, as a list of (name, value) pairs\n    body: the body of the response\n    \"\"\"\n\n    def __init__(self, status, message, headers, body):\n        self.status = status\n        self.message = message\n        self.headers = headers\n        self.body = body\n\n\nclass HTTPRequest(object):\n\n    '''Represents an HTTP Request.  An HTTP Request consists of the following\n    attributes:\n\n    host: the host name to connect to\n    method: the method to use to connect (string such as GET, POST, PUT, etc.)\n    path: the uri fragment\n    query: query parameters specified as a list of (name, value) pairs\n    headers: header values specified as (name, value) pairs\n    body: the body of the request.\n    protocol_override:\n        specify to use this protocol instead of the global one stored in\n        _HTTPClient.\n    '''\n\n    def __init__(self):\n        self.host = ''\n        self.method = ''\n        self.path = ''\n        self.query = []      # list of (name, value)\n        self.headers = []    # list of (header name, header value)\n        self.body = ''\n        self.protocol_override = None\n"
  },
  {
    "path": "CustomScript/azure/http/batchclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport uuid\n\nfrom azure import (\n    _update_request_uri_query,\n    WindowsAzureError,\n    WindowsAzureBatchOperationError,\n    _get_children_from_path,\n    url_unquote,\n    _ERROR_CANNOT_FIND_PARTITION_KEY,\n    _ERROR_CANNOT_FIND_ROW_KEY,\n    _ERROR_INCORRECT_TABLE_IN_BATCH,\n    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,\n    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,\n    _ERROR_BATCH_COMMIT_FAIL,\n    )\nfrom azure.http import HTTPError, HTTPRequest, HTTPResponse\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import (\n    _update_storage_table_header,\n    METADATA_NS,\n    _sign_storage_table_request,\n    )\nfrom xml.dom import minidom\n\n_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'\n\nif sys.version_info < (3,):\n    def _new_boundary():\n        return str(uuid.uuid1())\nelse:\n    def _new_boundary():\n        return str(uuid.uuid1()).encode('utf-8')\n\n\nclass _BatchClient(_HTTPClient):\n\n    '''\n    This is the class that is used for batch operation for storage table\n    service. It only supports one changeset.\n    '''\n\n    def __init__(self, service_instance, account_key, account_name,\n                 protocol='http'):\n        _HTTPClient.__init__(self, service_instance, account_name=account_name,\n                             account_key=account_key, protocol=protocol)\n        self.is_batch = False\n        self.batch_requests = []\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n\n    def get_request_table(self, request):\n        '''\n        Extracts table name from request.uri. The request.uri has either\n        \"/mytable(...)\" or \"/mytable\" format.\n\n        request: the request to insert, update or delete entity\n        '''\n        if '(' in request.path:\n            pos = request.path.find('(')\n            return request.path[1:pos]\n        else:\n            return request.path[1:]\n\n    def get_request_partition_key(self, request):\n        '''\n        Extracts PartitionKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. 
Only an insert operation\n        is a POST request and the PartitionKey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            part_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'PartitionKey'))\n            if not part_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return part_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('PartitionKey=\\'')\n            pos2 = uri.find('\\',', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return uri[pos1 + len('PartitionKey=\\''):pos2]\n\n    def get_request_row_key(self, request):\n        '''\n        Extracts RowKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. Only an insert operation\n        is a POST request and the RowKey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            row_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'RowKey'))\n            if not row_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            return row_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('RowKey=\\'')\n            pos2 = uri.find('\\')', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            row_key = uri[pos1 + len('RowKey=\\''):pos2]\n            return row_key\n\n    def validate_request_table(self, request):\n        '''\n        Validates that all requests have the same table name. Sets the table\n        name if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_table:\n            if self.get_request_table(request) != self.batch_table:\n                raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)\n        else:\n            self.batch_table = self.get_request_table(request)\n\n    def validate_request_partition_key(self, request):\n        '''\n        Validates that all requests have the same PartitionKey. 
Sets the\n        PartitionKey if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_partition_key:\n            if self.get_request_partition_key(request) != \\\n                self.batch_partition_key:\n                raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)\n        else:\n            self.batch_partition_key = self.get_request_partition_key(request)\n\n    def validate_request_row_key(self, request):\n        '''\n        Validates that each request has a distinct RowKey and adds the RowKey\n        to the list of RowKeys seen so far.\n\n        request: the request to insert, update or delete entity\n        '''\n        row_key = self.get_request_row_key(request)\n        if row_key in self.batch_row_keys:\n            raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)\n        # Record every RowKey so that later duplicates are detected as well.\n        self.batch_row_keys.append(row_key)\n\n    def begin_batch(self):\n        '''\n        Starts the batch operation. Initializes the batch variables:\n\n        is_batch: batch operation flag.\n        batch_table: the table name of the batch operation\n        batch_partition_key: the PartitionKey of the batch requests.\n        batch_row_keys: the list of RowKeys of the added requests.\n        batch_requests: the list of the requests.\n        '''\n        self.is_batch = True\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n        self.batch_requests = []\n\n    def insert_request_to_batch(self, request):\n        '''\n        Adds a request to the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        self.validate_request_table(request)\n        self.validate_request_partition_key(request)\n        self.validate_request_row_key(request)\n        self.batch_requests.append(request)\n\n    def commit_batch(self):\n        ''' Resets the batch flag and commits the batch requests. '''\n        if self.is_batch:\n            self.is_batch = False\n            self.commit_batch_requests()\n\n    def commit_batch_requests(self):\n        ''' Commits the batch requests. 
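The collected requests are serialized\n        into a single multipart/mixed POST against /$batch, with a single\n        changeset wrapping every operation:\n\n            --batch_<uuid>\n            Content-Type: multipart/mixed; boundary=changeset_<uuid>\n\n            --changeset_<uuid>\n            Content-Type: application/http\n            Content-Transfer-Encoding: binary\n\n            <METHOD> http://<host><path> HTTP/1.1\n            Content-ID: <n>\n            ...\n\n            --changeset_<uuid>--\n            --batch_<uuid>--\n        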
'''\n\n        batch_boundary = b'batch_' + _new_boundary()\n        changeset_boundary = b'changeset_' + _new_boundary()\n\n        # Commits the batch only if the requests list is not empty.\n        if self.batch_requests:\n            request = HTTPRequest()\n            request.method = 'POST'\n            request.host = self.batch_requests[0].host\n            request.path = '/$batch'\n            request.headers = [\n                ('Content-Type', 'multipart/mixed; boundary=' + \\\n                    batch_boundary.decode('utf-8')),\n                ('Accept', 'application/atom+xml,application/xml'),\n                ('Accept-Charset', 'UTF-8')]\n\n            request.body = b'--' + batch_boundary + b'\\n'\n            request.body += b'Content-Type: multipart/mixed; boundary='\n            request.body += changeset_boundary + b'\\n\\n'\n\n            content_id = 1\n\n            # Adds each request body to the POST data.\n            for batch_request in self.batch_requests:\n                request.body += b'--' + changeset_boundary + b'\\n'\n                request.body += b'Content-Type: application/http\\n'\n                request.body += b'Content-Transfer-Encoding: binary\\n\\n'\n                request.body += batch_request.method.encode('utf-8')\n                request.body += b' http://'\n                request.body += batch_request.host.encode('utf-8')\n                request.body += batch_request.path.encode('utf-8')\n                request.body += b' HTTP/1.1\\n'\n                request.body += b'Content-ID: '\n                request.body += str(content_id).encode('utf-8') + b'\\n'\n                content_id += 1\n\n                # Add different headers for different request types.\n                if batch_request.method != 'DELETE':\n                    request.body += \\\n                        b'Content-Type: application/atom+xml;type=entry\\n'\n                    for name, value in batch_request.headers:\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n'\n                            break\n                    request.body += b'Content-Length: '\n                    request.body += str(len(batch_request.body)).encode('utf-8')\n                    request.body += b'\\n\\n'\n                    request.body += batch_request.body + b'\\n'\n                else:\n                    for name, value in batch_request.headers:\n                        # If-Match should be already included in\n                        # batch_request.headers, but in case it is missing,\n                        # just add it.\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n\\n'\n                            break\n                    else:\n                        request.body += b'If-Match: *\\n\\n'\n\n            request.body += b'--' + changeset_boundary + b'--' + b'\\n'\n            request.body += b'--' + batch_boundary + b'--'\n\n            request.path, request.query = _update_request_uri_query(request)\n            request.headers = _update_storage_table_header(request)\n            auth = _sign_storage_table_request(request,\n                                               self.account_name,\n                                               self.account_key)\n            
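# Attach the computed signature as the Authorization header.\n            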
request.headers.append(('Authorization', auth))\n\n            # Submit the whole request as a batch request.\n            response = self.perform_request(request)\n            if response.status >= 300:\n                raise HTTPError(response.status,\n                                _ERROR_BATCH_COMMIT_FAIL,\n                                self.respheader,\n                                response.body)\n\n            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/\n            # The body of a ChangeSet response is either a response for all the\n            # successfully processed change requests within the ChangeSet,\n            # formatted exactly as it would have appeared outside of a batch, \n            # or a single response indicating a failure of the entire ChangeSet.\n            responses = self._parse_batch_response(response.body)\n            if responses and responses[0].status >= 300:\n                self._report_batch_error(responses[0])\n\n    def cancel_batch(self):\n        ''' Resets the batch flag. '''\n        self.is_batch = False\n\n    def _parse_batch_response(self, body):\n        parts = body.split(b'--changesetresponse_')\n\n        responses = []\n        for part in parts:\n            httpLocation = part.find(b'HTTP/')\n            if httpLocation > 0:\n                response = self._parse_batch_response_part(part[httpLocation:])\n                responses.append(response)\n\n        return responses\n\n    def _parse_batch_response_part(self, part):\n        lines = part.splitlines()\n\n        # First line is the HTTP status/reason\n        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')\n\n        # Followed by headers and body\n        headers = []\n        body = b''\n        isBody = False\n        for line in lines[1:]:\n            if line == b'' and not isBody:\n                isBody = True\n            elif isBody:\n                body += line\n            else:\n                headerName, _, headerVal = line.partition(b':')\n                headers.append((headerName.lower(), headerVal))\n\n        return HTTPResponse(int(status), reason.strip(), headers, body)\n\n    def _report_batch_error(self, response):\n        xml = response.body.decode('utf-8')\n        doc = minidom.parseString(xml)\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code')\n        code = n[0].firstChild.nodeValue if n and n[0].firstChild else ''\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message')\n        message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml\n\n        raise WindowsAzureBatchOperationError(message, code)\n"
  },
  {
    "path": "CustomScript/azure/http/httpclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport base64\nimport os\nimport sys\n\nif sys.version_info < (3,):\n    from httplib import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urlparse import urlparse\nelse:\n    from http.client import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urllib.parse import urlparse\n\nfrom azure.http import HTTPError, HTTPResponse\nfrom azure import _USER_AGENT_STRING, _update_request_uri_query\n\n\nclass _HTTPClient(object):\n\n    '''\n    Takes the request and sends it to cloud service and returns the response.\n    '''\n\n    def __init__(self, service_instance, cert_file=None, account_name=None,\n                 account_key=None, protocol='https'):\n        '''\n        service_instance: service client instance.\n        cert_file:\n            certificate file name/location. This is only used in hosted\n            service management.\n        account_name: the storage account.\n        account_key:\n            the storage account access key.\n        '''\n        self.service_instance = service_instance\n        self.status = None\n        self.respheader = None\n        self.message = None\n        self.cert_file = cert_file\n        self.account_name = account_name\n        self.account_key = account_key\n        self.protocol = protocol\n        self.proxy_host = None\n        self.proxy_port = None\n        self.proxy_user = None\n        self.proxy_password = None\n        self.use_httplib = self.should_use_httplib()\n\n    def should_use_httplib(self):\n        if sys.platform.lower().startswith('win') and self.cert_file:\n            # On Windows, auto-detect between Windows Store Certificate\n            # (winhttp) and OpenSSL .pem certificate file (httplib).\n            #\n            # We used to only support certificates installed in the Windows\n            # Certificate Store.\n            #   cert_file example: CURRENT_USER\\my\\CertificateName\n            #\n            # We now support using an OpenSSL .pem certificate file,\n            # for a consistent experience across all platforms.\n            #   cert_file example: account\\certificate.pem\n            #\n            # When using OpenSSL .pem certificate file on Windows, make sure\n            # you are on CPython 2.7.4 or later.\n\n            # If it's not an existing file on disk, then treat it as a path in\n            # the Windows Certificate Store, which means we can't use httplib.\n            if not os.path.isfile(self.cert_file):\n                return False\n\n        return True\n\n    def set_proxy(self, host, port, user, password):\n        '''\n        Sets the proxy server host and port for the 
HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self.proxy_host = host\n        self.proxy_port = port\n        self.proxy_user = user\n        self.proxy_password = password\n\n    def get_uri(self, request):\n        ''' Return the target uri for the request.'''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n        return protocol + '://' + request.host + ':' + str(port) + request.path\n\n    def get_connection(self, request):\n        ''' Create connection for the request. '''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        target_host = request.host\n        target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n\n        if not self.use_httplib:\n            import azure.http.winhttp\n            connection = azure.http.winhttp._HTTPConnection(\n                target_host, cert_file=self.cert_file, protocol=protocol)\n            proxy_host = self.proxy_host\n            proxy_port = self.proxy_port\n        else:\n            if ':' in target_host:\n                target_host, _, target_port = target_host.rpartition(':')\n            if self.proxy_host:\n                proxy_host = target_host\n                proxy_port = target_port\n                host = self.proxy_host\n                port = self.proxy_port\n            else:\n                host = target_host\n                port = target_port\n\n            if protocol == 'http':\n                connection = HTTPConnection(host, int(port))\n            else:\n                connection = HTTPSConnection(\n                    host, int(port), cert_file=self.cert_file)\n\n        if self.proxy_host:\n            headers = None\n            if self.proxy_user and self.proxy_password:\n                # base64.b64encode expects bytes and, unlike the deprecated\n                # encodestring, appends no trailing newline.\n                auth = base64.b64encode(\n                    \"{0}:{1}\".format(self.proxy_user,\n                                     self.proxy_password).encode('utf-8'))\n                headers = {'Proxy-Authorization':\n                           'Basic {0}'.format(auth.decode('ascii'))}\n            connection.set_tunnel(proxy_host, int(proxy_port), headers)\n\n        return connection\n\n    def send_request_headers(self, connection, request_headers):\n        if self.use_httplib:\n            if self.proxy_host:\n                for i in connection._buffer:\n                    if i.startswith(\"Host: \"):\n                        connection._buffer.remove(i)\n                connection.putheader(\n                    'Host', \"{0}:{1}\".format(connection._tunnel_host,\n                                             connection._tunnel_port))\n\n        for name, value in request_headers:\n            if value:\n                connection.putheader(name, value)\n\n        connection.putheader('User-Agent', _USER_AGENT_STRING)\n        connection.endheaders()\n\n    def send_request_body(self, connection, request_body):\n        if request_body:\n            assert isinstance(request_body, bytes)\n            connection.send(request_body)\n        elif (not isinstance(connection, HTTPSConnection) and\n              not isinstance(connection, HTTPConnection)):\n            connection.send(None)\n\n    def perform_request(self, request):\n        ''' Sends the request to the cloud service and returns the response. 
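It\n        follows 307 redirects via the Location header and raises HTTPError\n        for any other status >= 300. 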
'''\n        connection = self.get_connection(request)\n        try:\n            connection.putrequest(request.method, request.path)\n\n            if not self.use_httplib:\n                if self.proxy_host and self.proxy_user:\n                    connection.set_proxy_credentials(\n                        self.proxy_user, self.proxy_password)\n\n            self.send_request_headers(connection, request.headers)\n            self.send_request_body(connection, request.body)\n\n            resp = connection.getresponse()\n            self.status = int(resp.status)\n            self.message = resp.reason\n            self.respheader = headers = resp.getheaders()\n\n            # for consistency across platforms, make header names lowercase\n            for i, value in enumerate(headers):\n                headers[i] = (value[0].lower(), value[1])\n\n            respbody = None\n            if resp.length is None:\n                respbody = resp.read()\n            elif resp.length > 0:\n                respbody = resp.read(resp.length)\n\n            response = HTTPResponse(\n                int(resp.status), resp.reason, headers, respbody)\n            if self.status == 307:\n                new_url = urlparse(dict(headers)['location'])\n                request.host = new_url.hostname\n                request.path = new_url.path\n                request.path, request.query = _update_request_uri_query(request)\n                return self.perform_request(request)\n            if self.status >= 300:\n                raise HTTPError(self.status, self.message,\n                                self.respheader, respbody)\n\n            return response\n        finally:\n            connection.close()\n"
  },
  {
    "path": "CustomScript/azure/http/winhttp.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom ctypes import (\n    c_void_p,\n    c_long,\n    c_ulong,\n    c_longlong,\n    c_ulonglong,\n    c_short,\n    c_ushort,\n    c_wchar_p,\n    c_byte,\n    byref,\n    Structure,\n    Union,\n    POINTER,\n    WINFUNCTYPE,\n    HRESULT,\n    oledll,\n    WinDLL,\n    )\nimport ctypes\nimport sys\n\nif sys.version_info >= (3,):\n    def unicode(text):\n        return text\n\n#------------------------------------------------------------------------------\n#  Constants that are used in COM operations\nVT_EMPTY = 0\nVT_NULL = 1\nVT_I2 = 2\nVT_I4 = 3\nVT_BSTR = 8\nVT_BOOL = 11\nVT_I1 = 16\nVT_UI1 = 17\nVT_UI2 = 18\nVT_UI4 = 19\nVT_I8 = 20\nVT_UI8 = 21\nVT_ARRAY = 8192\n\nHTTPREQUEST_PROXYSETTING_PROXY = 2\nHTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1\n\nHTTPREQUEST_PROXY_SETTING = c_long\nHTTPREQUEST_SETCREDENTIALS_FLAGS = c_long\n#------------------------------------------------------------------------------\n# Com related APIs that are used.\n_ole32 = oledll.ole32\n_oleaut32 = WinDLL('oleaut32')\n_CLSIDFromString = _ole32.CLSIDFromString\n_CoInitialize = _ole32.CoInitialize\n_CoInitialize.argtypes = [c_void_p]\n\n_CoCreateInstance = _ole32.CoCreateInstance\n\n_SysAllocString = _oleaut32.SysAllocString\n_SysAllocString.restype = c_void_p\n_SysAllocString.argtypes = [c_wchar_p]\n\n_SysFreeString = _oleaut32.SysFreeString\n_SysFreeString.argtypes = [c_void_p]\n\n# SAFEARRAY*\n# SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG\n# cElements);\n_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector\n_SafeArrayCreateVector.restype = c_void_p\n_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong]\n\n# HRESULT\n# SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData);\n_SafeArrayAccessData = _oleaut32.SafeArrayAccessData\n_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)]\n\n# HRESULT\n# SafeArrayUnaccessData(_In_ SAFEARRAY *psa);\n_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData\n_SafeArrayUnaccessData.argtypes = [c_void_p]\n\n# HRESULT\n# SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG\n# *plUbound);\n_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound\n_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)]\n\n\n#------------------------------------------------------------------------------\n\nclass BSTR(c_wchar_p):\n\n    ''' BSTR class in python. '''\n\n    def __init__(self, value):\n        super(BSTR, self).__init__(_SysAllocString(value))\n\n    def __del__(self):\n        _SysFreeString(self)\n\n\nclass VARIANT(Structure):\n\n    '''\n    VARIANT structure in python. Does not match the definition in\n    MSDN exactly & it is only mapping the used fields.  
Field names are also\n    slightly different.\n    '''\n\n    class _tagData(Union):\n\n        class _tagRecord(Structure):\n            _fields_ = [('pvoid', c_void_p), ('precord', c_void_p)]\n\n        _fields_ = [('llval', c_longlong),\n                    ('ullval', c_ulonglong),\n                    ('lval', c_long),\n                    ('ulval', c_ulong),\n                    ('ival', c_short),\n                    ('boolval', c_ushort),\n                    ('bstrval', BSTR),\n                    ('parray', c_void_p),\n                    ('record', _tagRecord)]\n\n    _fields_ = [('vt', c_ushort),\n                ('wReserved1', c_ushort),\n                ('wReserved2', c_ushort),\n                ('wReserved3', c_ushort),\n                ('vdata', _tagData)]\n\n    @staticmethod\n    def create_empty():\n        variant = VARIANT()\n        variant.vt = VT_EMPTY\n        variant.vdata.llval = 0\n        return variant\n\n    @staticmethod\n    def create_safearray_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_ARRAY | VT_UI1\n\n        length = len(text)\n        variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length)\n        pvdata = c_void_p()\n        _SafeArrayAccessData(variant.vdata.parray, byref(pvdata))\n        ctypes.memmove(pvdata, text, length)\n        _SafeArrayUnaccessData(variant.vdata.parray)\n\n        return variant\n\n    @staticmethod\n    def create_bstr_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_BSTR\n        variant.vdata.bstrval = BSTR(text)\n        return variant\n\n    @staticmethod\n    def create_bool_false():\n        variant = VARIANT()\n        variant.vt = VT_BOOL\n        variant.vdata.boolval = 0\n        return variant\n\n    def is_safearray_of_bytes(self):\n        return self.vt == VT_ARRAY | VT_UI1\n\n    def str_from_safearray(self):\n        assert self.vt == VT_ARRAY | VT_UI1\n        pvdata = c_void_p()\n        count = c_long()\n        _SafeArrayGetUBound(self.vdata.parray, 1, byref(count))\n        count = c_long(count.value + 1)\n        _SafeArrayAccessData(self.vdata.parray, byref(pvdata))\n        text = ctypes.string_at(pvdata, count)\n        _SafeArrayUnaccessData(self.vdata.parray)\n        return text\n\n    def __del__(self):\n        _VariantClear(self)\n\n# HRESULT VariantClear(_Inout_ VARIANTARG *pvarg);\n_VariantClear = _oleaut32.VariantClear\n_VariantClear.argtypes = [POINTER(VARIANT)]\n\n\nclass GUID(Structure):\n\n    ''' GUID structure in python. '''\n\n    _fields_ = [(\"data1\", c_ulong),\n                (\"data2\", c_ushort),\n                (\"data3\", c_ushort),\n                (\"data4\", c_byte * 8)]\n\n    def __init__(self, name=None):\n        if name is not None:\n            _CLSIDFromString(unicode(name), byref(self))\n\n\nclass _WinHttpRequest(c_void_p):\n\n    '''\n    Maps the COM API to Python class functions. 
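Each prototype below is bound\n    by vtable slot index to the matching IWinHttpRequest method. 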
Not all methods in\n    IWinHttpWebRequest are mapped - only the methods we use.\n    '''\n    _AddRef = WINFUNCTYPE(c_long) \\\n        (1, 'AddRef')\n    _Release = WINFUNCTYPE(c_long) \\\n        (2, 'Release')\n    _SetProxy = WINFUNCTYPE(HRESULT,\n                            HTTPREQUEST_PROXY_SETTING,\n                            VARIANT,\n                            VARIANT) \\\n        (7, 'SetProxy')\n    _SetCredentials = WINFUNCTYPE(HRESULT,\n                                  BSTR,\n                                  BSTR,\n                                  HTTPREQUEST_SETCREDENTIALS_FLAGS) \\\n        (8, 'SetCredentials')\n    _Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT) \\\n        (9, 'Open')\n    _SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR) \\\n        (10, 'SetRequestHeader')\n    _GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p)) \\\n        (11, 'GetResponseHeader')\n    _GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (12, 'GetAllResponseHeaders')\n    _Send = WINFUNCTYPE(HRESULT, VARIANT) \\\n        (13, 'Send')\n    _Status = WINFUNCTYPE(HRESULT, POINTER(c_long)) \\\n        (14, 'Status')\n    _StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (15, 'StatusText')\n    _ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (16, 'ResponseText')\n    _ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (17, 'ResponseBody')\n    _ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (18, 'ResponseStream')\n    _WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort)) \\\n        (21, 'WaitForResponse')\n    _Abort = WINFUNCTYPE(HRESULT) \\\n        (22, 'Abort')\n    _SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long) \\\n        (23, 'SetTimeouts')\n    _SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR) \\\n        (24, 'SetClientCertificate')\n\n    def open(self, method, url):\n        '''\n        Opens the request.\n\n        method: the request VERB 'GET', 'POST', etc.\n        url: the url to connect\n        '''\n        _WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000)\n\n        flag = VARIANT.create_bool_false()\n        _method = BSTR(method)\n        _url = BSTR(url)\n        _WinHttpRequest._Open(self, _method, _url, flag)\n\n    def set_request_header(self, name, value):\n        ''' Sets the request header. '''\n\n        _name = BSTR(name)\n        _value = BSTR(value)\n        _WinHttpRequest._SetRequestHeader(self, _name, _value)\n\n    def get_all_response_headers(self):\n        ''' Gets back all response headers. '''\n\n        bstr_headers = c_void_p()\n        _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))\n        bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)\n        headers = bstr_headers.value\n        _SysFreeString(bstr_headers)\n        return headers\n\n    def send(self, request=None):\n        ''' Sends the request body. '''\n\n        # Sends VT_EMPTY if it is GET, HEAD request.\n        if request is None:\n            var_empty = VARIANT.create_empty()\n            _WinHttpRequest._Send(self, var_empty)\n        else:  # Sends request body as SAFEArray.\n            _request = VARIANT.create_safearray_from_str(request)\n            _WinHttpRequest._Send(self, _request)\n\n    def status(self):\n        ''' Gets status of response. 
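It wraps the Status property\n        of the underlying COM request. 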
'''\n\n        status = c_long()\n        _WinHttpRequest._Status(self, byref(status))\n        return int(status.value)\n\n    def status_text(self):\n        ''' Gets status text of response. '''\n\n        bstr_status_text = c_void_p()\n        _WinHttpRequest._StatusText(self, byref(bstr_status_text))\n        bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)\n        status_text = bstr_status_text.value\n        _SysFreeString(bstr_status_text)\n        return status_text\n\n    def response_body(self):\n        '''\n        Gets response body as a SAFEARRAY and converts the SAFEARRAY to str.\n        If the body starts with the 3-byte UTF-8 BOM followed by <?xml, the\n        BOM is removed.\n        '''\n        var_respbody = VARIANT()\n        _WinHttpRequest._ResponseBody(self, byref(var_respbody))\n        if var_respbody.is_safearray_of_bytes():\n            respbody = var_respbody.str_from_safearray()\n            if respbody[3:].startswith(b'<?xml') and\\\n               respbody.startswith(b'\\xef\\xbb\\xbf'):\n                respbody = respbody[3:]\n            return respbody\n        else:\n            return ''\n\n    def set_client_certificate(self, certificate):\n        '''Sets client certificate for the request. '''\n        _certificate = BSTR(certificate)\n        _WinHttpRequest._SetClientCertificate(self, _certificate)\n\n    def set_tunnel(self, host, port):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''\n        url = host\n        if port:\n            url = url + u':' + port\n\n        var_host = VARIANT.create_bstr_from_str(url)\n        var_empty = VARIANT.create_empty()\n\n        _WinHttpRequest._SetProxy(\n            self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)\n\n    def set_proxy_credentials(self, user, password):\n        _WinHttpRequest._SetCredentials(\n            self, BSTR(user), BSTR(password),\n            HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)\n\n    def __del__(self):\n        if self.value is not None:\n            _WinHttpRequest._Release(self)\n\n\nclass _Response(object):\n\n    ''' Response class corresponding to the response returned from httplib\n    HTTPConnection. '''\n\n    def __init__(self, _status, _status_text, _length, _headers, _respbody):\n        self.status = _status\n        self.reason = _status_text\n        self.length = _length\n        self.headers = _headers\n        self.respbody = _respbody\n\n    def getheaders(self):\n        '''Returns response headers.'''\n        return self.headers\n\n    def read(self, _length):\n        '''Returns the response body. '''\n        return self.respbody[:_length]\n\n\nclass _HTTPConnection(object):\n\n    ''' Class corresponding to httplib HTTPConnection class. '''\n\n    def __init__(self, host, cert_file=None, key_file=None, protocol='http'):\n        ''' Initializes the IWinHttpWebRequest COM object.'''\n        self.host = unicode(host)\n        self.cert_file = cert_file\n        self._httprequest = _WinHttpRequest()\n        self.protocol = protocol\n        clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')\n        iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')\n        _CoInitialize(None)\n        _CoCreateInstance(byref(clsid), 0, 1, byref(iid),\n                          byref(self._httprequest))\n\n    def close(self):\n        pass\n\n    def set_tunnel(self, host, port=None, headers=None):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. 
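The\n        tunnel is configured through the SetProxy COM call, and the headers\n        argument is accepted only for interface compatibility. 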
'''\n        self._httprequest.set_tunnel(unicode(host), unicode(str(port)))\n\n    def set_proxy_credentials(self, user, password):\n        self._httprequest.set_proxy_credentials(\n            unicode(user), unicode(password))\n\n    def putrequest(self, method, uri):\n        ''' Connects to host and sends the request. '''\n\n        protocol = unicode(self.protocol + '://')\n        url = protocol + self.host + unicode(uri)\n        self._httprequest.open(unicode(method), url)\n\n        # sets certificate for the connection if cert_file is set.\n        if self.cert_file is not None:\n            self._httprequest.set_client_certificate(unicode(self.cert_file))\n\n    def putheader(self, name, value):\n        ''' Sends the headers of request. '''\n        if sys.version_info < (3,):\n            name = str(name).decode('utf-8')\n            value = str(value).decode('utf-8')\n        self._httprequest.set_request_header(name, value)\n\n    def endheaders(self):\n        ''' No operation. Exists only to provide the same interface of httplib\n        HTTPConnection.'''\n        pass\n\n    def send(self, request_body):\n        ''' Sends request body. '''\n        if not request_body:\n            self._httprequest.send()\n        else:\n            self._httprequest.send(request_body)\n\n    def getresponse(self):\n        ''' Gets the response and generates the _Response object'''\n        status = self._httprequest.status()\n        status_text = self._httprequest.status_text()\n\n        resp_headers = self._httprequest.get_all_response_headers()\n        fixed_headers = []\n        for resp_header in resp_headers.split('\\n'):\n            if (resp_header.startswith('\\t') or\\\n                resp_header.startswith(' ')) and fixed_headers:\n                # append to previous header\n                fixed_headers[-1] += resp_header\n            else:\n                fixed_headers.append(resp_header)\n\n        headers = []\n        for resp_header in fixed_headers:\n            if ':' in resp_header:\n                pos = resp_header.find(':')\n                headers.append(\n                    (resp_header[:pos].lower(), resp_header[pos + 1:].strip()))\n\n        body = self._httprequest.response_body()\n        length = len(body)\n\n        return _Response(status, status_text, length, headers, body)\n"
  },
  {
    "path": "CustomScript/azure/servicebus/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport json\nimport sys\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    WindowsAzureError,\n    xml_escape,\n    _create_entry,\n    _general_error_handler,\n    _get_entry_properties,\n    _get_child_nodes,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK,\n    _ERROR_QUEUE_NOT_FOUND,\n    _ERROR_TOPIC_NOT_FOUND,\n    )\nfrom azure.http import HTTPError\n\n# default rule name for subscription\nDEFAULT_RULE_NAME = '$Default'\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE'\nAZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY'\nAZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER'\n\n# namespace used for converting rules to objects\nXML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class corresponding to Queue Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''\n\n    def __init__(self, lock_duration=None, max_size_in_megabytes=None,\n                 requires_duplicate_detection=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 duplicate_detection_history_time_window=None,\n                 max_delivery_count=None, enable_batched_operations=None,\n                 size_in_bytes=None, message_count=None):\n\n        self.lock_duration = lock_duration\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.max_delivery_count = max_delivery_count\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n        self.message_count = message_count\n\n\nclass Topic(WindowsAzureData):\n\n    ''' Topic class corresponding to Topic Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. 
'''\n\n    def __init__(self, default_message_time_to_live=None,\n                 max_size_in_megabytes=None, requires_duplicate_detection=None,\n                 duplicate_detection_history_time_window=None,\n                 enable_batched_operations=None, size_in_bytes=None):\n\n        self.default_message_time_to_live = default_message_time_to_live\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n\n    @property\n    def max_size_in_mega_bytes(self):\n        import warnings\n        warnings.warn(\n            'This attribute has been changed to max_size_in_megabytes.')\n        return self.max_size_in_megabytes\n\n    @max_size_in_mega_bytes.setter\n    def max_size_in_mega_bytes(self, value):\n        self.max_size_in_megabytes = value\n\n\nclass Subscription(WindowsAzureData):\n\n    ''' Subscription class corresponding to Subscription Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''\n\n    def __init__(self, lock_duration=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 dead_lettering_on_filter_evaluation_exceptions=None,\n                 enable_batched_operations=None, max_delivery_count=None,\n                 message_count=None):\n\n        self.lock_duration = lock_duration\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.dead_lettering_on_filter_evaluation_exceptions = \\\n            dead_lettering_on_filter_evaluation_exceptions\n        self.enable_batched_operations = enable_batched_operations\n        self.max_delivery_count = max_delivery_count\n        self.message_count = message_count\n\n\nclass Rule(WindowsAzureData):\n\n    ''' Rule class corresponding to Rule Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''\n\n    def __init__(self, filter_type=None, filter_expression=None,\n                 action_type=None, action_expression=None):\n        self.filter_type = filter_type\n        self.filter_expression = filter_expression\n        self.action_type = action_type\n        self.action_expression = action_expression\n\n\nclass Message(WindowsAzureData):\n\n    ''' Message class that is used in the send message/get message APIs. 
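It\n    keeps a reference to the service and to its location URL so that a\n    received message can delete() or unlock() itself on the queue or\n    subscription it came from. 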
'''\n\n    def __init__(self, body=None, service_bus_service=None, location=None,\n                 custom_properties=None,\n                 type='application/atom+xml;type=entry;charset=utf-8',\n                 broker_properties=None):\n        self.body = body\n        self.location = location\n        self.broker_properties = broker_properties\n        self.custom_properties = custom_properties\n        self.type = type\n        self.service_bus_service = service_bus_service\n        self._topic_name = None\n        self._subscription_name = None\n        self._queue_name = None\n\n        if not service_bus_service:\n            return\n\n        # If location is set, extract the queue name for a queue message, or\n        # the topic and subscription names for a topic message.\n        if location:\n            if '/subscriptions/' in location:\n                pos = location.find('/subscriptions/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._topic_name = location[pos1 + 1:pos]\n                pos += len('/subscriptions/')\n                pos1 = location.find('/', pos)\n                self._subscription_name = location[pos:pos1]\n            elif '/messages/' in location:\n                pos = location.find('/messages/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._queue_name = location[pos1 + 1:pos]\n\n    def delete(self):\n        ''' Deletes itself if a queue name, or a topic name and subscription\n        name, can be found. '''\n        if self._queue_name:\n            self.service_bus_service.delete_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.delete_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)\n\n    def unlock(self):\n        ''' Unlocks itself if a queue name, or a topic name and subscription\n        name, can be found. 
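Requires broker_properties containing\n        SequenceNumber and LockToken, which are present on peek-locked\n        messages. 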
'''\n        if self._queue_name:\n            self.service_bus_service.unlock_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.unlock_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)\n\n    def add_headers(self, request):\n        ''' Adds additional headers to the request for a message request.'''\n\n        # Adds custom properties\n        if self.custom_properties:\n            for name, value in self.custom_properties.items():\n                if sys.version_info < (3,) and isinstance(value, unicode):\n                    request.headers.append(\n                        (name, '\"' + value.encode('utf-8') + '\"'))\n                elif isinstance(value, str):\n                    request.headers.append((name, '\"' + str(value) + '\"'))\n                elif isinstance(value, datetime):\n                    request.headers.append(\n                        (name, '\"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '\"'))\n                else:\n                    request.headers.append((name, str(value).lower()))\n\n        # Adds content-type\n        request.headers.append(('Content-Type', self.type))\n\n        # Adds BrokerProperties\n        if self.broker_properties:\n            request.headers.append(\n                ('BrokerProperties', str(self.broker_properties)))\n\n        return request.headers\n\n\ndef _create_message(response, service_instance):\n    ''' Creates a message from the response.\n\n    response: response from service bus cloud server.\n    service_instance: the service bus client.\n    '''\n    respbody = response.body\n    custom_properties = {}\n    broker_properties = None\n    message_type = None\n    message_location = None\n\n    # Gets all information from the response headers.\n    for name, value in response.headers:\n        if name.lower() == 'brokerproperties':\n            broker_properties = json.loads(value)\n        elif name.lower() == 'content-type':\n            message_type = value\n        elif name.lower() == 'location':\n            message_location = value\n        elif name.lower() not in ['content-type',\n                                  'brokerproperties',\n                                  'transfer-encoding',\n                                  'server',\n                                  'location',\n                                  'date']:\n            if '\"' in value:\n                value = value[1:-1]\n                try:\n                    custom_properties[name] = datetime.strptime(\n                        value, '%a, %d %b %Y %H:%M:%S GMT')\n                except ValueError:\n                    custom_properties[name] = value\n            else:  # only int, float or boolean\n                if value.lower() == 'true':\n                    custom_properties[name] = True\n                elif value.lower() == 'false':\n                    custom_properties[name] = False\n                # int('3.1') doesn't work so need to get float('3.14') first\n                elif str(int(float(value))) == value:\n                    custom_properties[name] = int(value)\n                
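# Neither int-like nor boolean: fall back to float.\n                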
else:\n                    custom_properties[name] = float(value)\n\n    if message_type is None:\n        message = Message(\n            respbody, service_instance, message_location, custom_properties,\n            'application/atom+xml;type=entry;charset=utf-8', broker_properties)\n    else:\n        message = Message(respbody, service_instance, message_location,\n                          custom_properties, message_type, broker_properties)\n    return message\n\n# convert functions\n\n\ndef _convert_response_to_rule(response):\n    return _convert_xml_to_rule(response.body)\n\n\ndef _convert_xml_to_rule(xmlstr):\n    ''' Converts response xml to rule object.\n\n    The format of xml for rule:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n<content type='application/xml'>\n<RuleDescription\n    xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n    xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n    <Filter i:type=\"SqlFilterExpression\">\n        <SqlExpression>MyProperty='XYZ'</SqlExpression>\n    </Filter>\n    <Action i:type=\"SqlFilterAction\">\n        <SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>\n    </Action>\n</RuleDescription>\n</content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    rule = Rule()\n\n    for rule_desc in _get_children_from_path(xmldoc,\n                                             'entry',\n                                             'content',\n                                             'RuleDescription'):\n        for xml_filter in _get_child_nodes(rule_desc, 'Filter'):\n            filter_type = xml_filter.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'filter_type', str(filter_type))\n            if xml_filter.childNodes:\n\n                for expr in _get_child_nodes(xml_filter, 'SqlExpression'):\n                    setattr(rule, 'filter_expression',\n                            expr.firstChild.nodeValue)\n\n        for xml_action in _get_child_nodes(rule_desc, 'Action'):\n            action_type = xml_action.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'action_type', str(action_type))\n            if xml_action.childNodes:\n                action_expression = xml_action.childNodes[0].firstChild\n                if action_expression:\n                    setattr(rule, 'action_expression',\n                            action_expression.nodeValue)\n\n    # extract id, updated and name value from feed entry and set them on the\n    # rule.\n    for name, value in _get_entry_properties(xmlstr, True, '/rules').items():\n        setattr(rule, name, value)\n\n    return rule\n\n\ndef _convert_response_to_queue(response):\n    return _convert_xml_to_queue(response.body)\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\ndef _convert_xml_to_queue(xmlstr):\n    ''' Converts xml response to queue object.\n\n    The format of xml response for queue:\n<QueueDescription\n    xmlns=\\\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\\\">\n    <MaxSizeInBytes>10000</MaxSizeInBytes>\n    <DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive>\n    <LockDuration>PT2M</LockDuration>\n    <RequiresGroupedReceives>False</RequiresGroupedReceives>\n    <SupportsDuplicateDetection>False</SupportsDuplicateDetection>\n    ...\n</QueueDescription>\n\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    queue = Queue()\n\n    invalid_queue = True\n    # get 
node for each attribute in Queue class, if nothing found then the\n    # response is not valid xml for Queue.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'QueueDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            queue.lock_duration = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            queue.max_size_in_megabytes = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            queue.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'RequiresSession')\n        if node_value is not None:\n            queue.requires_session = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            queue.default_message_time_to_live = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            queue.dead_lettering_on_message_expiration = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            queue.duplicate_detection_history_time_window = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            queue.enable_batched_operations = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            queue.max_delivery_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MessageCount')\n        if node_value is not None:\n            queue.message_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            queue.size_in_bytes = int(node_value)\n            invalid_queue = False\n\n    if invalid_queue:\n        raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND)\n\n    # extract id, updated and name value from feed entry and set them of queue.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(queue, name, value)\n\n    return queue\n\n\ndef _convert_response_to_topic(response):\n    return _convert_xml_to_topic(response.body)\n\n\ndef _convert_xml_to_topic(xmlstr):\n    '''Converts xml response to topic\n\n    The xml format for topic:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <TopicDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n        
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <MaxSizeInMegabytes>1024</MaxSizeInMegabytes>\n        <RequiresDuplicateDetection>false</RequiresDuplicateDetection>\n        <DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </TopicDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    topic = Topic()\n\n    invalid_topic = True\n\n    # get node for each attribute in Topic class, if nothing found then the\n    # response is not valid xml for Topic.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'TopicDescription'):\n        invalid_topic = True\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            topic.default_message_time_to_live = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            topic.max_size_in_megabytes = int(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            topic.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            topic.duplicate_detection_history_time_window = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            topic.enable_batched_operations = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            topic.size_in_bytes = int(node_value)\n            invalid_topic = False\n\n    if invalid_topic:\n        raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND)\n\n    # extract id, updated and name value from feed entry and set them of topic.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(topic, name, value)\n    return topic\n\n\ndef _convert_response_to_subscription(response):\n    return _convert_xml_to_subscription(response.body)\n\n\ndef _convert_xml_to_subscription(xmlstr):\n    '''Converts xml response to subscription\n\n    The xml format for subscription:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <SubscriptionDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n        <LockDuration>PT5M</LockDuration>\n        <RequiresSession>false</RequiresSession>\n        <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </SubscriptionDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n  
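  # Each SubscriptionDescription element below, when present, overrides\n    # the corresponding default attribute on the Subscription object.\n  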
  subscription = Subscription()\n\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'SubscriptionDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            subscription.lock_duration = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresSession')\n        if node_value is not None:\n            subscription.requires_session = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            subscription.default_message_time_to_live = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnFilterEvaluationExceptions')\n        if node_value is not None:\n            subscription.dead_lettering_on_filter_evaluation_exceptions = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            subscription.dead_lettering_on_message_expiration = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            subscription.enable_batched_operations = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            subscription.max_delivery_count = int(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MessageCount')\n        if node_value is not None:\n            subscription.message_count = int(node_value)\n\n    for name, value in _get_entry_properties(xmlstr,\n                                             True,\n                                             '/subscriptions').items():\n        setattr(subscription, name, value)\n\n    return subscription\n\n\ndef _convert_subscription_to_xml(subscription):\n    '''\n    Converts a subscription object to xml to send.  
The order of each field of\n    subscription in xml is very important so we can't simply call\n    convert_class_to_xml.\n\n    subscription: the subscription object to be converted.\n    '''\n\n    subscription_body = '<SubscriptionDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if subscription:\n        if subscription.lock_duration is not None:\n            subscription_body += ''.join(\n                ['<LockDuration>',\n                 str(subscription.lock_duration),\n                 '</LockDuration>'])\n\n        if subscription.requires_session is not None:\n            subscription_body += ''.join(\n                ['<RequiresSession>',\n                 str(subscription.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if subscription.default_message_time_to_live is not None:\n            subscription_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(subscription.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if subscription.dead_lettering_on_message_expiration is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(subscription.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnFilterEvaluationExceptions>',\n                 str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(),\n                 '</DeadLetteringOnFilterEvaluationExceptions>'])\n\n        if subscription.enable_batched_operations is not None:\n            subscription_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(subscription.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if subscription.max_delivery_count is not None:\n            subscription_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(subscription.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if subscription.message_count is not None:\n            subscription_body += ''.join(\n                ['<MessageCount>',\n                 str(subscription.message_count),\n                 '</MessageCount>'])\n\n    subscription_body += '</SubscriptionDescription>'\n    return _create_entry(subscription_body)\n\n\ndef _convert_rule_to_xml(rule):\n    '''\n    Converts a rule object to xml to send.  
The order of each field of rule\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    rule: the rule object to be converted.\n    '''\n    rule_body = '<RuleDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if rule:\n        if rule.filter_type:\n            rule_body += ''.join(\n                ['<Filter i:type=\"',\n                 xml_escape(rule.filter_type),\n                 '\">'])\n            if rule.filter_type == 'CorrelationFilter':\n                rule_body += ''.join(\n                    ['<CorrelationId>',\n                     xml_escape(rule.filter_expression),\n                     '</CorrelationId>'])\n            else:\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.filter_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Filter>'\n        if rule.action_type:\n            rule_body += ''.join(\n                ['<Action i:type=\"',\n                 xml_escape(rule.action_type),\n                 '\">'])\n            if rule.action_type == 'SqlRuleAction':\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.action_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Action>'\n    rule_body += '</RuleDescription>'\n\n    return _create_entry(rule_body)\n\n\ndef _convert_topic_to_xml(topic):\n    '''\n    Converts a topic object to xml to send.  The order of each field of topic\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    topic: the topic object to be converted.\n    '''\n\n    topic_body = '<TopicDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if topic:\n        if topic.default_message_time_to_live is not None:\n            topic_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(topic.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if topic.max_size_in_megabytes is not None:\n            topic_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(topic.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if topic.requires_duplicate_detection is not None:\n            topic_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(topic.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if topic.duplicate_detection_history_time_window is not None:\n            topic_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(topic.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if topic.enable_batched_operations is not None:\n            topic_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(topic.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if topic.size_in_bytes is not None:\n            topic_body += ''.join(\n                ['<SizeInBytes>',\n   
              str(topic.size_in_bytes),\n                 '</SizeInBytes>'])\n\n    topic_body += '</TopicDescription>'\n\n    return _create_entry(topic_body)\n\n\ndef _convert_queue_to_xml(queue):\n    '''\n    Converts a queue object to xml to send.  The order of each field of queue\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    queue: the queue object to be converted.\n    '''\n    queue_body = '<QueueDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if queue:\n        if queue.lock_duration:\n            queue_body += ''.join(\n                ['<LockDuration>',\n                 str(queue.lock_duration),\n                 '</LockDuration>'])\n\n        if queue.max_size_in_megabytes is not None:\n            queue_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(queue.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if queue.requires_duplicate_detection is not None:\n            queue_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(queue.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if queue.requires_session is not None:\n            queue_body += ''.join(\n                ['<RequiresSession>',\n                 str(queue.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if queue.default_message_time_to_live is not None:\n            queue_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(queue.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if queue.dead_lettering_on_message_expiration is not None:\n            queue_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(queue.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if queue.duplicate_detection_history_time_window is not None:\n            queue_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(queue.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if queue.max_delivery_count is not None:\n            queue_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(queue.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if queue.enable_batched_operations is not None:\n            queue_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(queue.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if queue.size_in_bytes is not None:\n            queue_body += ''.join(\n                ['<SizeInBytes>',\n                 str(queue.size_in_bytes),\n                 '</SizeInBytes>'])\n\n        if queue.message_count is not None:\n            queue_body += ''.join(\n                ['<MessageCount>',\n                 str(queue.message_count),\n                 '</MessageCount>'])\n\n    queue_body += '</QueueDescription>'\n    return _create_entry(queue_body)\n\n\ndef _service_bus_error_handler(http_error):\n    ''' Simple error handler for service bus service. '''\n    return _general_error_handler(http_error)\n\nfrom azure.servicebus.servicebusservice import ServiceBusService\n"
  },
  {
    "path": "CustomScript/azure/servicebus/servicebusservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport datetime\nimport os\nimport time\n\nfrom azure import (\n    WindowsAzureError,\n    SERVICE_BUS_HOST_BASE,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _sign_string,\n    _str,\n    _unicode_type,\n    _update_request_uri_query,\n    url_quote,\n    url_unquote,\n    _validate_not_none,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicebus import (\n    AZURE_SERVICEBUS_NAMESPACE,\n    AZURE_SERVICEBUS_ACCESS_KEY,\n    AZURE_SERVICEBUS_ISSUER,\n    _convert_topic_to_xml,\n    _convert_response_to_topic,\n    _convert_queue_to_xml,\n    _convert_response_to_queue,\n    _convert_subscription_to_xml,\n    _convert_response_to_subscription,\n    _convert_rule_to_xml,\n    _convert_response_to_rule,\n    _convert_xml_to_queue,\n    _convert_xml_to_topic,\n    _convert_xml_to_subscription,\n    _convert_xml_to_rule,\n    _create_message,\n    _service_bus_error_handler,\n    )\n\n\nclass ServiceBusService(object):\n\n    def __init__(self, service_namespace=None, account_key=None, issuer=None,\n                 x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,\n                 shared_access_key_name=None, shared_access_key_value=None,\n                 authentication=None):\n        '''\n        Initializes the service bus service for a namespace with the specified\n        authentication settings (SAS or ACS).\n\n        service_namespace:\n            Service bus namespace, required for all operations. If None,\n            the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.\n        account_key:\n            ACS authentication account key. If None, the value is set to the\n            AZURE_SERVICEBUS_ACCESS_KEY env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        issuer:\n            ACS authentication issuer. If None, the value is set to the\n            AZURE_SERVICEBUS_ISSUER env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        x_ms_version: Unused. Kept for backwards compatibility.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. 
Override this\n            for on-premise.\n        shared_access_key_name:\n            SAS authentication key name.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        shared_access_key_value:\n            SAS authentication key value.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        authentication:\n            Instance of authentication class. If this is specified, then\n            ACS and SAS parameters are ignored.\n        '''\n        self.requestid = None\n        self.service_namespace = service_namespace\n        self.host_base = host_base\n\n        if not self.service_namespace:\n            self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)\n\n        if not self.service_namespace:\n            raise WindowsAzureError('You need to provide servicebus namespace')\n\n        if authentication:\n            self.authentication = authentication\n        else:\n            if not account_key:\n                account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)\n            if not issuer:\n                issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)\n\n            if shared_access_key_name and shared_access_key_value:\n                self.authentication = ServiceBusSASAuthentication(\n                    shared_access_key_name,\n                    shared_access_key_value)\n            elif account_key and issuer:\n                self.authentication = ServiceBusWrapTokenAuthentication(\n                    account_key,\n                    issuer)\n            else:\n                raise WindowsAzureError(\n                    'You need to provide servicebus access key and Issuer OR shared access key and value')\n\n        self._httpclient = _HTTPClient(service_instance=self)\n        self._filter = self._httpclient.perform_request\n\n    # Backwards compatibility:\n    # account_key and issuer used to be stored on the service class, they are\n    # now stored on the authentication class.\n    @property\n    def account_key(self):\n        return self.authentication.account_key\n\n    @account_key.setter\n    def account_key(self, value):\n        self.authentication.account_key = value\n\n    @property\n    def issuer(self):\n        return self.authentication.issuer\n\n    @issuer.setter\n    def issuer(self, value):\n        self.authentication.issuer = value\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = ServiceBusService(\n            service_namespace=self.service_namespace,\n            authentication=self.authentication)\n\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def create_queue(self, queue_name, queue=None, fail_on_exist=False):\n        '''\n        Creates a new queue. Once created, this queue's resource manifest is\n        immutable.\n\n        queue_name: Name of the queue to create.\n        queue: Queue object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.body = _get_request_body(_convert_queue_to_xml(queue))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Deletes an existing queue. This operation will also remove all\n        associated state including messages in the queue.\n\n        queue_name: Name of the queue to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the queue doesn't exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue(self, queue_name):\n        '''\n        Retrieves an existing queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_queue(response)\n\n    def list_queues(self):\n        '''\n        Enumerates the queues in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Queues'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_queue)\n\n    def 
create_topic(self, topic_name, topic=None, fail_on_exist=False):\n        '''\n        Creates a new topic. Once created, this topic's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic to create.\n        topic: Topic object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the topic exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.body = _get_request_body(_convert_topic_to_xml(topic))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_topic(self, topic_name, fail_not_exist=False):\n        '''\n        Deletes an existing topic. This operation will also remove all\n        associated state including associated subscriptions.\n\n        topic_name: Name of the topic to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the topic doesn't\n            exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_topic(self, topic_name):\n        '''\n        Retrieves the description for the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_topic(response)\n\n    def list_topics(self):\n        '''\n        Retrieves the topics in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Topics'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_topic)\n\n    def create_rule(self, topic_name, subscription_name, rule_name, rule=None,\n                    fail_on_exist=False):\n        '''\n        Creates a new rule. 
Once created, this rule's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        rule: Rule object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the rule exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.body = _get_request_body(_convert_rule_to_xml(rule))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_rule(self, topic_name, subscription_name, rule_name,\n                    fail_not_exist=False):\n        '''\n        Deletes an existing rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name:\n            Name of the rule to delete.  DEFAULT_RULE_NAME=$Default.\n            Use DEFAULT_RULE_NAME to delete the default rule for the\n            subscription.\n        fail_not_exist:\n            Specify whether to throw an exception when the rule doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_rule(self, topic_name, subscription_name, rule_name):\n        '''\n        Retrieves the description for the specified rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        
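# _update_service_bus_header adds Content-Length/Content-Type as needed\n        # and signs the request, mirroring every other operation in this class.\n        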
request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_rule(response)\n\n    def list_rules(self, topic_name, subscription_name):\n        '''\n        Retrieves the rules that exist under the specified subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/rules/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_rule)\n\n    def create_subscription(self, topic_name, subscription_name,\n                            subscription=None, fail_on_exist=False):\n        '''\n        Creates a new subscription. Once created, this subscription's\n        resource manifest is immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        subscription: Subscription object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the subscription\n            exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.body = _get_request_body(\n            _convert_subscription_to_xml(subscription))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_subscription(self, topic_name, subscription_name,\n                            fail_not_exist=False):\n        '''\n        Deletes an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the subscription\n            doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n  
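              # _dont_fail_not_exist re-raises unless the error is\n                # 'does not exist', so it is safe to report False here.\n  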
              return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_subscription(self, topic_name, subscription_name):\n        '''\n        Gets an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_subscription(response)\n\n    def list_subscriptions(self, topic_name):\n        '''\n        Retrieves the subscriptions in the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response,\n                                          _convert_xml_to_subscription)\n\n    def send_topic_message(self, topic_name, message=None):\n        '''\n        Enqueues a message into the specified topic. The limit to the number\n        of messages which may be present in the topic is governed by the\n        topic quota, MaxTopicSizeInBytes. If this message causes the topic\n        to exceed its quota, a quota exceeded error is returned and the\n        message will be rejected.\n\n        topic_name: Name of the topic.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only(\n            'message.body', message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_subscription_message(self, topic_name, subscription_name,\n                                       timeout='60'):\n        '''\n        This operation is used to atomically retrieve and lock a message for\n        processing. The message is guaranteed not to be delivered to other\n        receivers (on the same subscription only) during the lock duration\n        period specified in the subscription description. Once the lock\n        expires, the message will be available to other receivers. 
In order to\n        complete processing of the message, the receiver should issue a delete\n        command with the lock ID received from this operation. To abandon\n        processing of the message and unlock it for other receivers, an Unlock\n        Message command should be issued, or the lock duration period can\n        expire.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Unlock a message for processing by other receivers on a given\n        subscription. This operation deletes the lock object, causing the\n        message to be unlocked. A message must have first been locked by a\n        receiver before this operation is called.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_subscription_message(self, topic_name, subscription_name,\n                                         timeout='60'):\n        '''\n        Read and delete a message from a subscription as an atomic operation.\n        This operation should be used when a best-effort guarantee is\n        sufficient for an application; that is, using this operation it is\n        possible for messages to be lost if processing fails.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the\n        subscription. This operation should only be called after a previously\n        locked message has been processed successfully, to maintain\n        At-Least-Once delivery assurances.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def send_queue_message(self, queue_name, message=None):\n        '''\n        Sends a message into the specified queue. The limit to the number of\n        messages which may be present in the queue is governed by the queue\n        quota, MaxSizeInMegabytes. If this message causes the queue to exceed\n        its quota, a quota exceeded error is returned and the message will be\n        rejected.\n\n        queue_name: Name of the queue.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only('message.body',\n                                                    message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_queue_message(self, queue_name, timeout='60'):\n        '''\n        Atomically retrieves and locks a message from a queue for processing.\n        The message is guaranteed not to be delivered to other receivers (on\n        the same queue only) during the lock duration period specified\n        in the queue description. Once the lock expires, the message will be\n        available to other receivers. In order to complete processing of the\n        message, the receiver should issue a delete command with the lock ID\n        received from this operation. To abandon processing of the message and\n        unlock it for other receivers, an Unlock Message command should be\n        issued, or the lock duration period can expire.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Unlocks a message for processing by other receivers on a given\n        queue. This operation deletes the lock object, causing the\n        message to be unlocked. 
A message must have first been locked by a\n        receiver before this operation is called.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_queue_message(self, queue_name, timeout='60'):\n        '''\n        Reads and deletes a message from a queue as an atomic operation. This\n        operation should be used when a best-effort guarantee is sufficient\n        for an application; that is, using this operation it is possible for\n        messages to be lost if processing fails.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the\n        queue. This operation should only be called after a previously locked\n        message has been processed successfully, to maintain At-Least-Once\n        delivery assurances.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def receive_queue_message(self, queue_name, peek_lock=True, 
timeout=60):\n        '''\n        Receive a message from a queue for processing.\n\n        queue_name: Name of the queue.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_queue_message(queue_name, timeout)\n        else:\n            return self.read_delete_queue_message(queue_name, timeout)\n\n    def receive_subscription_message(self, topic_name, subscription_name,\n                                     peek_lock=True, timeout=60):\n        '''\n        Receive a message from a subscription for processing.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_subscription_message(topic_name,\n                                                       subscription_name,\n                                                       timeout)\n        else:\n            return self.read_delete_subscription_message(topic_name,\n                                                         subscription_name,\n                                                         timeout)\n\n    def _get_host(self):\n        return self.service_namespace + self.host_base\n\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _service_bus_error_handler(ex)\n\n        return resp\n\n    def _update_service_bus_header(self, request):\n        ''' Add additional headers for service bus. '''\n\n        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n            request.headers.append(('Content-Length', str(len(request.body))))\n\n        # If it is not a GET or HEAD request, a content-type must be set.\n        if request.method not in ['GET', 'HEAD']:\n            for name, _ in request.headers:\n                if 'content-type' == name.lower():\n                    break\n            else:\n                request.headers.append(\n                    ('Content-Type',\n                     'application/atom+xml;type=entry;charset=utf-8'))\n\n        # Adds authorization header for authentication.\n        self.authentication.sign_request(request, self._httpclient)\n\n        return request.headers\n\n\n# Token cache for Authentication\n# Shared by the different instances of ServiceBusWrapTokenAuthentication\n_tokens = {}\n\n\nclass ServiceBusWrapTokenAuthentication:\n    def __init__(self, account_key, issuer):\n        self.account_key = account_key\n        self.issuer = issuer\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        ''' return the signed string with token. '''\n        return 'WRAP access_token=\"' + \\\n                self._get_token(request.host, request.path, httpclient) + '\"'\n\n    def _token_is_expired(self, token):\n        ''' Check whether the token has expired. 
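A WRAP token embeds its expiry\n        as an ExpiresOn=<seconds-since-epoch> parameter; tokens within 30\n        seconds of expiry are treated as expired so they stay valid in\n        transit. 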
'''\n        time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')\n        time_pos_end = token.find('&', time_pos_begin)\n        token_expire_time = int(token[time_pos_begin:time_pos_end])\n        time_now = time.mktime(time.localtime())\n\n        # Treat the token as expired when fewer than 30 seconds remain, so it\n        # does not expire while the request is in flight.\n        return (token_expire_time - time_now) < 30\n\n    def _get_token(self, host, path, httpclient):\n        '''\n        Returns a token for the request.\n\n        host: Host of the service bus request.\n        path: Path of the service bus request.\n        '''\n        wrap_scope = 'http://' + host + path + self.issuer + self.account_key\n\n        # Return the cached token if one exists and has not expired.\n        if wrap_scope in _tokens:\n            token = _tokens[wrap_scope]\n            if not self._token_is_expired(token):\n                return token\n\n        # Get a new token from the access control server.\n        request = HTTPRequest()\n        request.protocol_override = 'https'\n        request.host = host.replace('.servicebus.', '-sb.accesscontrol.')\n        request.method = 'POST'\n        request.path = '/WRAPv0.9'\n        request.body = ('wrap_name=' + url_quote(self.issuer) +\n                        '&wrap_password=' + url_quote(self.account_key) +\n                        '&wrap_scope=' +\n                        url_quote('http://' + host + path)).encode('utf-8')\n        request.headers.append(('Content-Length', str(len(request.body))))\n        resp = httpclient.perform_request(request)\n\n        token = resp.body.decode('utf-8')\n        token = url_unquote(token[token.find('=') + 1:token.rfind('&')])\n        _tokens[wrap_scope] = token\n\n        return token\n\n\nclass ServiceBusSASAuthentication:\n    def __init__(self, key_name, key_value):\n        self.key_name = key_name\n        self.key_value = key_value\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        uri = httpclient.get_uri(request)\n        uri = url_quote(uri, '').lower()\n        expiry = str(self._get_expiry())\n\n        to_sign = uri + '\\n' + expiry\n        signature = url_quote(_sign_string(self.key_value, to_sign, False), '')\n\n        auth_format = 'SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'\n        auth = auth_format.format(signature, expiry, self.key_name, uri)\n\n        return auth\n\n    def _get_expiry(self):\n        '''Returns the UTC time, in seconds since the Epoch, at which this\n        signed request expires (5 minutes from now).'''\n        return int(round(time.time() + 300))\n
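\n\n# Illustrative sketch, not part of the original SDK: the authorization value\n# built above is an HMAC-SHA256 signature over '<url-quoted lowercase\n# uri>\\n<expiry>'. Constructing the service with SAS credentials is assumed\n# to look like this in this SDK version:\n#\n#   sbs = ServiceBusService(\n#       service_namespace='mynamespace',\n#       shared_access_key_name='RootManageSharedAccessKey',\n#       shared_access_key_value='<key value>')\n"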
  },
  {
    "path": "CustomScript/azure/servicemanagement/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    _Base64String,\n    _create_entry,\n    _dict_of,\n    _encode_base64,\n    _general_error_handler,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _list_of,\n    _scalar_list_of,\n    _str,\n    _xml_attribute,\n    )\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'\nAZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'\n\n# x-ms-version for service management.\nX_MS_VERSION = '2013-06-01'\n\n#-----------------------------------------------------------------------------\n# Data classes\n\n\nclass StorageServices(WindowsAzureData):\n\n    def __init__(self):\n        self.storage_services = _list_of(StorageService)\n\n    def __iter__(self):\n        return iter(self.storage_services)\n\n    def __len__(self):\n        return len(self.storage_services)\n\n    def __getitem__(self, index):\n        return self.storage_services[index]\n\n\nclass StorageService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = ''\n        self.service_name = ''\n        self.storage_service_properties = StorageAccountProperties()\n        self.storage_service_keys = StorageServiceKeys()\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass StorageAccountProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.affinity_group = u''\n        self.location = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.endpoints = _scalar_list_of(str, 'Endpoint')\n        self.geo_replication_enabled = False\n        self.geo_primary_region = u''\n        self.status_of_primary = u''\n        self.geo_secondary_region = u''\n        self.status_of_secondary = u''\n        self.last_geo_failover_time = u''\n        self.creation_time = u''\n\n\nclass StorageServiceKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.primary = u''\n        self.secondary = u''\n\n\nclass Locations(WindowsAzureData):\n\n    def __init__(self):\n        self.locations = _list_of(Location)\n\n    def __iter__(self):\n        return iter(self.locations)\n\n    def __len__(self):\n        return len(self.locations)\n\n    def __getitem__(self, index):\n        return self.locations[index]\n\n\nclass Location(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.display_name = u''\n        self.available_services = _scalar_list_of(str, 'AvailableService')\n\n\nclass 
AffinityGroup(WindowsAzureData):\n\n    def __init__(self):\n        self.name = ''\n        self.label = _Base64String()\n        self.description = u''\n        self.location = u''\n        self.hosted_services = HostedServices()\n        self.storage_services = StorageServices()\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass AffinityGroups(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_groups = _list_of(AffinityGroup)\n\n    def __iter__(self):\n        return iter(self.affinity_groups)\n\n    def __len__(self):\n        return len(self.affinity_groups)\n\n    def __getitem__(self, index):\n        return self.affinity_groups[index]\n\n\nclass HostedServices(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_services = _list_of(HostedService)\n\n    def __iter__(self):\n        return iter(self.hosted_services)\n\n    def __len__(self):\n        return len(self.hosted_services)\n\n    def __getitem__(self, index):\n        return self.hosted_services[index]\n\n\nclass HostedService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = u''\n        self.service_name = u''\n        self.hosted_service_properties = HostedServiceProperties()\n        self.deployments = Deployments()\n\n\nclass HostedServiceProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.location = u''\n        self.affinity_group = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.date_created = u''\n        self.date_last_modified = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass VirtualNetworkSites(WindowsAzureData):\n\n    def __init__(self):\n        self.virtual_network_sites = _list_of(VirtualNetworkSite)\n\n    def __iter__(self):\n        return iter(self.virtual_network_sites)\n\n    def __len__(self):\n        return len(self.virtual_network_sites)\n\n    def __getitem__(self, index):\n        return self.virtual_network_sites[index]\n\n\nclass VirtualNetworkSite(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.id = u''\n        self.affinity_group = u''\n        self.subnets = Subnets()\n\n\nclass Subnets(WindowsAzureData):\n\n    def __init__(self):\n        self.subnets = _list_of(Subnet)\n\n    def __iter__(self):\n        return iter(self.subnets)\n\n    def __len__(self):\n        return len(self.subnets)\n\n    def __getitem__(self, index):\n        return self.subnets[index]\n\n\nclass Subnet(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.address_prefix = u''\n\n\n\nclass Deployments(WindowsAzureData):\n\n    def __init__(self):\n        self.deployments = _list_of(Deployment)\n\n    def __iter__(self):\n        return iter(self.deployments)\n\n    def __len__(self):\n        return len(self.deployments)\n\n    def __getitem__(self, index):\n        return self.deployments[index]\n\n\nclass Deployment(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.deployment_slot = u''\n        self.private_id = u''\n        self.status = u''\n        self.label = _Base64String()\n        self.url = u''\n        self.configuration = _Base64String()\n        self.role_instance_list = RoleInstanceList()\n        self.upgrade_status = UpgradeStatus()\n        self.upgrade_domain_count = u''\n        self.role_list = RoleList()\n        self.sdk_version = u''\n        self.input_endpoint_list 
= InputEndpoints()\n        self.locked = False\n        self.rollback_allowed = False\n        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()\n        self.created_time = u''\n        self.virtual_network_name = u''\n        self.last_modified_time = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass RoleInstanceList(WindowsAzureData):\n\n    def __init__(self):\n        self.role_instances = _list_of(RoleInstance)\n\n    def __iter__(self):\n        return iter(self.role_instances)\n\n    def __len__(self):\n        return len(self.role_instances)\n\n    def __getitem__(self, index):\n        return self.role_instances[index]\n\n\nclass RoleInstance(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.instance_name = u''\n        self.instance_status = u''\n        self.instance_upgrade_domain = 0\n        self.instance_fault_domain = 0\n        self.instance_size = u''\n        self.instance_state_details = u''\n        self.instance_error_code = u''\n        self.ip_address = u''\n        self.instance_endpoints = InstanceEndpoints()\n        self.power_state = u''\n        self.fqdn = u''\n        self.host_name = u''\n\n\nclass InstanceEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.instance_endpoints = _list_of(InstanceEndpoint)\n\n    def __iter__(self):\n        return iter(self.instance_endpoints)\n\n    def __len__(self):\n        return len(self.instance_endpoints)\n\n    def __getitem__(self, index):\n        return self.instance_endpoints[index]\n\n\nclass InstanceEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.vip = u''\n        self.public_port = u''\n        self.local_port = u''\n        self.protocol = u''\n\n\nclass UpgradeStatus(WindowsAzureData):\n\n    def __init__(self):\n        self.upgrade_type = u''\n        self.current_upgrade_domain_state = u''\n        self.current_upgrade_domain = u''\n\n\nclass InputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(InputEndpoint)\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass InputEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.vip = u''\n        self.port = u''\n\n\nclass RoleList(WindowsAzureData):\n\n    def __init__(self):\n        self.roles = _list_of(Role)\n\n    def __iter__(self):\n        return iter(self.roles)\n\n    def __len__(self):\n        return len(self.roles)\n\n    def __getitem__(self, index):\n        return self.roles[index]\n\n\nclass Role(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass PersistentVMDowntimeInfo(WindowsAzureData):\n\n    def __init__(self):\n        self.start_time = u''\n        self.end_time = u''\n        self.status = u''\n\n\nclass Certificates(WindowsAzureData):\n\n    def __init__(self):\n        self.certificates = 
_list_of(Certificate)\n\n    def __iter__(self):\n        return iter(self.certificates)\n\n    def __len__(self):\n        return len(self.certificates)\n\n    def __getitem__(self, index):\n        return self.certificates[index]\n\n\nclass Certificate(WindowsAzureData):\n\n    def __init__(self):\n        self.certificate_url = u''\n        self.thumbprint = u''\n        self.thumbprint_algorithm = u''\n        self.data = u''\n\n\nclass OperationError(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.message = u''\n\n\nclass Operation(WindowsAzureData):\n\n    def __init__(self):\n        self.id = u''\n        self.status = u''\n        self.http_status_code = u''\n        self.error = OperationError()\n\n\nclass OperatingSystem(WindowsAzureData):\n\n    def __init__(self):\n        self.version = u''\n        self.label = _Base64String()\n        self.is_default = True\n        self.is_active = True\n        self.family = 0\n        self.family_label = _Base64String()\n\n\nclass OperatingSystems(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_systems = _list_of(OperatingSystem)\n\n    def __iter__(self):\n        return iter(self.operating_systems)\n\n    def __len__(self):\n        return len(self.operating_systems)\n\n    def __getitem__(self, index):\n        return self.operating_systems[index]\n\n\nclass OperatingSystemFamily(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.label = _Base64String()\n        self.operating_systems = OperatingSystems()\n\n\nclass OperatingSystemFamilies(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_system_families = _list_of(OperatingSystemFamily)\n\n    def __iter__(self):\n        return iter(self.operating_system_families)\n\n    def __len__(self):\n        return len(self.operating_system_families)\n\n    def __getitem__(self, index):\n        return self.operating_system_families[index]\n\n\nclass Subscription(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_id = u''\n        self.subscription_name = u''\n        self.subscription_status = u''\n        self.account_admin_live_email_id = u''\n        self.service_admin_live_email_id = u''\n        self.max_core_count = 0\n        self.max_storage_accounts = 0\n        self.max_hosted_services = 0\n        self.current_core_count = 0\n        self.current_hosted_services = 0\n        self.current_storage_accounts = 0\n        self.max_virtual_network_sites = 0\n        self.max_local_network_sites = 0\n        self.max_dns_servers = 0\n\n\nclass AvailabilityResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.result = False\n\n\nclass SubscriptionCertificates(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificates = _list_of(SubscriptionCertificate)\n\n    def __iter__(self):\n        return iter(self.subscription_certificates)\n\n    def __len__(self):\n        return len(self.subscription_certificates)\n\n    def __getitem__(self, index):\n        return self.subscription_certificates[index]\n\n\nclass SubscriptionCertificate(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificate_public_key = u''\n        self.subscription_certificate_thumbprint = u''\n        self.subscription_certificate_data = u''\n        self.created = u''\n\n\nclass Images(WindowsAzureData):\n\n    def __init__(self):\n        self.images = _list_of(OSImage)\n\n    def __iter__(self):\n        return 
iter(self.images)\n\n    def __len__(self):\n        return len(self.images)\n\n    def __getitem__(self, index):\n        return self.images[index]\n\n\nclass OSImage(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.category = u''\n        self.location = u''\n        self.logical_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.eula = u''\n        self.description = u''\n\n\nclass Disks(WindowsAzureData):\n\n    def __init__(self):\n        self.disks = _list_of(Disk)\n\n    def __iter__(self):\n        return iter(self.disks)\n\n    def __len__(self):\n        return len(self.disks)\n\n    def __getitem__(self, index):\n        return self.disks[index]\n\n\nclass Disk(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.attached_to = AttachedTo()\n        self.has_operating_system = u''\n        self.is_corrupted = u''\n        self.location = u''\n        self.logical_disk_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.source_image_name = u''\n\n\nclass AttachedTo(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_service_name = u''\n        self.deployment_name = u''\n        self.role_name = u''\n\n\nclass PersistentVMRole(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''  # undocumented\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass ConfigurationSets(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_sets = _list_of(ConfigurationSet)\n\n    def __iter__(self):\n        return iter(self.configuration_sets)\n\n    def __len__(self):\n        return len(self.configuration_sets)\n\n    def __getitem__(self, index):\n        return self.configuration_sets[index]\n\n\nclass ConfigurationSet(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_set_type = u'NetworkConfiguration'\n        self.role_type = u''\n        self.input_endpoints = ConfigurationSetInputEndpoints()\n        self.subnet_names = _scalar_list_of(str, 'SubnetName')\n\n\nclass ConfigurationSetInputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(\n            ConfigurationSetInputEndpoint, 'InputEndpoint')\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass ConfigurationSetInputEndpoint(WindowsAzureData):\n\n    '''\n    Initializes a network configuration input endpoint.\n\n    name: Specifies the name for the external endpoint.\n    protocol:\n        Specifies the transport protocol for the endpoint. Possible values\n        are: TCP, UDP.\n    port: Specifies the external port to use for the endpoint.\n    local_port:\n        Specifies the internal port on which the virtual machine is listening\n        to serve the endpoint.\n    load_balanced_endpoint_set_name:\n        Specifies a name for a set of load-balanced endpoints. Specifying this\n        element for a given endpoint adds it to the set. If you are setting an\n        endpoint for connecting to the virtual machine via Remote Desktop, do\n        not set this property.\n    enable_direct_server_return:\n        Specifies whether direct server return load balancing is enabled.\n    '''\n\n    def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'',\n                 load_balanced_endpoint_set_name=u'',\n                 enable_direct_server_return=False):\n        self.enable_direct_server_return = enable_direct_server_return\n        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name\n        self.local_port = local_port\n        self.name = name\n        self.port = port\n        self.load_balancer_probe = LoadBalancerProbe()\n        self.protocol = protocol\n\n\nclass WindowsConfigurationSet(WindowsAzureData):\n\n    def __init__(self, computer_name=None, admin_password=None,\n                 reset_password_on_first_logon=None,\n                 enable_automatic_updates=None, time_zone=None,\n                 admin_username=None):\n        self.configuration_set_type = u'WindowsProvisioningConfiguration'\n        self.computer_name = computer_name\n        self.admin_password = admin_password\n        self.admin_username = admin_username\n        self.reset_password_on_first_logon = reset_password_on_first_logon\n        self.enable_automatic_updates = enable_automatic_updates\n        self.time_zone = time_zone\n        self.domain_join = DomainJoin()\n        self.stored_certificate_settings = StoredCertificateSettings()\n        self.win_rm = WinRM()\n\n\nclass DomainJoin(WindowsAzureData):\n\n    def __init__(self):\n        self.credentials = Credentials()\n        self.join_domain = u''\n        self.machine_object_ou = u''\n\n\nclass Credentials(WindowsAzureData):\n\n    def __init__(self):\n        self.domain = u''\n        self.username = u''\n        self.password = u''\n\n\nclass StoredCertificateSettings(WindowsAzureData):\n\n    def __init__(self):\n        self.stored_certificate_settings = _list_of(CertificateSetting)\n\n    def __iter__(self):\n        return iter(self.stored_certificate_settings)\n\n    def __len__(self):\n        return len(self.stored_certificate_settings)\n\n    def __getitem__(self, index):\n        return self.stored_certificate_settings[index]\n\n\nclass CertificateSetting(WindowsAzureData):\n\n    '''\n    Initializes a certificate setting.\n\n    thumbprint:\n        Specifies the thumbprint of the certificate to be provisioned. 
The\n        thumbprint must specify an existing service certificate.\n    store_name:\n        Specifies the name of the certificate store from which to retrieve\n        the certificate.\n    store_location:\n        Specifies the target certificate store location on the virtual machine.\n        The only supported value is LocalMachine.\n    '''\n\n    def __init__(self, thumbprint=u'', store_name=u'', store_location=u''):\n        self.thumbprint = thumbprint\n        self.store_name = store_name\n        self.store_location = store_location\n\n\nclass WinRM(WindowsAzureData):\n\n    '''\n    Contains configuration settings for the Windows Remote Management service\n    on the Virtual Machine.\n    '''\n\n    def __init__(self):\n        self.listeners = Listeners()\n\n\nclass Listeners(WindowsAzureData):\n\n    def __init__(self):\n        self.listeners = _list_of(Listener)\n\n    def __iter__(self):\n        return iter(self.listeners)\n\n    def __len__(self):\n        return len(self.listeners)\n\n    def __getitem__(self, index):\n        return self.listeners[index]\n\n\nclass Listener(WindowsAzureData):\n\n    '''\n    Specifies the protocol and certificate information for the listener.\n\n    protocol:\n        Specifies the protocol of the listener. Possible values are: Http,\n        Https. The value is case sensitive.\n    certificate_thumbprint:\n        Optional. Specifies the certificate thumbprint for the secure\n        connection. If this value is not specified, a self-signed certificate\n        is generated and used for the Virtual Machine.\n    '''\n\n    def __init__(self, protocol=u'', certificate_thumbprint=u''):\n        self.protocol = protocol\n        self.certificate_thumbprint = certificate_thumbprint\n\n\nclass LinuxConfigurationSet(WindowsAzureData):\n\n    def __init__(self, host_name=None, user_name=None, user_password=None,\n                 disable_ssh_password_authentication=None):\n        self.configuration_set_type = u'LinuxProvisioningConfiguration'\n        self.host_name = host_name\n        self.user_name = user_name\n        self.user_password = user_password\n        self.disable_ssh_password_authentication =\\\n            disable_ssh_password_authentication\n        self.ssh = SSH()\n\n\nclass SSH(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = PublicKeys()\n        self.key_pairs = KeyPairs()\n\n\nclass PublicKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = _list_of(PublicKey)\n\n    def __iter__(self):\n        return iter(self.public_keys)\n\n    def __len__(self):\n        return len(self.public_keys)\n\n    def __getitem__(self, index):\n        return self.public_keys[index]\n\n\nclass PublicKey(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass KeyPairs(WindowsAzureData):\n\n    def __init__(self):\n        self.key_pairs = _list_of(KeyPair)\n\n    def __iter__(self):\n        return iter(self.key_pairs)\n\n    def __len__(self):\n        return len(self.key_pairs)\n\n    def __getitem__(self, index):\n        return self.key_pairs[index]\n\n\nclass KeyPair(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass LoadBalancerProbe(WindowsAzureData):\n\n    def __init__(self):\n        self.path = u''\n        self.port = u''\n        self.protocol = u''\n\n\n
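# Illustrative sketch, not part of the original SDK: assembling configuration\n# sets for a Linux role; all names and values below are examples.\n#\n#   linux_config = LinuxConfigurationSet(\n#       host_name='myvm', user_name='azureuser', user_password=None,\n#       disable_ssh_password_authentication=True)\n#   linux_config.ssh.public_keys.public_keys.append(\n#       PublicKey(fingerprint='<cert thumbprint>',\n#                 path='/home/azureuser/.ssh/authorized_keys'))\n#\n#   network_config = ConfigurationSet()\n#   network_config.input_endpoints.input_endpoints.append(\n#       ConfigurationSetInputEndpoint(name='SSH', protocol='tcp',\n#                                     port='22', local_port='22'))\n\n\nclass 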
DataVirtualHardDisks(WindowsAzureData):\n\n    def __init__(self):\n        self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)\n\n    def __iter__(self):\n        return iter(self.data_virtual_hard_disks)\n\n    def __len__(self):\n        return len(self.data_virtual_hard_disks)\n\n    def __getitem__(self, index):\n        return self.data_virtual_hard_disks[index]\n\n\nclass DataVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self):\n        self.host_caching = u''\n        self.disk_label = u''\n        self.disk_name = u''\n        self.lun = 0\n        self.logical_disk_size_in_gb = 0\n        self.media_link = u''\n\n\nclass OSVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self, source_image_name=None, media_link=None,\n                 host_caching=None, disk_label=None, disk_name=None):\n        self.source_image_name = source_image_name\n        self.media_link = media_link\n        self.host_caching = host_caching\n        self.disk_label = disk_label\n        self.disk_name = disk_name\n        self.os = u''  # undocumented, not used when adding a role\n\n\nclass AsynchronousOperationResult(WindowsAzureData):\n\n    def __init__(self, request_id=None):\n        self.request_id = request_id\n\n\nclass ServiceBusRegion(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.fullname = u''\n\n\nclass ServiceBusNamespace(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.region = u''\n        self.default_key = u''\n        self.status = u''\n        self.created_at = u''\n        self.acs_management_endpoint = u''\n        self.servicebus_endpoint = u''\n        self.connection_string = u''\n        self.subscription_id = u''\n        self.enabled = False\n\n\nclass WebSpaces(WindowsAzureData):\n\n    def __init__(self):\n        self.web_space = _list_of(WebSpace)\n\n    def __iter__(self):\n        return iter(self.web_space)\n\n    def __len__(self):\n        return len(self.web_space)\n\n    def __getitem__(self, index):\n        return self.web_space[index]\n    \n\nclass WebSpace(WindowsAzureData):\n    \n    def __init__(self):\n        self.availability_state = u''\n        self.geo_location = u''\n        self.geo_region = u''\n        self.name = u''\n        self.plan = u''\n        self.status = u''\n        self.subscription = u''\n\n\nclass Sites(WindowsAzureData):\n\n    def __init__(self):\n        self.site = _list_of(Site)\n\n    def __iter__(self):\n        return iter(self.site)\n\n    def __len__(self):\n        return len(self.site)\n\n    def __getitem__(self, index):\n        return self.site[index]\n    \n\nclass Site(WindowsAzureData):\n    \n    def __init__(self):\n        self.admin_enabled = False\n        self.availability_state = ''\n        self.compute_mode = ''\n        self.enabled = False\n        self.enabled_host_names = _scalar_list_of(str, 'a:string')\n        self.host_name_ssl_states = HostNameSslStates()\n        self.host_names = _scalar_list_of(str, 'a:string')\n        self.last_modified_time_utc = ''\n        self.name = ''\n        self.repository_site_name = ''\n        self.self_link = ''\n        self.server_farm = ''\n        self.site_mode = ''\n        self.state = ''\n        self.storage_recovery_default_state = ''\n        self.usage_state = ''\n        self.web_space = ''\n\n\nclass HostNameSslStates(WindowsAzureData):\n\n    def __init__(self):\n        self.host_name_ssl_state = _list_of(HostNameSslState)\n\n    def __iter__(self):\n  
      return iter(self.host_name_ssl_state)\n\n    def __len__(self):\n        return len(self.host_name_ssl_state)\n\n    def __getitem__(self, index):\n        return self.host_name_ssl_state[index]\n\n\nclass HostNameSslState(WindowsAzureData):\n    \n    def __init__(self):\n        self.name = u''\n        self.ssl_state = u''\n        \n\nclass PublishData(WindowsAzureData):\n    _xml_name = 'publishData'\n    \n    def __init__(self):\n        self.publish_profiles = _list_of(PublishProfile, 'publishProfile')\n\nclass PublishProfile(WindowsAzureData):\n    \n    def __init__(self):\n        self.profile_name = _xml_attribute('profileName')\n        self.publish_method = _xml_attribute('publishMethod')\n        self.publish_url = _xml_attribute('publishUrl')\n        self.msdeploysite = _xml_attribute('msdeploySite')\n        self.user_name = _xml_attribute('userName')\n        self.user_pwd = _xml_attribute('userPWD')\n        self.destination_app_url = _xml_attribute('destinationAppUrl')\n        self.sql_server_db_connection_string = _xml_attribute('SQLServerDBConnectionString')\n        self.my_sqldb_connection_string = _xml_attribute('mySQLDBConnectionString')\n        self.hosting_provider_forum_link = _xml_attribute('hostingProviderForumLink')\n        self.control_panel_link = _xml_attribute('controlPanelLink')\n    \nclass QueueDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.lock_duration = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.requires_session = False\n        self.default_message_time_to_live = u''\n        self.dead_lettering_on_message_expiration = False\n        self.duplicate_detection_history_time_window = u''\n        self.max_delivery_count = 0\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.message_count = 0\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.auto_delete_on_idle = u''\n        self.count_details = CountDetails()\n        self.entity_availability_status = u''\n    \nclass TopicDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.default_message_time_to_live = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.duplicate_detection_history_time_window = u''\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.filtering_messages_before_publishing = False\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.count_details = CountDetails()\n        self.subscription_count = 0\n\nclass CountDetails(WindowsAzureData):\n    \n    def __init__(self):\n        self.active_message_count = 0\n        self.dead_letter_message_count = 0\n        self.scheduled_message_count = 0\n        self.transfer_message_count = 0\n        self.transfer_dead_letter_message_count = 0\n\nclass NotificationHubDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.registration_ttl = u''\n        self.authorization_rules = AuthorizationRules()\n\nclass 
AuthorizationRules(WindowsAzureData):\n\n    def __init__(self):\n        self.authorization_rule = _list_of(AuthorizationRule)\n\n    def __iter__(self):\n        return iter(self.authorization_rule)\n\n    def __len__(self):\n        return len(self.authorization_rule)\n\n    def __getitem__(self, index):\n        return self.authorization_rule[index]\n    \nclass AuthorizationRule(WindowsAzureData):\n    \n    def __init__(self):\n        self.claim_type = u''\n        self.claim_value = u''\n        self.rights = _scalar_list_of(str, 'AccessRights')\n        self.created_time = u''\n        self.modified_time = u''\n        self.key_name = u''\n        self.primary_key = u''\n        self.secondary_key = u''\n\nclass RelayDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.path = u''\n        self.listener_type = u''\n        self.listener_count = 0\n        self.created_at = u''\n        self.updated_at = u''\n\n\nclass MetricResponses(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_response = _list_of(MetricResponse)\n\n    def __iter__(self):\n        return iter(self.metric_response)\n\n    def __len__(self):\n        return len(self.metric_response)\n\n    def __getitem__(self, index):\n        return self.metric_response[index]\n\n\nclass MetricResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.data = Data()\n        self.message = u''\n\n\nclass Data(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.end_time = u''\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.start_time = u''\n        self.time_grain = u''\n        self.unit = u''\n        self.values = Values()\n\n\nclass Values(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_sample = _list_of(MetricSample)\n\n    def __iter__(self):\n        return iter(self.metric_sample)\n\n    def __len__(self):\n        return len(self.metric_sample)\n\n    def __getitem__(self, index):\n        return self.metric_sample[index]\n\n\nclass MetricSample(WindowsAzureData):\n\n    def __init__(self):\n        self.count = 0\n        self.time_created = u''\n        self.total = 0\n\n\nclass MetricDefinitions(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_definition = _list_of(MetricDefinition)\n\n    def __iter__(self):\n        return iter(self.metric_definition)\n\n    def __len__(self):\n        return len(self.metric_definition)\n\n    def __getitem__(self, index):\n        return self.metric_definition[index]\n\n\nclass MetricDefinition(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.metric_availabilities = MetricAvailabilities()\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.unit = u''\n\n\nclass MetricAvailabilities(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_availability = _list_of(MetricAvailability, 'MetricAvailabilily')\n\n    def __iter__(self):\n        return iter(self.metric_availability)\n\n    def __len__(self):\n        return len(self.metric_availability)\n\n    def __getitem__(self, index):\n        return self.metric_availability[index]\n\n\nclass MetricAvailability(WindowsAzureData):\n\n    def __init__(self):\n        self.retention = u''\n        self.time_grain = u''\n\n\nclass Servers(WindowsAzureData):\n\n    def __init__(self):\n        self.server = _list_of(Server)\n\n    def __iter__(self):\n        return 
iter(self.server)\n\n    def __len__(self):\n        return len(self.server)\n\n    def __getitem__(self, index):\n        return self.server[index]\n\n\nclass Server(WindowsAzureData):\n    \n    def __init__(self):\n        self.name = u''\n        self.administrator_login = u''\n        self.location = u''\n        self.fully_qualified_domain_name = u''\n        self.version = u''\n\n\nclass Database(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.type = u''\n        self.state = u''\n        self.self_link = u''\n        self.parent_link = u''\n        self.id = 0\n        self.edition = u''\n        self.collation_name = u''\n        self.creation_date = u''\n        self.is_federation_root = False\n        self.is_system_object = False\n        self.max_size_bytes = 0\n\n\ndef _update_management_header(request):\n    ''' Add additional headers for management. '''\n\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # Append additional headers based on the service.\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # If this is not a GET or HEAD request, a content type must be set.\n    # The for/else adds a default Content-Type only when none was found\n    # among the existing headers.\n    if request.method not in ['GET', 'HEAD']:\n        for name, _ in request.headers:\n            if 'content-type' == name.lower():\n                break\n        else:\n            request.headers.append(\n                ('Content-Type',\n                 'application/atom+xml;type=entry;charset=utf-8'))\n\n    return request.headers\n\n\ndef _parse_response_for_async_op(response):\n    ''' Extracts the request id from the response headers. '''\n\n    if response is None:\n        return None\n\n    result = AsynchronousOperationResult()\n    if response.headers:\n        for name, value in response.headers:\n            if name.lower() == 'x-ms-request-id':\n                result.request_id = value\n\n    return result\n\n\n
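# Illustrative sketch, not part of the original SDK: polling a long-running\n# operation with the request id extracted above. `sms` is assumed to be a\n# ServiceManagementService instance; restart_role is one of the asynchronous\n# operations that return an AsynchronousOperationResult.\n#\n#   result = sms.restart_role('myservice', 'mydeployment', 'myrole')\n#   operation = sms.get_operation_status(result.request_id)\n#   while operation.status == 'InProgress':\n#       time.sleep(5)\n#       operation = sms.get_operation_status(result.request_id)\n#   # operation.status is then 'Succeeded' or 'Failed'\n\n\ndef _management_error_handler(http_error):\n    ''' Simple error handler for management service. 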
'''\n    return _general_error_handler(http_error)\n\n\ndef _lower(text):\n    return text.lower()\n\n\nclass _XmlSerializer(object):\n\n    @staticmethod\n    def create_storage_service_input_to_xml(service_name, description, label,\n                                            affinity_group, location,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateStorageServiceInput',\n            [('ServiceName', service_name),\n             ('Description', description),\n             ('Label', label, _encode_base64),\n             ('AffinityGroup', affinity_group),\n             ('Location', location),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def update_storage_service_input_to_xml(description, label,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpdateStorageServiceInput',\n            [('Description', description),\n             ('Label', label, _encode_base64),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def regenerate_keys_to_xml(key_type):\n        return _XmlSerializer.doc_from_data('RegenerateKeys',\n                                            [('KeyType', key_type)])\n\n    @staticmethod\n    def update_hosted_service_to_xml(label, description, extended_properties):\n        return _XmlSerializer.doc_from_data('UpdateHostedService',\n                                            [('Label', label, _encode_base64),\n                                             ('Description', description)],\n                                            extended_properties)\n\n    @staticmethod\n    def create_hosted_service_to_xml(service_name, label, description,\n                                     location, affinity_group,\n                                     extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateHostedService',\n            [('ServiceName', service_name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location),\n             ('AffinityGroup', affinity_group)],\n            extended_properties)\n\n    @staticmethod\n    def create_deployment_to_xml(name, package_url, label, configuration,\n                                 start_deployment, treat_warnings_as_error,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateDeployment',\n            [('Name', name),\n             ('PackageUrl', package_url),\n             ('Label', label, _encode_base64),\n             ('Configuration', configuration),\n             ('StartDeployment',\n             start_deployment, _lower),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def swap_deployment_to_xml(production, source_deployment):\n        return _XmlSerializer.doc_from_data(\n            'Swap',\n            [('Production', production),\n             ('SourceDeployment', source_deployment)])\n\n    @staticmethod\n    def update_deployment_status_to_xml(status):\n        return _XmlSerializer.doc_from_data(\n     
       'UpdateDeploymentStatus',\n            [('Status', status)])\n\n    @staticmethod\n    def change_deployment_to_xml(configuration, treat_warnings_as_error, mode,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'ChangeConfiguration',\n            [('Configuration', configuration),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower),\n             ('Mode', mode)],\n            extended_properties)\n\n    @staticmethod\n    def upgrade_deployment_to_xml(mode, package_url, configuration, label,\n                                  role_to_upgrade, force, extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpgradeDeployment',\n            [('Mode', mode),\n             ('PackageUrl', package_url),\n             ('Configuration', configuration),\n             ('Label', label, _encode_base64),\n             ('RoleToUpgrade', role_to_upgrade),\n             ('Force', force, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def rollback_upgrade_to_xml(mode, force):\n        return _XmlSerializer.doc_from_data(\n            'RollbackUpdateOrUpgrade',\n            [('Mode', mode),\n             ('Force', force, _lower)])\n\n    @staticmethod\n    def walk_upgrade_domain_to_xml(upgrade_domain):\n        return _XmlSerializer.doc_from_data(\n            'WalkUpgradeDomain',\n            [('UpgradeDomain', upgrade_domain)])\n\n    @staticmethod\n    def certificate_file_to_xml(data, certificate_format, password):\n        return _XmlSerializer.doc_from_data(\n            'CertificateFile',\n            [('Data', data),\n             ('CertificateFormat', certificate_format),\n             ('Password', password)])\n\n    @staticmethod\n    def create_affinity_group_to_xml(name, label, description, location):\n        return _XmlSerializer.doc_from_data(\n            'CreateAffinityGroup',\n            [('Name', name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location)])\n\n    @staticmethod\n    def update_affinity_group_to_xml(label, description):\n        return _XmlSerializer.doc_from_data(\n            'UpdateAffinityGroup',\n            [('Label', label, _encode_base64),\n             ('Description', description)])\n\n    @staticmethod\n    def subscription_certificate_to_xml(public_key, thumbprint, data):\n        return _XmlSerializer.doc_from_data(\n            'SubscriptionCertificate',\n            [('SubscriptionCertificatePublicKey', public_key),\n             ('SubscriptionCertificateThumbprint', thumbprint),\n             ('SubscriptionCertificateData', data)])\n\n    @staticmethod\n    def os_image_to_xml(label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'OSImage',\n            [('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun,\n                                      logical_disk_size_in_gb, media_link,\n                                      source_media_link):\n        return _XmlSerializer.doc_from_data(\n            'DataVirtualHardDisk',\n            [('HostCaching', host_caching),\n             ('DiskLabel', disk_label),\n             ('DiskName', disk_name),\n             ('Lun', lun),\n             ('LogicalDiskSizeInGB', logical_disk_size_in_gb),\n             
('MediaLink', media_link),\n             ('SourceMediaLink', source_media_link)])\n\n    @staticmethod\n    def disk_to_xml(has_operating_system, label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'Disk',\n            [('HasOperatingSystem', has_operating_system, _lower),\n             ('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def restart_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'RestartRoleOperation',\n            '<OperationType>RestartRoleOperation</OperationType>')\n\n    @staticmethod\n    def shutdown_role_operation_to_xml(post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRoleOperation'),\n             ('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRoleOperation', xml)\n\n    @staticmethod\n    def shutdown_roles_operation_to_xml(role_names, post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        xml += _XmlSerializer.data_to_xml(\n             [('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRolesOperation', xml)\n\n    @staticmethod\n    def start_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'StartRoleOperation',\n            '<OperationType>StartRoleOperation</OperationType>')\n\n    @staticmethod\n    def start_roles_operation_to_xml(role_names):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'StartRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        return _XmlSerializer.doc_from_xml('StartRolesOperation', xml)\n\n    @staticmethod\n    def windows_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('ComputerName', configuration.computer_name),\n             ('AdminPassword', configuration.admin_password),\n             ('ResetPasswordOnFirstLogon',\n              configuration.reset_password_on_first_logon,\n              _lower),\n             ('EnableAutomaticUpdates',\n              configuration.enable_automatic_updates,\n              _lower),\n             ('TimeZone', configuration.time_zone)])\n\n        if configuration.domain_join is not None:\n            xml += '<DomainJoin>'\n            xml += '<Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('Domain', configuration.domain_join.credentials.domain),\n                 ('Username', configuration.domain_join.credentials.username),\n                 ('Password', configuration.domain_join.credentials.password)])\n            xml += '</Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('JoinDomain', configuration.domain_join.join_domain),\n                 ('MachineObjectOU',\n                  configuration.domain_join.machine_object_ou)])\n            xml += '</DomainJoin>'\n        if configuration.stored_certificate_settings is not None:\n            xml += 
'<StoredCertificateSettings>'\n            for cert in configuration.stored_certificate_settings:\n                xml += '<CertificateSetting>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('StoreLocation', cert.store_location),\n                     ('StoreName', cert.store_name),\n                     ('Thumbprint', cert.thumbprint)])\n                xml += '</CertificateSetting>'\n            xml += '</StoredCertificateSettings>'\n        if configuration.win_rm is not None:\n            xml += '<WinRM><Listeners>'\n            for listener in configuration.win_rm.listeners:\n                xml += '<Listener>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Protocol', listener.protocol),\n                     ('CertificateThumbprint', listener.certificate_thumbprint)])\n                xml += '</Listener>'\n            xml += '</Listeners></WinRM>'\n        xml += _XmlSerializer.data_to_xml(\n            [('AdminUsername', configuration.admin_username)])\n        return xml\n\n    @staticmethod\n    def linux_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('HostName', configuration.host_name),\n             ('UserName', configuration.user_name),\n             ('UserPassword', configuration.user_password),\n             ('DisableSshPasswordAuthentication',\n              configuration.disable_ssh_password_authentication,\n              _lower)])\n\n        if configuration.ssh is not None:\n            xml += '<SSH>'\n            xml += '<PublicKeys>'\n            for key in configuration.ssh.public_keys:\n                xml += '<PublicKey>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</PublicKey>'\n            xml += '</PublicKeys>'\n            xml += '<KeyPairs>'\n            for key in configuration.ssh.key_pairs:\n                xml += '<KeyPair>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</KeyPair>'\n            xml += '</KeyPairs>'\n            xml += '</SSH>'\n        return xml\n\n    @staticmethod\n    def network_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type)])\n        xml += '<InputEndpoints>'\n        for endpoint in configuration.input_endpoints:\n            xml += '<InputEndpoint>'\n            xml += _XmlSerializer.data_to_xml(\n                [('LoadBalancedEndpointSetName',\n                  endpoint.load_balanced_endpoint_set_name),\n                 ('LocalPort', endpoint.local_port),\n                 ('Name', endpoint.name),\n                 ('Port', endpoint.port)])\n\n            if endpoint.load_balancer_probe.path or\\\n                endpoint.load_balancer_probe.port or\\\n                endpoint.load_balancer_probe.protocol:\n                xml += '<LoadBalancerProbe>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Path', endpoint.load_balancer_probe.path),\n                     ('Port', endpoint.load_balancer_probe.port),\n                     ('Protocol', endpoint.load_balancer_probe.protocol)])\n                xml += '</LoadBalancerProbe>'\n\n            xml += 
_XmlSerializer.data_to_xml(\n                [('Protocol', endpoint.protocol),\n                 ('EnableDirectServerReturn',\n                  endpoint.enable_direct_server_return,\n                  _lower)])\n\n            xml += '</InputEndpoint>'\n        xml += '</InputEndpoints>'\n        xml += '<SubnetNames>'\n        for name in configuration.subnet_names:\n            xml += _XmlSerializer.data_to_xml([('SubnetName', name)])\n        xml += '</SubnetNames>'\n        return xml\n\n    @staticmethod\n    def role_to_xml(availability_set_name, data_virtual_hard_disks,\n                    network_configuration_set, os_virtual_hard_disk, role_name,\n                    role_size, role_type, system_configuration_set):\n        xml = _XmlSerializer.data_to_xml([('RoleName', role_name),\n                                          ('RoleType', role_type)])\n\n        xml += '<ConfigurationSets>'\n\n        if system_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            if isinstance(system_configuration_set, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    system_configuration_set)\n            elif isinstance(system_configuration_set, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    system_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        if network_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            xml += _XmlSerializer.network_configuration_to_xml(\n                network_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        xml += '</ConfigurationSets>'\n\n        if availability_set_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('AvailabilitySetName', availability_set_name)])\n\n        if data_virtual_hard_disks is not None:\n            xml += '<DataVirtualHardDisks>'\n            for hd in data_virtual_hard_disks:\n                xml += '<DataVirtualHardDisk>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('HostCaching', hd.host_caching),\n                     ('DiskLabel', hd.disk_label),\n                     ('DiskName', hd.disk_name),\n                     ('Lun', hd.lun),\n                     ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),\n                     ('MediaLink', hd.media_link)])\n                xml += '</DataVirtualHardDisk>'\n            xml += '</DataVirtualHardDisks>'\n\n        if os_virtual_hard_disk is not None:\n            xml += '<OSVirtualHardDisk>'\n            xml += _XmlSerializer.data_to_xml(\n                [('HostCaching', os_virtual_hard_disk.host_caching),\n                 ('DiskLabel', os_virtual_hard_disk.disk_label),\n                 ('DiskName', os_virtual_hard_disk.disk_name),\n                 ('MediaLink', os_virtual_hard_disk.media_link),\n                 ('SourceImageName', os_virtual_hard_disk.source_image_name)])\n            xml += '</OSVirtualHardDisk>'\n\n        if role_size is not None:\n            xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])\n\n        return xml\n\n    @staticmethod\n    def add_role_to_xml(role_name, system_configuration_set,\n                        os_virtual_hard_disk, role_type,\n                        network_configuration_set, availability_set_name,\n                        data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            
availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def update_role_to_xml(role_name, os_virtual_hard_disk, role_type,\n                           network_configuration_set, availability_set_name,\n                           data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            None)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def capture_role_to_xml(post_capture_action, target_image_name,\n                            target_image_label, provisioning_configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'CaptureRoleOperation'),\n             ('PostCaptureAction', post_capture_action)])\n\n        if provisioning_configuration is not None:\n            xml += '<ProvisioningConfiguration>'\n            if isinstance(provisioning_configuration, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    provisioning_configuration)\n            elif isinstance(provisioning_configuration, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    provisioning_configuration)\n            xml += '</ProvisioningConfiguration>'\n\n        xml += _XmlSerializer.data_to_xml(\n            [('TargetImageLabel', target_image_label),\n             ('TargetImageName', target_image_name)])\n\n        return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)\n\n    @staticmethod\n    def virtual_machine_deployment_to_xml(deployment_name, deployment_slot,\n                                          label, role_name,\n                                          system_configuration_set,\n                                          os_virtual_hard_disk, role_type,\n                                          network_configuration_set,\n                                          availability_set_name,\n                                          data_virtual_hard_disks, role_size,\n                                          virtual_network_name):\n        xml = _XmlSerializer.data_to_xml([('Name', deployment_name),\n                                          ('DeploymentSlot', deployment_slot),\n                                          ('Label', label)])\n        xml += '<RoleList>'\n        xml += '<Role>'\n        xml += _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        xml += '</Role>'\n        xml += '</RoleList>'\n\n        if virtual_network_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('VirtualNetworkName', virtual_network_name)])\n\n        return _XmlSerializer.doc_from_xml('Deployment', xml)\n\n    @staticmethod\n    def create_website_to_xml(webspace_name, website_name, geo_region, plan,\n    
                          host_names, compute_mode, server_farm, site_mode):\n        xml = '<HostNames xmlns:a=\"http://schemas.microsoft.com/2003/10/Serialization/Arrays\">'\n        for host_name in host_names:\n            xml += '<a:string>{0}</a:string>'.format(host_name)\n        xml += '</HostNames>'\n        xml += _XmlSerializer.data_to_xml(\n            [('Name', website_name),\n             ('ComputeMode', compute_mode),\n             ('ServerFarm', server_farm),\n             ('SiteMode', site_mode)])\n        xml += '<WebSpaceToCreate>'\n        xml += _XmlSerializer.data_to_xml(\n            [('GeoRegion', geo_region),\n             ('Name', webspace_name),\n             ('Plan', plan)])\n        xml += '</WebSpaceToCreate>'\n        return _XmlSerializer.doc_from_xml('Site', xml)\n\n    @staticmethod\n    def data_to_xml(data):\n        '''Creates an xml fragment from the specified data.\n           data: Array of tuples, where first: xml element name\n                                        second: xml element text\n                                        third: conversion function\n        '''\n        xml = ''\n        for element in data:\n            name = element[0]\n            val = element[1]\n            if len(element) > 2:\n                converter = element[2]\n            else:\n                converter = None\n\n            if val is not None:\n                if converter is not None:\n                    text = _str(converter(_str(val)))\n                else:\n                    text = _str(val)\n\n                xml += ''.join(['<', name, '>', text, '</', name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_xml(document_element_name, inner_xml):\n        '''Wraps the specified xml in an xml root element with default azure\n        namespaces'''\n        xml = ''.join(['<', document_element_name,\n                      ' xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"',\n                      ' xmlns=\"http://schemas.microsoft.com/windowsazure\">'])\n        xml += inner_xml\n        xml += ''.join(['</', document_element_name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_data(document_element_name, data, extended_properties=None):\n        xml = _XmlSerializer.data_to_xml(data)\n        if extended_properties is not None:\n            xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(\n                extended_properties)\n        return _XmlSerializer.doc_from_xml(document_element_name, xml)\n\n    @staticmethod\n    def extended_properties_dict_to_xml_fragment(extended_properties):\n        xml = ''\n        if extended_properties is not None and len(extended_properties) > 0:\n            xml += '<ExtendedProperties>'\n            for key, val in extended_properties.items():\n                xml += ''.join(['<ExtendedProperty>',\n                                '<Name>',\n                                _str(key),\n                                '</Name>',\n                               '<Value>',\n                               _str(val),\n                               '</Value>',\n                               '</ExtendedProperty>'])\n            xml += '</ExtendedProperties>'\n        return xml\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\nclass _ServiceBusManagementXmlSerializer(object):\n\n    @staticmethod\n    def namespace_to_xml(region):\n        '''Converts a service bus namespace description to xml\n\n        The xml 
format:\n<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <content type=\"application/xml\">\n        <NamespaceDescription\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n            <Region>West US</Region>\n        </NamespaceDescription>\n    </content>\n</entry>\n        '''\n        body = '<NamespaceDescription xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n        body += ''.join(['<Region>', region, '</Region>'])\n        body += '</NamespaceDescription>'\n\n        return _create_entry(body)\n\n    @staticmethod\n    def xml_to_namespace(xmlstr):\n        '''Converts xml response to service bus namespace\n\n        The xml format for namespace:\n<entry>\n<id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id>\n<title type=\"text\">myunittests</title>\n<updated>2012-08-22T16:48:10Z</updated>\n<content type=\"application/xml\">\n    <NamespaceDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Name>myunittests</Name>\n    <Region>West US</Region>\n    <DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey>\n    <Status>Active</Status>\n    <CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt>\n    <AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint>\n    <ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint>\n    <ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString>\n    <SubscriptionId>00000000000000000000000000000000</SubscriptionId>\n    <Enabled>true</Enabled>\n    </NamespaceDescription>\n</content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        namespace = ServiceBusNamespace()\n\n        mappings = (\n            ('Name', 'name', None),\n            ('Region', 'region', None),\n            ('DefaultKey', 'default_key', None),\n            ('Status', 'status', None),\n            ('CreatedAt', 'created_at', None),\n            ('AcsManagementEndpoint', 'acs_management_endpoint', None),\n            ('ServiceBusEndpoint', 'servicebus_endpoint', None),\n            ('ConnectionString', 'connection_string', None),\n            ('SubscriptionId', 'subscription_id', None),\n            ('Enabled', 'enabled', _parse_bool),\n        )\n\n        for desc in _get_children_from_path(xmldoc,\n                                            'entry',\n                                            'content',\n                                            'NamespaceDescription'):\n            for xml_name, field_name, conversion_func in mappings:\n                node_value = _get_first_child_node_value(desc, xml_name)\n                if node_value is not None:\n                    if conversion_func is not None:\n                        node_value = conversion_func(node_value)\n                    setattr(namespace, field_name, node_value)\n\n        return namespace\n\n    @staticmethod\n    def xml_to_region(xmlstr):\n        '''Converts xml response to service bus region\n\n        The xml format for region:\n<entry>\n<id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>\n<title type=\"text\"></title>\n<updated>2013-04-10T18:25:29Z</updated>\n<content type=\"application/xml\">\n    
<RegionCodeDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Code>East Asia</Code>\n    <FullName>East Asia</FullName>\n    </RegionCodeDescription>\n</content>\n</entry>\n          '''\n        xmldoc = minidom.parseString(xmlstr)\n        region = ServiceBusRegion()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'RegionCodeDescription'):\n            node_value = _get_first_child_node_value(desc, 'Code')\n            if node_value is not None:\n                region.code = node_value\n            node_value = _get_first_child_node_value(desc, 'FullName')\n            if node_value is not None:\n                region.fullname = node_value\n\n        return region\n\n    @staticmethod\n    def xml_to_namespace_availability(xmlstr):\n        '''Converts xml response to service bus namespace availability\n\n        The xml format:\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id>\n    <title type=\"text\"></title>\n    <updated>2013-04-16T03:03:37Z</updated>\n    <content type=\"application/xml\">\n        <NamespaceAvailability\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n            xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n            <Result>false</Result>\n        </NamespaceAvailability>\n    </content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        availability = AvailabilityResponse()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'NamespaceAvailability'):\n            node_value = _get_first_child_node_value(desc, 'Result')\n            if node_value is not None:\n                availability.result = _parse_bool(node_value)\n\n        return availability\n\n\nfrom azure.servicemanagement.servicemanagementservice import (\n    ServiceManagementService)\nfrom azure.servicemanagement.servicebusmanagementservice import (\n    ServiceBusManagementService)\nfrom azure.servicemanagement.websitemanagementservice import (\n    WebsiteManagementService)\n"
  },
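The `_XmlSerializer` helpers above build request bodies by plain string concatenation: `data_to_xml` turns `(element_name, value[, converter])` tuples into `<Name>value</Name>` fragments and silently skips `None` values (which is how every optional element is omitted), while `doc_from_xml` wraps a fragment in a root element carrying the default Azure namespaces. A minimal standalone sketch of that contract, re-implemented here purely for illustration:

```python
# Standalone sketch of the data_to_xml / doc_from_xml contract used by
# _XmlSerializer above; re-implemented for illustration only.

def data_to_xml(data):
    xml = ''
    for element in data:
        name, val = element[0], element[1]
        converter = element[2] if len(element) > 2 else None
        if val is not None:  # None values: element omitted entirely
            text = converter(str(val)) if converter else str(val)
            xml += '<{0}>{1}</{0}>'.format(name, text)
    return xml

def doc_from_xml(root, inner_xml):
    return ('<{0} xmlns:i="http://www.w3.org/2001/XMLSchema-instance"'
            ' xmlns="http://schemas.microsoft.com/windowsazure">{1}</{0}>'
            .format(root, inner_xml))

# The optional third tuple member converts a value before embedding it,
# e.g. lower-casing Python booleans to the 'true'/'false' the service expects.
_lower = lambda text: text.lower()
fragment = data_to_xml([('RoleName', 'myrole'),
                        ('RoleSize', None),  # omitted from the output
                        ('EnableDirectServerReturn', True, _lower)])
print(doc_from_xml('PersistentVMRole', fragment))
```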
  {
    "path": "CustomScript/azure/servicemanagement/servicebusmanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _convert_response_to_feeds,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    _ServiceBusManagementXmlSerializer,\n    QueueDescription,\n    TopicDescription,\n    NotificationHubDescription,\n    RelayDescription,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\n\nclass ServiceBusManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceBusManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for service bus ----------------------------------------\n    def get_regions(self):\n        '''\n        Get list of available service bus regions.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Regions/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_region)\n\n    def list_namespaces(self):\n        '''\n        List the service bus namespaces defined on the account.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_namespace)\n\n    def get_namespace(self, name):\n        '''\n        Get details about a specific namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace(\n            response.body)\n\n    def create_namespace(self, name, region):\n        '''\n        Create a new service bus namespace.\n\n        name: Name of the service bus namespace to create.\n        region: Region to create the namespace in.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_put(\n            self._get_path('services/serviceBus/Namespaces', name),\n            _ServiceBusManagementXmlSerializer.namespace_to_xml(region))\n\n    def delete_namespace(self, name):\n        '''\n        Delete a service bus namespace.\n\n        name: Name of the service bus namespace to delete.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_delete(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n    def check_namespace_availability(self, name):\n        '''\n        
Checks to see if the specified service bus namespace is available, or\n        if it has already been taken.\n\n        name: Name of the service bus namespace to validate.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_path('services/serviceBus/CheckNamespaceAvailability',\n                           None) + '/?namespace=' + _str(name), None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(\n            response.body)\n\n    def list_queues(self, name):\n        '''\n        Enumerates the queues in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n            \n        response = self._perform_get(\n            self._get_list_queues_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, QueueDescription)    \n\n    def list_topics(self, name):\n        '''\n        Retrieves the topics in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_topics_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, TopicDescription)\n\n    def list_notification_hubs(self, name):\n        '''\n        Retrieves the notification hubs in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_notification_hubs_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, NotificationHubDescription)\n\n    def list_relays(self, name):\n        '''\n        Retrieves the relays in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_relays_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, RelayDescription)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_queues_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Queues'\n\n    def _get_list_topics_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Topics'\n\n    def _get_list_notification_hubs_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/NotificationHubs'\n\n    def _get_list_relays_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Relays'\n"
  },
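A short usage sketch for the `ServiceBusManagementService` defined above. The subscription id, certificate path, and namespace name are placeholders; the attribute names (`code`, `fullname`, `name`, `region`, `status`, `result`) follow the deserializers in `_ServiceBusManagementXmlSerializer` earlier in this package:

```python
# Illustrative only: subscription id, cert path and namespace are placeholders.
from azure.servicemanagement import ServiceBusManagementService

sbms = ServiceBusManagementService('<subscription-id>',
                                   '/path/to/management-cert.pem')

# get_regions returns a feed of ServiceBusRegion objects.
for region in sbms.get_regions():
    print(region.code, region.fullname)

# check_namespace_availability returns an AvailabilityResponse whose
# result field is True while the name is still free.
if sbms.check_namespace_availability('mynamespace').result:
    sbms.create_namespace('mynamespace', 'West US')

for ns in sbms.list_namespaces():
    print(ns.name, ns.region, ns.status)
```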
  {
    "path": "CustomScript/azure/servicemanagement/servicemanagementclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\n\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _get_request_body,\n    _parse_response,\n    _str,\n    _update_request_uri_query,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicemanagement import (\n    AZURE_MANAGEMENT_CERTFILE,\n    AZURE_MANAGEMENT_SUBSCRIPTIONID,\n    _management_error_handler,\n    _parse_response_for_async_op,\n    _update_management_header,\n    )\n\n\nclass _ServiceManagementClient(object):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        self.requestid = None\n        self.subscription_id = subscription_id\n        self.cert_file = cert_file\n        self.host = host\n\n        if not self.cert_file:\n            if AZURE_MANAGEMENT_CERTFILE in os.environ:\n                self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]\n\n        if not self.subscription_id:\n            if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:\n                self.subscription_id = os.environ[\n                    AZURE_MANAGEMENT_SUBSCRIPTIONID]\n\n        if not self.cert_file or not self.subscription_id:\n            raise WindowsAzureError(\n                'You need to provide subscription id and certificate file')\n\n        self._httpclient = _HTTPClient(\n            service_instance=self, cert_file=self.cert_file)\n        self._filter = self._httpclient.perform_request\n\n    def with_filter(self, filter):\n        '''Returns a new service which will process requests with the\n        specified filter.  Filtering operations can include logging, automatic\n        retrying, etc...  The filter is a lambda which receives the HTTPRequest\n        and another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.'''\n        res = type(self)(self.subscription_id, self.cert_file, self.host)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    #--Helper functions --------------------------------------------------\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _management_error_handler(ex)\n\n        return resp\n\n    def _perform_get(self, path, response_type):\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        return response\n\n    def _perform_put(self, path, body, async=False):\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_post(self, path, body, response_type=None, async=False):\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_delete(self, path, async=False):\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _get_path(self, resource, name):\n        path = '/' + self.subscription_id + '/' + resource\n        if name is not None:\n            path += '/' + _str(name)\n        return path\n"
  },
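`with_filter` above layers pre- and post-processing around every request without mutating the original client: the filter receives the `HTTPRequest` and the next handler in the chain, and returns the response. A sketch of a logging filter under those assumptions (credentials are placeholders, and the `status` and `service_name` attributes are assumed from this library's HTTP response and `StorageService` models rather than shown here):

```python
# Sketch of a request/response logging filter; credentials are placeholders.
from azure.servicemanagement import ServiceManagementService

def logging_filter(request, next_handler):
    # Pre-processing: log the outgoing management call.
    print('>>', request.method, request.host + request.path)
    response = next_handler(request)  # hand off to the real transport
    # Post-processing: log the HTTP status that came back (assumed attribute).
    print('<<', response.status)
    return response

sms = ServiceManagementService('<subscription-id>', '/path/to/cert.pem')
logged = sms.with_filter(logging_filter)  # new client; sms itself unchanged
for account in logged.list_storage_accounts():
    print(account.service_name)  # assumed StorageService attribute
```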
  {
    "path": "CustomScript/azure/servicemanagement/servicemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    AffinityGroups,\n    AffinityGroup,\n    AvailabilityResponse,\n    Certificate,\n    Certificates,\n    DataVirtualHardDisk,\n    Deployment,\n    Disk,\n    Disks,\n    Locations,\n    Operation,\n    HostedService,\n    HostedServices,\n    Images,\n    OperatingSystems,\n    OperatingSystemFamilies,\n    OSImage,\n    PersistentVMRole,\n    StorageService,\n    StorageServices,\n    Subscription,\n    SubscriptionCertificate,\n    SubscriptionCertificates,\n    VirtualNetworkSites,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass ServiceManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for storage accounts -----------------------------------\n    def list_storage_accounts(self):\n        '''\n        Lists the storage accounts available under the current subscription.\n        '''\n        return self._perform_get(self._get_storage_service_path(),\n                                 StorageServices)\n\n    def get_storage_account_properties(self, service_name):\n        '''\n        Returns system properties for the specified storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(self._get_storage_service_path(service_name),\n                                 StorageService)\n\n    def get_storage_account_keys(self, service_name):\n        '''\n        Returns the primary and secondary access keys for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path(service_name) + '/keys',\n            StorageService)\n\n    def regenerate_storage_account_keys(self, service_name, key_type):\n        '''\n        Regenerates the primary or secondary access key for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        key_type:\n            Specifies which key to regenerate. 
Valid values are:\n            Primary, Secondary\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('key_type', key_type)\n        return self._perform_post(\n            self._get_storage_service_path(\n                service_name) + '/keys?action=regenerate',\n            _XmlSerializer.regenerate_keys_to_xml(\n                key_type),\n            StorageService)\n\n    def create_storage_account(self, service_name, description, label,\n                               affinity_group=None, location=None,\n                               geo_replication_enabled=True,\n                               extended_properties=None):\n        '''\n        Creates a new storage account in Windows Azure.\n\n        service_name:\n            A name for the storage account that is unique within Windows Azure.\n            Storage account names must be between 3 and 24 characters in length\n            and use numbers and lower-case letters only.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        affinity_group:\n            The name of an existing affinity group in the specified\n            subscription. You can specify either a location or affinity_group,\n            but not both.\n        location:\n            The location where the storage account is created. You can specify\n            either a location or affinity_group, but not both.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('description', description)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(\n            self._get_storage_service_path(),\n            _XmlSerializer.create_storage_service_input_to_xml(\n                service_name,\n                description,\n                label,\n                affinity_group,\n                location,\n                geo_replication_enabled,\n                extended_properties),\n            async=True)\n\n    def update_storage_account(self, service_name, description=None,\n                               label=None, geo_replication_enabled=None,\n                               extended_properties=None):\n        '''\n        Updates the label, the description, and enables or disables the\n        geo-replication status for a storage account in Windows Azure.\n\n        service_name: Name of the storage service account.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(\n            self._get_storage_service_path(service_name),\n            _XmlSerializer.update_storage_service_input_to_xml(\n                description,\n                label,\n                geo_replication_enabled,\n                extended_properties))\n\n    def delete_storage_account(self, service_name):\n        '''\n        Deletes the specified storage account from Windows Azure.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(\n            self._get_storage_service_path(service_name))\n\n    def check_storage_account_name_availability(self, service_name):\n        '''\n        Checks to see if the specified storage account name is available, or\n        if it has already been taken.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path() +\n            '/operations/isavailable/' +\n            _str(service_name) + '',\n            AvailabilityResponse)\n\n    #--Operations for hosted services ------------------------------------\n    def list_hosted_services(self):\n        '''\n        Lists the hosted services available under the current subscription.\n        '''\n        return self._perform_get(self._get_hosted_service_path(),\n                                 HostedServices)\n\n    def get_hosted_service_properties(self, service_name, embed_detail=False):\n        '''\n        Retrieves system properties for the specified hosted service. These\n        properties include the service name and service type; the name of the\n        affinity group to which the service belongs, or its location if it is\n        not part of an affinity group; and optionally, information on the\n        service's deployments.\n\n        service_name: Name of the hosted service.\n        embed_detail:\n            When True, the management service returns properties for all\n            deployments of the service, as well as for the service itself.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('embed_detail', embed_detail)\n        return self._perform_get(\n            self._get_hosted_service_path(service_name) +\n            '?embed-detail=' +\n            _str(embed_detail).lower(),\n            HostedService)\n\n    def create_hosted_service(self, service_name, label, description=None,\n                              location=None, affinity_group=None,\n                              extended_properties=None):\n        '''\n        Creates a new hosted service in Windows Azure.\n\n        service_name:\n            A name for the hosted service that is unique within Windows Azure.\n            This name is the DNS prefix name and can be used to access the\n            hosted service.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. The name can be used to identify the storage account for\n            your tracking purposes.\n        description:\n            A description for the hosted service. 
The description can be up to\n            1024 characters in length.\n        location:\n            The location where the hosted service will be created. You can\n            specify either a location or affinity_group, but not both.\n        affinity_group:\n            The name of an existing affinity group associated with this\n            subscription. This name is a GUID and can be retrieved by examining\n            the name element of the response body returned by\n            list_affinity_groups. You can specify either a location or\n            affinity_group, but not both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(self._get_hosted_service_path(),\n                                  _XmlSerializer.create_hosted_service_to_xml(\n                                      service_name,\n                                      label,\n                                      description,\n                                      location,\n                                      affinity_group,\n                                      extended_properties))\n\n    def update_hosted_service(self, service_name, label=None, description=None,\n                              extended_properties=None):\n        '''\n        Updates the label and/or the description for a hosted service in\n        Windows Azure.\n\n        service_name: Name of the hosted service.\n        label:\n            A name for the hosted service. The name may be up to 100 characters\n            in length. You must specify a value for either Label or\n            Description, or for both. It is recommended that the label be\n            unique within the subscription. The name can be used to\n            identify the hosted service for your tracking purposes.\n        description:\n            A description for the hosted service. The description may be up to\n            1024 characters in length. You must specify a value for either\n            Label or Description, or for both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(self._get_hosted_service_path(service_name),\n                                 _XmlSerializer.update_hosted_service_to_xml(\n                                     label,\n                                     description,\n                                     extended_properties))\n\n    def delete_hosted_service(self, service_name):\n        '''\n        Deletes the specified hosted service from Windows Azure.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(self._get_hosted_service_path(service_name))\n\n    def get_deployment_by_slot(self, service_name, deployment_slot):\n        '''\n        Returns configuration information, status, and system properties for\n        a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        return self._perform_get(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            Deployment)\n\n    def get_deployment_by_name(self, service_name, deployment_name):\n        '''\n        Returns configuration information, status, and system properties for a\n        deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_get(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            Deployment)\n\n    def create_deployment(self, service_name, deployment_slot, name,\n                          package_url, label, configuration,\n                          start_deployment=False,\n                          treat_warnings_as_error=False,\n                          extended_properties=None):\n        '''\n        Uploads a new service package and creates a new deployment on staging\n        or production.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. 
The name can be used to identify the hosted service\n            for your tracking purposes.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        start_deployment:\n            Indicates whether to start the deployment immediately after it is\n            created. If false, the service model is still deployed to the\n            virtual machines but the code is not run immediately. Instead, the\n            service is Suspended until you call Update Deployment Status and\n            set the status to Running, at which time the service will be\n            started. A deployed service still incurs charges, even if it is\n            suspended.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the Created Deployment operation fails if there\n            are validation warnings on the service package.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('name', name)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('label', label)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            _XmlSerializer.create_deployment_to_xml(\n                name,\n                package_url,\n                label,\n                configuration,\n                start_deployment,\n                treat_warnings_as_error,\n                extended_properties),\n            async=True)\n\n    def delete_deployment(self, service_name, deployment_name):\n        '''\n        Deletes the specified deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_delete(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            async=True)\n\n    def swap_deployment(self, service_name, production, source_deployment):\n        '''\n        Initiates a virtual IP swap between the staging and production\n        deployment environments for a service. If the service is currently\n        running in the staging environment, it will be swapped to the\n        production environment. 
If it is running in the production\n        environment, it will be swapped to staging.\n\n        service_name: Name of the hosted service.\n        production: The name of the production deployment.\n        source_deployment: The name of the source deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('production', production)\n        _validate_not_none('source_deployment', source_deployment)\n        return self._perform_post(self._get_hosted_service_path(service_name),\n                                  _XmlSerializer.swap_deployment_to_xml(\n                                      production, source_deployment),\n                                  async=True)\n\n    def change_deployment_configuration(self, service_name, deployment_name,\n                                        configuration,\n                                        treat_warnings_as_error=False,\n                                        mode='Auto', extended_properties=None):\n        '''\n        Initiates a change to the deployment configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the Created Deployment operation fails if there\n            are validation warnings on the service package.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=config',\n            _XmlSerializer.change_deployment_to_xml(\n                configuration,\n                treat_warnings_as_error,\n                mode,\n                extended_properties),\n            async=True)\n\n    def update_deployment_status(self, service_name, deployment_name, status):\n        '''\n        Initiates a change in deployment status.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        status:\n            The change to initiate to the deployment status. 
Possible values\n            include: Running, Suspended\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('status', status)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=status',\n            _XmlSerializer.update_deployment_status_to_xml(\n                status),\n            async=True)\n\n    def upgrade_deployment(self, service_name, deployment_name, mode,\n                           package_url, configuration, label, force,\n                           role_to_upgrade=None, extended_properties=None):\n        '''\n        Initiates an upgrade.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. The name can be used to identify the hosted service\n            for your tracking purposes.\n        force:\n            Specifies whether the upgrade should proceed even when it will\n            cause local data to be lost from some role instances. True if the\n            upgrade should proceed; otherwise false if the upgrade should\n            fail.\n        role_to_upgrade: The name of the specific role to upgrade.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('configuration', configuration)\n        _validate_not_none('label', label)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=upgrade',\n            _XmlSerializer.upgrade_deployment_to_xml(\n                mode,\n                package_url,\n                configuration,\n                label,\n                role_to_upgrade,\n                force,\n                extended_properties),\n            async=True)\n\n    def walk_upgrade_domain(self, service_name, deployment_name,\n                            upgrade_domain):\n        '''\n        Specifies the next upgrade domain to be walked during manual in-place\n        upgrade or configuration change.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        upgrade_domain:\n            An integer value that identifies the upgrade domain to walk.\n            Upgrade domains are identified with a zero-based index: the first\n            upgrade domain has an ID of 0, the second has an ID of 1, and so on.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('upgrade_domain', upgrade_domain)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=walkupgradedomain',\n            _XmlSerializer.walk_upgrade_domain_to_xml(\n                upgrade_domain),\n            async=True)\n\n    def rollback_update_or_upgrade(self, service_name, deployment_name, mode,\n                                   force):\n        '''\n        Cancels an in progress configuration change (update) or upgrade and\n        returns the deployment to its state before the upgrade or\n        configuration change was started.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            Specifies whether the rollback should proceed automatically.\n                auto - The rollback proceeds without further user input.\n                manual - You must call the Walk Upgrade Domain operation to\n                         apply the rollback to each upgrade domain.\n        force:\n            Specifies whether the rollback should proceed even when it will\n            cause local data to be lost from some role instances. 
True if the\n            rollback should proceed; otherwise false if the rollback should\n            fail.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=rollback',\n            _XmlSerializer.rollback_upgrade_to_xml(\n                mode, force),\n            async=True)\n\n    def reboot_role_instance(self, service_name, deployment_name,\n                             role_instance_name):\n        '''\n        Requests a reboot of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reboot',\n            '',\n            async=True)\n\n    def reimage_role_instance(self, service_name, deployment_name,\n                              role_instance_name):\n        '''\n        Requests a reimage of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reimage',\n            '',\n            async=True)\n\n    def check_hosted_service_name_availability(self, service_name):\n        '''\n        Checks to see if the specified hosted service name is available, or if\n        it has already been taken.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id +\n            '/services/hostedservices/operations/isavailable/' +\n            _str(service_name) + '',\n            AvailabilityResponse)\n\n    #--Operations for service certificates -------------------------------\n    def list_service_certificates(self, service_name):\n        '''\n        Lists all of the service certificates associated with the specified\n        hosted service.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            Certificates)\n\n    def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):\n        '''\n        
Returns the public data for the specified X.509 certificate associated\n        with a hosted service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint) + '',\n            Certificate)\n\n    def add_service_certificate(self, service_name, data, certificate_format,\n                                password):\n        '''\n        Adds a certificate to a hosted service.\n\n        service_name: Name of the hosted service.\n        data: The base-64 encoded form of the pfx file.\n        certificate_format:\n            The service certificate format. The only supported value is pfx.\n        password: The certificate password.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('data', data)\n        _validate_not_none('certificate_format', certificate_format)\n        _validate_not_none('password', password)\n        return self._perform_post(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            _XmlSerializer.certificate_file_to_xml(\n                data, certificate_format, password),\n            async=True)\n\n    def delete_service_certificate(self, service_name, thumbalgorithm,\n                                   thumbprint):\n        '''\n        Deletes a service certificate from the certificate store of a hosted\n        service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint),\n            async=True)\n\n    #--Operations for management certificates ----------------------------\n    def list_management_certificates(self):\n        '''\n        The List Management Certificates operation lists and returns basic\n        information about all of the management certificates associated with\n        the specified subscription. Management certificates, which are also\n        known as subscription certificates, authenticate clients attempting to\n        connect to resources associated with your Windows Azure subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/certificates',\n                                 SubscriptionCertificates)\n\n    def get_management_certificate(self, thumbprint):\n        '''\n        The Get Management Certificate operation retrieves information about\n        the management certificate with the specified thumbprint. 
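\n\n        A minimal usage sketch (illustrative only; subscription_id and\n        cert_file are assumed to hold your subscription credentials, and the\n        thumbprint value is hypothetical):\n\n            from azure.servicemanagement import ServiceManagementService\n\n            sms = ServiceManagementService(subscription_id, cert_file)\n            cert = sms.get_management_certificate('0123456789ABCDEF')\n\n        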
Management certificates, which are\n        also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        thumbprint: The thumbprint value of the certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint),\n            SubscriptionCertificate)\n\n    def add_management_certificate(self, public_key, thumbprint, data):\n        '''\n        The Add Management Certificate operation adds a certificate to the\n        list of management certificates. Management certificates, which are\n        also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        public_key:\n            A base64 representation of the management certificate public key.\n        thumbprint:\n            The thumbprint that uniquely identifies the management\n            certificate.\n        data: The certificate's raw data in base-64 encoded .cer format.\n        '''\n        _validate_not_none('public_key', public_key)\n        _validate_not_none('thumbprint', thumbprint)\n        _validate_not_none('data', data)\n        return self._perform_post(\n            '/' + self.subscription_id + '/certificates',\n            _XmlSerializer.subscription_certificate_to_xml(\n                public_key, thumbprint, data))\n\n    def delete_management_certificate(self, thumbprint):\n        '''\n        The Delete Management Certificate operation deletes a certificate from\n        the list of management certificates. Management certificates, which\n        are also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        thumbprint:\n            The thumbprint that uniquely identifies the management\n            certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint))\n\n    #--Operations for affinity groups ------------------------------------\n    def list_affinity_groups(self):\n        '''\n        Lists the affinity groups associated with the specified subscription.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups',\n            AffinityGroups)\n\n    def get_affinity_group_properties(self, affinity_group_name):\n        '''\n        Returns the system properties associated with the specified affinity\n        group.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name) + '',\n            AffinityGroup)\n\n    def create_affinity_group(self, name, label, location, description=None):\n        '''\n        Creates a new affinity group for the specified subscription.\n\n        name: A name for the affinity group that is unique to the subscription.\n        label:\n            A name for the affinity group. 
The name can be up to 100 characters\n            in length.\n        location:\n            The data center location where the affinity group will be created.\n            To list available locations, use the list_locations function.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('name', name)\n        _validate_not_none('label', label)\n        _validate_not_none('location', location)\n        return self._perform_post(\n            '/' + self.subscription_id + '/affinitygroups',\n            _XmlSerializer.create_affinity_group_to_xml(name,\n                                                        label,\n                                                        description,\n                                                        location))\n\n    def update_affinity_group(self, affinity_group_name, label,\n                              description=None):\n        '''\n        Updates the label and/or the description for an affinity group for the\n        specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        label:\n            A name for the affinity group. The name can be up to 100 characters\n            in length.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        _validate_not_none('label', label)\n        return self._perform_put(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name),\n            _XmlSerializer.update_affinity_group_to_xml(label, description))\n\n    def delete_affinity_group(self, affinity_group_name):\n        '''\n        Deletes an affinity group in the specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_delete('/' + self.subscription_id + \\\n                                    '/affinitygroups/' + \\\n                                    _str(affinity_group_name))\n\n    #--Operations for locations ------------------------------------------\n    def list_locations(self):\n        '''\n        Lists all of the data center locations that are valid for your\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/locations',\n                                 Locations)\n\n    #--Operations for tracking asynchronous requests ---------------------\n    def get_operation_status(self, request_id):\n        '''\n        Returns the status of the specified operation. 
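\n\n        A polling sketch (illustrative only; sms is assumed to be a\n        ServiceManagementService client, and the reboot call is just one\n        example of an asynchronous request):\n\n            import time\n\n            result = sms.reboot_role_instance(\n                'myservice', 'mydeployment', 'myrole_instance')\n            operation = sms.get_operation_status(result.request_id)\n            while operation.status == 'InProgress':\n                time.sleep(5)\n                operation = sms.get_operation_status(result.request_id)\n\n        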
After calling an\n        asynchronous operation, you can call Get Operation Status to determine\n        whether the operation has succeeded, failed, or is still in progress.\n\n        request_id: The request ID for the request you wish to track.\n        '''\n        _validate_not_none('request_id', request_id)\n        return self._perform_get(\n            '/' + self.subscription_id + '/operations/' + _str(request_id),\n            Operation)\n\n    #--Operations for retrieving operating system information ------------\n    def list_operating_systems(self):\n        '''\n        Lists the versions of the guest operating system that are currently\n        available in Windows Azure.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystems',\n            OperatingSystems)\n\n    def list_operating_system_families(self):\n        '''\n        Lists the guest operating system families available in Windows Azure,\n        and also lists the operating system versions available for each family.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystemfamilies',\n            OperatingSystemFamilies)\n\n    #--Operations for retrieving subscription history --------------------\n    def get_subscription(self):\n        '''\n        Returns account and resource allocation information on the specified\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '',\n                                 Subscription)\n\n    #--Operations for virtual machines -----------------------------------\n    def get_role(self, service_name, deployment_name, role_name):\n        '''\n        Retrieves the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_get(\n            self._get_role_path(service_name, deployment_name, role_name),\n            PersistentVMRole)\n\n    def create_virtual_machine_deployment(self, service_name, deployment_name,\n                                          deployment_slot, label, role_name,\n                                          system_config, os_virtual_hard_disk,\n                                          network_config=None,\n                                          availability_set_name=None,\n                                          data_virtual_hard_disks=None,\n                                          role_size=None,\n                                          role_type='PersistentVMRole',\n                                          virtual_network_name=None):\n        '''\n        Provisions a virtual machine based on the supplied configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        label:\n            Specifies an identifier for the deployment. The label can be up to\n            100 characters long. 
The label can be used for tracking purposes.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image. Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set, you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network, you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine\n            allocation in the Windows Azure environment. Virtual machines\n            specified in the same availability set are allocated to different\n            nodes to maximize availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
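\n\n            For context, a minimal provisioning sketch for this method\n            (illustrative only; sms is assumed to be a\n            ServiceManagementService client, and the names, credentials and\n            image are hypothetical):\n\n                from azure.servicemanagement import (LinuxConfigurationSet,\n                                                     OSVirtualHardDisk)\n\n                linux_config = LinuxConfigurationSet(\n                    'myhostname', 'myuser', 'mypassword', True)\n                os_hd = OSVirtualHardDisk(\n                    'myimage',\n                    'http://example.blob.core.windows.net/disks/myvm.vhd')\n                sms.create_virtual_machine_deployment(\n                    'myservice', 'mydeployment', 'production', 'mylabel',\n                    'myrole', linux_config, os_hd, role_size='Small')\n\n            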
The only supported\n            value is PersistentVMRole.\n        virtual_network_name:\n            Specifies the name of an existing virtual network to which the\n            deployment will belong.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('label', label)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_deployment_path_using_name(service_name),\n            _XmlSerializer.virtual_machine_deployment_to_xml(\n                deployment_name,\n                deployment_slot,\n                label,\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size,\n                virtual_network_name),\n            async=True)\n\n    def add_role(self, service_name, deployment_name, role_name, system_config,\n                 os_virtual_hard_disk, network_config=None,\n                 availability_set_name=None, data_virtual_hard_disks=None,\n                 role_size=None, role_type='PersistentVMRole'):\n        '''\n        Adds a virtual machine to an existing deployment.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image. Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set, you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network, you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
The only supported\n            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_role_path(service_name, deployment_name),\n            _XmlSerializer.add_role_to_xml(\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def update_role(self, service_name, deployment_name, role_name,\n                    os_virtual_hard_disk=None, network_config=None,\n                    availability_set_name=None, data_virtual_hard_disks=None,\n                    role_size=None, role_type='PersistentVMRole'):\n        '''\n        Updates the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set, you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network, you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
The only supported\n            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_put(\n            self._get_role_path(service_name, deployment_name, role_name),\n            _XmlSerializer.update_role_to_xml(\n                role_name,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def delete_role(self, service_name, deployment_name, role_name):\n        '''\n        Deletes the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_delete(\n            self._get_role_path(service_name, deployment_name, role_name),\n            async=True)\n\n    def capture_role(self, service_name, deployment_name, role_name,\n                     post_capture_action, target_image_name,\n                     target_image_label, provisioning_configuration=None):\n        '''\n        The Capture Role operation captures a virtual machine image to your\n        image gallery. From the captured image, you can create additional\n        customized virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        post_capture_action:\n            Specifies the action after the capture operation completes. 
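\n\n            An illustrative sketch (sms is assumed to be a\n            ServiceManagementService client; the names are hypothetical):\n\n                sms.capture_role(\n                    'myservice', 'mydeployment', 'myrole',\n                    'Delete', 'myimage', 'My Image Label')\n\n            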
Possible\n            values are: Delete, Reprovision.\n        target_image_name:\n            Specifies the image name of the captured virtual machine.\n        target_image_label:\n            Specifies the friendly name of the captured virtual machine.\n        provisioning_configuration:\n            Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_capture_action', post_capture_action)\n        _validate_not_none('target_image_name', target_image_name)\n        _validate_not_none('target_image_label', target_image_label)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.capture_role_to_xml(\n                post_capture_action,\n                target_image_name,\n                target_image_label,\n                provisioning_configuration),\n            async=True)\n\n    def start_role(self, service_name, deployment_name, role_name):\n        '''\n        Starts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.start_role_operation_to_xml(),\n            async=True)\n\n    def start_roles(self, service_name, deployment_name, role_names):\n        '''\n        Starts the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.start_roles_operation_to_xml(role_names),\n            async=True)\n\n    def restart_role(self, service_name, deployment_name, role_name):\n        '''\n        Restarts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.restart_role_operation_to_xml(),\n            async=True)\n\n    def shutdown_role(self, service_name, deployment_name, role_name,\n                      post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The 
name of the deployment.\n        role_name: The name of the role.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. If a static Virtual Network IP\n                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.shutdown_role_operation_to_xml(post_shutdown_action),\n            async=True)\n\n    def shutdown_roles(self, service_name, deployment_name, role_names,\n                       post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. 
If a static Virtual Network IP\n                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.shutdown_roles_operation_to_xml(\n                role_names, post_shutdown_action),\n            async=True)\n\n    #--Operations for virtual machine images -----------------------------\n    def list_os_images(self):\n        '''\n        Retrieves a list of the OS images from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(),\n                                 Images)\n\n    def get_os_image(self, image_name):\n        '''\n        Retrieves an OS image from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(image_name),\n                                 OSImage)\n\n    def add_os_image(self, label, media_link, name, os):\n        '''\n        Adds an OS image that is currently stored in a storage account in your\n        subscription to the image repository.\n\n        label: Specifies the friendly name of the image.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more virtual machines.\n        os:\n            The operating system type of the OS image. Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_image_path(),\n                                  _XmlSerializer.os_image_to_xml(\n                                      label, media_link, name, os),\n                                  async=True)\n\n    def update_os_image(self, image_name, label, media_link, name, os):\n        '''\n        Updates an OS image that is in your image repository.\n\n        image_name: The name of the image to update.\n        label:\n            Specifies the friendly name of the image to be updated. You cannot\n            use this operation to update images provided by the Windows Azure\n            platform.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more VM Roles.\n        os:\n            The operating system type of the OS image. 
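\n\n            A usage sketch (illustrative only; sms is assumed to be a\n            ServiceManagementService client and the values are hypothetical):\n\n                sms.update_os_image(\n                    'myimage', 'My Image Label',\n                    'http://example.blob.core.windows.net/disks/mydisk.vhd',\n                    'myimage', 'Linux')\n\n            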
Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('image_name', image_name)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_image_path(image_name),\n                                 _XmlSerializer.os_image_to_xml(\n                                     label, media_link, name, os),\n                                 async=True)\n\n    def delete_os_image(self, image_name, delete_vhd=False):\n        '''\n        Deletes the specified OS image from your image repository.\n\n        image_name: The name of the image.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('image_name', image_name)\n        path = self._get_image_path(image_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def get_data_disk(self, service_name, deployment_name, role_name, lun):\n        '''\n        Retrieves the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_get(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            DataVirtualHardDisk)\n\n    def add_data_disk(self, service_name, deployment_name, role_name, lun,\n                      host_caching=None, media_link=None, disk_label=None,\n                      disk_name=None, logical_disk_size_in_gb=None,\n                      source_media_link=None):\n        '''\n        Adds a data disk to a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        disk_label:\n            Specifies the description of the data disk. 
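\n\n            For example, attaching an empty 10 GB disk at LUN 1 might look\n            like this (illustrative only; sms is assumed to be a\n            ServiceManagementService client and the names are hypothetical):\n\n                sms.add_data_disk(\n                    'myservice', 'mydeployment', 'myrole', 1,\n                    disk_label='mydatadisk', logical_disk_size_in_gb=10)\n\n            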
When you attach a disk,\n            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of a disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        source_media_link:\n            Specifies the location of a blob in account storage which is\n            mounted as a data disk when the virtual machine is created.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_post(\n            self._get_data_disk_path(service_name, deployment_name, role_name),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                lun,\n                logical_disk_size_in_gb,\n                media_link,\n                source_media_link),\n            async=True)\n\n    def update_data_disk(self, service_name, deployment_name, role_name, lun,\n                         host_caching=None, media_link=None, updated_lun=None,\n                         disk_label=None, disk_name=None,\n                         logical_disk_size_in_gb=None):\n        '''\n        Updates the specified data disk attached to the specified virtual\n        machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through\n            15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by\n            the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        updated_lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        disk_label:\n            Specifies the description of the data disk. 
When you attach a disk,\n            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of a disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_put(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                updated_lun,\n                logical_disk_size_in_gb,\n                media_link,\n                None),\n            async=True)\n\n    def delete_data_disk(self, service_name, deployment_name, role_name,\n                         lun, delete_vhd=False):\n        '''\n        Removes the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        path = self._get_data_disk_path(\n            service_name, deployment_name, role_name, lun)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def list_disks(self):\n        '''\n        Retrieves a list of the disks in your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(),\n                                 Disks)\n\n    def get_disk(self, disk_name):\n        '''\n        Retrieves a disk from your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(disk_name),\n                                 Disk)\n\n    def add_disk(self, has_operating_system, label, media_link, name, os):\n        '''\n        Adds a disk to the user image repository. The disk can be an OS disk\n        or a data disk.\n\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. 
The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. Possible values are: Linux, Windows\n        '''\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_disk_path(),\n                                  _XmlSerializer.disk_to_xml(\n                                      has_operating_system,\n                                      label,\n                                      media_link,\n                                      name,\n                                      os))\n\n    def update_disk(self, disk_name, has_operating_system, label, media_link,\n                    name, os):\n        '''\n        Updates an existing disk in your image repository.\n\n        disk_name: The name of the disk to update.\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. 
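\n\n        A usage sketch (illustrative only; sms is assumed to be a\n        ServiceManagementService client and the values are hypothetical):\n\n            sms.update_disk(\n                'mydisk', True, 'My Disk Label',\n                'http://example.blob.core.windows.net/disks/mydisk.vhd',\n                'mydisk', 'Linux')\n\n        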
Possible values are: Linux, Windows\n        '''\n        _validate_not_none('disk_name', disk_name)\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_disk_path(disk_name),\n                                 _XmlSerializer.disk_to_xml(\n                                     has_operating_system,\n                                     label,\n                                     media_link,\n                                     name,\n                                     os))\n\n    def delete_disk(self, disk_name, delete_vhd=False):\n        '''\n        Deletes the specified data or operating system disk from your image\n        repository.\n\n        disk_name: The name of the disk to delete.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('disk_name', disk_name)\n        path = self._get_disk_path(disk_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path)\n\n    #--Operations for virtual networks  ------------------------------\n    def list_virtual_network_sites(self):\n        '''\n        Retrieves a list of the virtual networks.\n        '''\n        return self._perform_get(self._get_virtual_network_site_path(),\n                                 VirtualNetworkSites)\n\n    #--Helper functions --------------------------------------------------\n    def _get_virtual_network_site_path(self):\n        return self._get_path('services/networking/virtualnetwork', None)\n\n    def _get_storage_service_path(self, service_name=None):\n        return self._get_path('services/storageservices', service_name)\n\n    def _get_hosted_service_path(self, service_name=None):\n        return self._get_path('services/hostedservices', service_name)\n\n    def _get_deployment_path_using_slot(self, service_name, slot=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deploymentslots', slot)\n\n    def _get_deployment_path_using_name(self, service_name,\n                                        deployment_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments', deployment_name)\n\n    def _get_role_path(self, service_name, deployment_name, role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roles', role_name)\n\n    def _get_role_instance_operations_path(self, service_name, deployment_name,\n                                           role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roleinstances', role_name) + '/Operations'\n\n    def _get_roles_operations_path(self, service_name, deployment_name):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roles/Operations', None)\n\n    def _get_data_disk_path(self, service_name, deployment_name, role_name,\n                            lun=None):\n        return 
self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roles/' + _str(role_name) + '/DataDisks', lun)\n\n    def _get_disk_path(self, disk_name=None):\n        return self._get_path('services/disks', disk_name)\n\n    def _get_image_path(self, image_name=None):\n        return self._get_path('services/images', image_name)\n"
  },
  {
    "path": "CustomScript/azure/servicemanagement/sqldatabasemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _parse_service_resources_response,\n    )\nfrom azure.servicemanagement import (\n    Servers,\n    Database,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass SqlDatabaseManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on SQL Database\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(SqlDatabaseManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for sql servers ----------------------------------------\n    def list_servers(self):\n        '''\n        List the SQL servers defined on the account.\n        '''\n        return self._perform_get(self._get_list_servers_path(),\n                                 Servers)\n\n    #--Operations for sql databases ----------------------------------------\n    def list_databases(self, name):\n        '''\n        List the SQL databases defined on the specified server name\n        '''\n        response = self._perform_get(self._get_list_databases_path(name),\n                                     None)\n        return _parse_service_resources_response(response, Database)\n\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_servers_path(self):\n        return self._get_path('services/sqlservers/servers', None)\n\n    def _get_list_databases_path(self, name):\n        # *contentview=generic is mandatory*\n        return self._get_path('services/sqlservers/servers/',\n                              name) + '/databases?contentview=generic' \n    \n"
  },
  {
    "path": "CustomScript/azure/servicemanagement/websitemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _str,\n    )\nfrom azure.servicemanagement import (\n    WebSpaces,\n    WebSpace,\n    Sites,\n    Site,\n    MetricResponses,\n    MetricDefinitions,\n    PublishData,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass WebsiteManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on WebSite\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(WebsiteManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for web sites ----------------------------------------\n    def list_webspaces(self):\n        '''\n        List the webspaces defined on the account.\n        '''\n        return self._perform_get(self._get_list_webspaces_path(),\n                                 WebSpaces)\n\n    def get_webspace(self, webspace_name):\n        '''\n        Get details of a specific webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_webspace_details_path(webspace_name),\n                                 WebSpace)\n\n    def list_sites(self, webspace_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_sites_path(webspace_name),\n                                 Sites)\n\n    def get_site(self, webspace_name, website_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_sites_details_path(webspace_name,\n                                                              website_name),\n                                 Site)\n\n    def create_site(self, webspace_name, website_name, geo_region, host_names,\n                    plan='VirtualDedicatedPlan', compute_mode='Shared',\n                    server_farm=None, site_mode=None):\n        '''\n        Create a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        geo_region:\n            The geographical region of the webspace that will be created.\n        host_names:\n            An array of fully qualified domain names for website. 
Only one\n            hostname can be specified in the azurewebsites.net domain.\n            The hostname should match the name of the website. Custom domains\n            can only be specified for Shared or Standard websites.\n        plan:\n            This value must be 'VirtualDedicatedPlan'.\n        compute_mode:\n            This value should be 'Shared' for the Free or Paid Shared\n            offerings, or 'Dedicated' for the Standard offering. The default\n            value is 'Shared'. If you set it to 'Dedicated', you must specify\n            a value for the server_farm parameter.\n        server_farm:\n            The name of the Server Farm associated with this website. This is\n            a required value for Standard mode.\n        site_mode:\n            Can be None, 'Limited' or 'Basic'. This value is 'Limited' for the\n            Free offering, and 'Basic' for the Paid Shared offering. Standard\n            mode does not use the site_mode parameter; it uses the compute_mode\n            parameter.\n        '''\n        xml = _XmlSerializer.create_website_to_xml(\n            webspace_name, website_name, geo_region, plan, host_names,\n            compute_mode, server_farm, site_mode)\n        return self._perform_post(\n            self._get_sites_path(webspace_name),\n            xml,\n            Site)\n\n    def delete_site(self, webspace_name, website_name,\n                    delete_empty_server_farm=False, delete_metrics=False):\n        '''\n        Delete a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        delete_empty_server_farm:\n            If the site being deleted is the last web site in a server farm,\n            you can delete the server farm by setting this to True.\n        delete_metrics:\n            To also delete the metrics for the site that you are deleting, you\n            can set this to True.\n        '''\n        path = self._get_sites_details_path(webspace_name, website_name)\n        query = ''\n        if delete_empty_server_farm:\n            query += '&deleteEmptyServerFarm=true'\n        if delete_metrics:\n            query += '&deleteMetrics=true'\n        if query:\n            path = path + '?' + query.lstrip('&')\n        return self._perform_delete(path)\n\n    def restart_site(self, webspace_name, website_name):\n        '''\n        Restart a web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_post(\n            self._get_restart_path(webspace_name, website_name),\n            '')\n\n    def get_historical_usage_metrics(self, webspace_name, website_name,\n                                     metrics=None, start_time=None,\n                                     end_time=None, time_grain=None):\n        '''\n        Get historical usage metrics.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        metrics: Optional. List of metric names. Otherwise, all metrics are\n            returned.\n        start_time: Optional. An ISO8601 date. Otherwise, the current hour is\n            used.\n        end_time: Optional. An ISO8601 date. Otherwise, the current time is\n            used.\n        time_grain: Optional. A rollup name, such as P1D. 
Otherwise, the default rollup for the metrics is used.\n        More information and metric names at:\n        http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx\n        '''\n        metrics = ('names='+','.join(metrics)) if metrics else ''\n        start_time = ('StartTime='+start_time) if start_time else ''\n        end_time = ('EndTime='+end_time) if end_time else ''\n        time_grain = ('TimeGrain='+time_grain) if time_grain else ''\n        parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v))\n        parameters = '?'+parameters if parameters else ''\n        return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters,\n                                 MetricResponses)\n\n    def get_metric_definitions(self, webspace_name, website_name):\n        '''\n        Get definitions of the metrics available for this web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name),\n                                 MetricDefinitions)\n\n    def get_publish_profile_xml(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as a string.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 None).body.decode(\"utf-8\")\n\n    def get_publish_profile(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as an object.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 PublishData)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_webspaces_path(self):\n        return self._get_path('services/webspaces', None)\n\n    def _get_webspace_details_path(self, webspace_name):\n        return self._get_path('services/webspaces/', webspace_name)\n\n    def _get_sites_path(self, webspace_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites'\n\n    def _get_sites_details_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name)\n\n    def _get_restart_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/restart/'\n\n    def _get_historical_usage_metrics_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metrics/'\n\n    def _get_metric_definitions_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metricdefinitions/'\n\n
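    # Illustrative use of the publish profile APIs, not part of the original\n    # SDK (assumes wms is a WebsiteManagementService instance; names are\n    # hypothetical): save a site's publish settings to disk.\n    #\n    #     xml = wms.get_publish_profile_xml('mywebspace', 'mysite')\n    #     with open('mysite.PublishSettings', 'w') as f:\n    #         f.write(xml)\n\n    def _get_publishxml_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + 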
_str(website_name) + '/publishxml/'\n"
  },
  {
    "path": "CustomScript/azure/storage/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport types\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (WindowsAzureData,\n                   WindowsAzureError,\n                   METADATA_NS,\n                   xml_escape,\n                   _create_entry,\n                   _decode_base64_to_text,\n                   _decode_base64_to_bytes,\n                   _encode_base64,\n                   _fill_data_minidom,\n                   _fill_instance_element,\n                   _get_child_nodes,\n                   _get_child_nodesNS,\n                   _get_children_from_path,\n                   _get_entry_properties,\n                   _general_error_handler,\n                   _list_of,\n                   _parse_response_for_dict,\n                   _sign_string,\n                   _unicode_type,\n                   _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,\n                   )\n\n# x-ms-version for storage service.\nX_MS_VERSION = '2012-02-12'\n\n\nclass EnumResultsBase(object):\n\n    ''' base class for EnumResults. '''\n\n    def __init__(self):\n        self.prefix = u''\n        self.marker = u''\n        self.max_results = 0\n        self.next_marker = u''\n\n\nclass ContainerEnumResults(EnumResultsBase):\n\n    ''' Blob Container list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.containers = _list_of(Container)\n\n    def __iter__(self):\n        return iter(self.containers)\n\n    def __len__(self):\n        return len(self.containers)\n\n    def __getitem__(self, index):\n        return self.containers[index]\n\n\nclass Container(WindowsAzureData):\n\n    ''' Blob container class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.properties = Properties()\n        self.metadata = {}\n\n\nclass Properties(WindowsAzureData):\n\n    ''' Blob container's properties class. '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n\n\nclass RetentionPolicy(WindowsAzureData):\n\n    ''' RetentionPolicy in service properties. '''\n\n    def __init__(self):\n        self.enabled = False\n        self.__dict__['days'] = None\n\n    def get_days(self):\n        # convert days to int value\n        return int(self.__dict__['days'])\n\n    def set_days(self, value):\n        ''' set default days if days is set to empty. '''\n        self.__dict__['days'] = value\n\n    days = property(fget=get_days, fset=set_days)\n\n\nclass Logging(WindowsAzureData):\n\n    ''' Logging class in service properties. 
'''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.delete = False\n        self.read = False\n        self.write = False\n        self.retention_policy = RetentionPolicy()\n\n\nclass Metrics(WindowsAzureData):\n\n    ''' Metrics class in service properties. '''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.enabled = False\n        self.include_apis = None\n        self.retention_policy = RetentionPolicy()\n\n\nclass StorageServiceProperties(WindowsAzureData):\n\n    ''' Storage Service Properties class. '''\n\n    def __init__(self):\n        self.logging = Logging()\n        self.metrics = Metrics()\n\n\nclass AccessPolicy(WindowsAzureData):\n\n    ''' Access Policy class in service properties. '''\n\n    def __init__(self, start=u'', expiry=u'', permission=u''):\n        self.start = start\n        self.expiry = expiry\n        self.permission = permission\n\n\nclass SignedIdentifier(WindowsAzureData):\n\n    ''' Signed Identifier class for service properties. '''\n\n    def __init__(self):\n        self.id = u''\n        self.access_policy = AccessPolicy()\n\n\nclass SignedIdentifiers(WindowsAzureData):\n\n    ''' SignedIdentifier list. '''\n\n    def __init__(self):\n        self.signed_identifiers = _list_of(SignedIdentifier)\n\n    def __iter__(self):\n        return iter(self.signed_identifiers)\n\n    def __len__(self):\n        return len(self.signed_identifiers)\n\n    def __getitem__(self, index):\n        return self.signed_identifiers[index]\n\n\nclass BlobEnumResults(EnumResultsBase):\n\n    ''' Blob list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.blobs = _list_of(Blob)\n        self.prefixes = _list_of(BlobPrefix)\n        self.delimiter = ''\n\n    def __iter__(self):\n        return iter(self.blobs)\n\n    def __len__(self):\n        return len(self.blobs)\n\n    def __getitem__(self, index):\n        return self.blobs[index]\n\n\nclass BlobResult(bytes):\n\n    def __new__(cls, blob, properties):\n        return bytes.__new__(cls, blob if blob else b'')\n\n    def __init__(self, blob, properties):\n        self.properties = properties\n\n\nclass Blob(WindowsAzureData):\n\n    ''' Blob class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.snapshot = u''\n        self.url = u''\n        self.properties = BlobProperties()\n        self.metadata = {}\n\n\nclass BlobProperties(WindowsAzureData):\n\n    ''' Blob Properties. '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n        self.content_length = 0\n        self.content_type = u''\n        self.content_encoding = u''\n        self.content_language = u''\n        self.content_md5 = u''\n        self.xms_blob_sequence_number = 0\n        self.blob_type = u''\n        self.lease_status = u''\n        self.lease_state = u''\n        self.lease_duration = u''\n        self.copy_id = u''\n        self.copy_source = u''\n        self.copy_status = u''\n        self.copy_progress = u''\n        self.copy_completion_time = u''\n        self.copy_status_description = u''\n\n\n
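# Illustrative, not part of the original code (hypothetical blob names):\n# with blobs 'a/1.txt', 'a/2.txt' and 'b.txt', list_blobs(container,\n# delimiter='/') folds the first two into a single BlobPrefix named 'a/'\n# and returns 'b.txt' as a Blob.\nclass BlobPrefix(WindowsAzureData):\n\n    ''' BlobPrefix in Blob. 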
'''\n\n    def __init__(self):\n        self.name = ''\n\n\nclass BlobBlock(WindowsAzureData):\n\n    ''' BlobBlock class. '''\n\n    def __init__(self, id=None, size=None):\n        self.id = id\n        self.size = size\n\n\nclass BlobBlockList(WindowsAzureData):\n\n    ''' BlobBlockList class. '''\n\n    def __init__(self):\n        self.committed_blocks = []\n        self.uncommitted_blocks = []\n\n\nclass PageRange(WindowsAzureData):\n\n    ''' Page Range for page blob. '''\n\n    def __init__(self):\n        self.start = 0\n        self.end = 0\n\n\nclass PageList(object):\n\n    ''' Page list for page blob. '''\n\n    def __init__(self):\n        self.page_ranges = _list_of(PageRange)\n\n    def __iter__(self):\n        return iter(self.page_ranges)\n\n    def __len__(self):\n        return len(self.page_ranges)\n\n    def __getitem__(self, index):\n        return self.page_ranges[index]\n\n\nclass QueueEnumResults(EnumResultsBase):\n\n    ''' Queue list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.queues = _list_of(Queue)\n\n    def __iter__(self):\n        return iter(self.queues)\n\n    def __len__(self):\n        return len(self.queues)\n\n    def __getitem__(self, index):\n        return self.queues[index]\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.metadata = {}\n\n\nclass QueueMessagesList(WindowsAzureData):\n\n    ''' Queue message list. '''\n\n    def __init__(self):\n        self.queue_messages = _list_of(QueueMessage)\n\n    def __iter__(self):\n        return iter(self.queue_messages)\n\n    def __len__(self):\n        return len(self.queue_messages)\n\n    def __getitem__(self, index):\n        return self.queue_messages[index]\n\n\nclass QueueMessage(WindowsAzureData):\n\n    ''' Queue message class. '''\n\n    def __init__(self):\n        self.message_id = u''\n        self.insertion_time = u''\n        self.expiration_time = u''\n        self.pop_receipt = u''\n        self.time_next_visible = u''\n        self.dequeue_count = u''\n        self.message_text = u''\n\n\n
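# Illustrative, not part of the original code: Entity attributes are\n# created dynamically by plain assignment, e.g.\n#\n#     entity = Entity()\n#     entity.PartitionKey = 'mypartitionkey'\n#     entity.RowKey = 'myrowkey1'\n#     entity.Age = 23\nclass Entity(WindowsAzureData):\n\n    ''' Entity class. The attributes of entity will be created dynamically. '''\n    pass\n\n\nclass EntityProperty(WindowsAzureData):\n\n    ''' Entity property. Contains type and value. '''\n\n    def __init__(self, type=None, value=None):\n        self.type = type\n        self.value = value\n\n\nclass Table(WindowsAzureData):\n\n    ''' Only for intellisense and telling the user the return type. 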
'''\n    pass\n\n\ndef _parse_blob_enum_results_list(response):\n    respbody = response.body\n    return_obj = BlobEnumResults()\n    doc = minidom.parseString(respbody)\n\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'):\n            return_obj.blobs.append(_fill_instance_element(child, Blob))\n\n        for child in _get_children_from_path(enum_results,\n                                             'Blobs',\n                                             'BlobPrefix'):\n            return_obj.prefixes.append(\n                _fill_instance_element(child, BlobPrefix))\n\n        for name, value in vars(return_obj).items():\n            if name == 'blobs' or name == 'prefixes':\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    return return_obj\n\n\ndef _update_storage_header(request):\n    ''' add additional headers for storage request. '''\n    if request.body:\n        assert isinstance(request.body, bytes)\n\n    # if it is PUT, POST, MERGE or DELETE, the Content-Length header must be\n    # added.\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # append additional headers based on the service\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # expand x-ms-meta name/value pairs into individual headers\n    for name, value in request.headers:\n        if 'x-ms-meta-name-values' in name and value:\n            for meta_name, meta_value in value.items():\n                request.headers.append(('x-ms-meta-' + meta_name, meta_value))\n            request.headers.remove((name, value))\n            break\n    return request\n\n\ndef _update_storage_blob_header(request, account_name, account_key):\n    ''' add additional headers for storage blob request. '''\n\n    request = _update_storage_header(request)\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(\n        ('Content-Type', 'application/octet-stream Charset=UTF-8'))\n    request.headers.append(('Authorization',\n                            _sign_storage_blob_request(request,\n                                                       account_name,\n                                                       account_key)))\n\n    return request.headers\n\n\ndef _update_storage_queue_header(request, account_name, account_key):\n    ''' add additional headers for storage queue request. '''\n    return _update_storage_blob_header(request, account_name, account_key)\n\n\n
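# Illustrative, not part of the original code: _update_storage_header\n# expands a header ('x-ms-meta-name-values', {'category': 'test'}) into\n# ('x-ms-meta-category', 'test').\ndef _update_storage_table_header(request):\n    ''' add additional headers for storage table request. 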
'''\n\n    request = _update_storage_header(request)\n    for name, _ in request.headers:\n        if name.lower() == 'content-type':\n            break\n    else:\n        request.headers.append(('Content-Type', 'application/atom+xml'))\n    request.headers.append(('DataServiceVersion', '2.0;NetFx'))\n    request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(('Date', current_time))\n    return request.headers\n\n\ndef _sign_storage_blob_request(request, account_name, account_key):\n    '''\n    Returns the signed string for a blob request, which is used to set the\n    Authorization header. This is also used to sign queue requests.\n    '''\n\n    uri_path = request.path.split('?')[0]\n\n    # method to sign\n    string_to_sign = request.method + '\\n'\n\n    # get headers to sign\n    headers_to_sign = [\n        'content-encoding', 'content-language', 'content-length',\n        'content-md5', 'content-type', 'date', 'if-modified-since',\n        'if-match', 'if-none-match', 'if-unmodified-since', 'range']\n\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get x-ms header to sign\n    x_ms_headers = []\n    for name, value in request.headers:\n        if 'x-ms' in name:\n            x_ms_headers.append((name.lower(), value))\n    x_ms_headers.sort()\n    for name, value in x_ms_headers:\n        if value:\n            string_to_sign += ''.join([name, ':', value, '\\n'])\n\n    # get account_name and uri path to sign\n    string_to_sign += '/' + account_name + uri_path\n\n    # get query string to sign if it is not table service\n    query_to_sign = request.query\n    query_to_sign.sort()\n\n    # group the values of repeated query names onto a single line\n    current_name = ''\n    for name, value in query_to_sign:\n        if value:\n            if current_name != name:\n                string_to_sign += '\\n' + name + ':' + value\n                current_name = name\n            else:\n                string_to_sign += ',' + value\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _sign_storage_table_request(request, account_name, account_key):\n    uri_path = request.path.split('?')[0]\n\n    string_to_sign = request.method + '\\n'\n    headers_to_sign = ['content-md5', 'content-type', 'date']\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get account_name and uri path to sign\n    string_to_sign += ''.join(['/', account_name, uri_path])\n\n    for name, value in request.query:\n        if name == 'comp' and uri_path == '/':\n            string_to_sign += '?comp=' + value\n            break\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _to_python_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\n
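# Illustrative, not part of the original code: values inside the signed\n# 32-bit range map to Edm.Int32, larger ones to Edm.Int64, e.g.\n#\n#     _to_entity_int(42)       -> ('Edm.Int32', '42')\n#     _to_entity_int(2 ** 40)  -> ('Edm.Int64', '1099511627776')\ndef _to_entity_int(data):\n    int_max = (2 << 30) - 1\n    if data > (int_max) or data < (int_max + 1) * 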
(-1):\n        return 'Edm.Int64', str(data)\n    else:\n        return 'Edm.Int32', str(data)\n\n\ndef _to_entity_bool(value):\n    if value:\n        return 'Edm.Boolean', 'true'\n    return 'Edm.Boolean', 'false'\n\n\ndef _to_entity_datetime(value):\n    return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')\n\n\ndef _to_entity_float(value):\n    return 'Edm.Double', str(value)\n\n\ndef _to_entity_property(value):\n    if value.type == 'Edm.Binary':\n        return value.type, _encode_base64(value.value)\n\n    return value.type, str(value.value)\n\n\ndef _to_entity_none(value):\n    return None, None\n\n\ndef _to_entity_str(value):\n    return 'Edm.String', value\n\n\n# Tables of conversions to and from entity types.  We support specific\n# datatypes, and beyond that the user can use an EntityProperty to get\n# custom data type support.\n\ndef _from_entity_binary(value):\n    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))\n\n\ndef _from_entity_int(value):\n    return int(value)\n\n\ndef _from_entity_datetime(value):\n    format = '%Y-%m-%dT%H:%M:%S'\n    if '.' in value:\n        format = format + '.%f'\n    if value.endswith('Z'):\n        format = format + 'Z'\n    return datetime.strptime(value, format)\n\n_ENTITY_TO_PYTHON_CONVERSIONS = {\n    'Edm.Binary': _from_entity_binary,\n    'Edm.Int32': _from_entity_int,\n    'Edm.Int64': _from_entity_int,\n    'Edm.Double': float,\n    'Edm.Boolean': _to_python_bool,\n    'Edm.DateTime': _from_entity_datetime,\n}\n\n# Conversion from Python type to a function which returns a tuple of the\n# type string and content string.\n_PYTHON_TO_ENTITY_CONVERSIONS = {\n    int: _to_entity_int,\n    bool: _to_entity_bool,\n    datetime: _to_entity_datetime,\n    float: _to_entity_float,\n    EntityProperty: _to_entity_property,\n    str: _to_entity_str,\n}\n\nif sys.version_info < (3,):\n    _PYTHON_TO_ENTITY_CONVERSIONS.update({\n        long: _to_entity_int,\n        types.NoneType: _to_entity_none,\n        unicode: _to_entity_str,\n    })\n\n\ndef _convert_entity_to_xml(source):\n    ''' Converts an entity object to xml to send.\n\n    The entity format is:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n\n    # construct the entity body included in <m:properties> and </m:properties>\n    entity_body = '<m:properties xml:space=\"preserve\">{properties}</m:properties>'\n\n    if 
isinstance(source, WindowsAzureData):\n        source = vars(source)\n\n    properties_str = ''\n\n    # set properties type for types we know if value has no type info.\n    # if value has type info, then set the type to value.type\n    for name, value in source.items():\n        mtype = ''\n        conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))\n        if conv is None and sys.version_info >= (3,) and value is None:\n            conv = _to_entity_none\n        if conv is None:\n            raise WindowsAzureError(\n                _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(\n                    type(value).__name__))\n\n        mtype, value = conv(value)\n\n        # form the property node\n        properties_str += ''.join(['<d:', name])\n        if value is None:\n            properties_str += ' m:null=\"true\" />'\n        else:\n            if mtype:\n                properties_str += ''.join([' m:type=\"', mtype, '\"'])\n            properties_str += ''.join(['>',\n                                      xml_escape(value), '</d:', name, '>'])\n\n    if sys.version_info < (3,):\n        if isinstance(properties_str, unicode):\n            properties_str = properties_str.encode('utf-8')\n\n    # generate the entity_body\n    entity_body = entity_body.format(properties=properties_str)\n    xmlstr = _create_entry(entity_body)\n    return xmlstr\n\n\ndef _convert_table_to_xml(table_name):\n    '''\n    Create xml to send for a given table name. The xml format for a table is\n    the same as for an entity; the only difference is that a table has a\n    single property, 'TableName', so we just call _convert_entity_to_xml.\n\n    table_name: the name of the table\n    '''\n    return _convert_entity_to_xml({'TableName': table_name})\n\n\ndef _convert_block_list_to_xml(block_id_list):\n    '''\n    Convert a block list to xml to send.\n\n    block_id_list:\n        a str list containing the block ids that are used in put_block_list.\n    Each block id is emitted as a Latest element, which commits the most\n    recently uploaded version of the block.\n    '''\n    if block_id_list is None:\n        return ''\n    xml = '<?xml version=\"1.0\" encoding=\"utf-8\"?><BlockList>'\n    for value in block_id_list:\n        xml += '<Latest>{0}</Latest>'.format(_encode_base64(value))\n\n    return xml + '</BlockList>'\n\n\ndef _create_blob_result(response):\n    blob_properties = _parse_response_for_dict(response)\n    return BlobResult(response.body, blob_properties)\n\n\ndef _convert_response_to_block_list(response):\n    '''\n    Converts xml response to block list class.\n    '''\n    blob_block_list = BlobBlockList()\n\n    xmldoc = minidom.parseString(response.body)\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'CommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.committed_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'UncommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 
'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.uncommitted_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    return blob_block_list\n\n\ndef _remove_prefix(name):\n    colon = name.find(':')\n    if colon != -1:\n        return name[colon + 1:]\n    return name\n\n\ndef _convert_response_to_entity(response):\n    if response is None:\n        return response\n    return _convert_xml_to_entity(response.body)\n\n\ndef _convert_xml_to_entity(xmlstr):\n    ''' Convert xml response to entity.\n\n    The format of entity:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n\n    xml_properties = None\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        for content in _get_child_nodes(entry, 'content'):\n            # TODO: Namespace\n            xml_properties = _get_child_nodesNS(\n                content, METADATA_NS, 'properties')\n\n    if not xml_properties:\n        return None\n\n    entity = Entity()\n    # extract each property node and get the type from attribute and node value\n    for xml_property in xml_properties[0].childNodes:\n        name = _remove_prefix(xml_property.nodeName)\n        # exclude the Timestamp since it is auto added by azure when\n        # inserting entity. 
We don't want this to mix with real properties.\n        if name in ['Timestamp']:\n            continue\n\n        if xml_property.firstChild:\n            value = xml_property.firstChild.nodeValue\n        else:\n            value = ''\n\n        isnull = xml_property.getAttributeNS(METADATA_NS, 'null')\n        mtype = xml_property.getAttributeNS(METADATA_NS, 'type')\n\n        # if not isnull and no type info, then it is a string and we just\n        # need the str type to hold the property.\n        if not isnull and not mtype:\n            _set_entity_attr(entity, name, value)\n        elif isnull == 'true':\n            if mtype:\n                property = EntityProperty(mtype, None)\n            else:\n                property = EntityProperty('Edm.String', None)\n            _set_entity_attr(entity, name, property)\n        else:  # need an object to hold the property\n            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)\n            if conv is not None:\n                property = conv(value)\n            else:\n                property = EntityProperty(mtype, value)\n            _set_entity_attr(entity, name, property)\n\n    # extract the etag from the feed entry properties and set it on the\n    # entity.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        if name in ['etag']:\n            _set_entity_attr(entity, name, value)\n\n    return entity\n\n\ndef _set_entity_attr(entity, name, value):\n    try:\n        setattr(entity, name, value)\n    except UnicodeEncodeError:\n        # Python 2 doesn't support unicode attribute names, so we'll\n        # add them and access them directly through the dictionary\n        entity.__dict__[name] = value\n\n\ndef _convert_xml_to_table(xmlstr):\n    ''' Converts the xml response to table class.\n    Simply calls _convert_xml_to_entity, extracts the table name, and adds\n    the updated and author info.\n    '''\n    table = Table()\n    entity = _convert_xml_to_entity(xmlstr)\n    setattr(table, 'name', entity.TableName)\n    for name, value in _get_entry_properties(xmlstr, False).items():\n        setattr(table, name, value)\n    return table\n\n\ndef _storage_error_handler(http_error):\n    ''' Simple error handler for storage service. '''\n    return _general_error_handler(http_error)\n\n# make these available just from storage.\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.queueservice import QueueService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.cloudstorageaccount import CloudStorageAccount\nfrom azure.storage.sharedaccesssignature import (\n    SharedAccessSignature,\n    SharedAccessPolicy,\n    Permission,\n    WebResource,\n    )\n"
  },
  {
    "path": "CustomScript/azure/storage/blobservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    BLOB_SERVICE_HOST_BASE,\n    DEV_BLOB_HOST,\n    _ERROR_VALUE_NEGATIVE,\n    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _parse_simple_list,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_type_bytes,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.storage import (\n    Container,\n    ContainerEnumResults,\n    PageList,\n    PageRange,\n    SignedIdentifiers,\n    StorageServiceProperties,\n    _convert_block_list_to_xml,\n    _convert_response_to_block_list,\n    _create_blob_result,\n    _parse_blob_enum_results_list,\n    _update_storage_blob_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\nfrom os import path\nimport sys\nif sys.version_info >= (3,):\n    from io import BytesIO\nelse:\n    from cStringIO import StringIO as BytesIO\n\n# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT\n_PAGE_SIZE = 512\n\nclass BlobService(_StorageClient):\n\n    '''\n    This is the main class managing Blob resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to https.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024\n        self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024\n        super(BlobService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def make_blob_url(self, container_name, blob_name, account_name=None,\n                      protocol=None, host_base=None):\n        '''\n        Creates the url to access a blob.\n\n        container_name: Name of container.\n        blob_name: Name of blob.\n        account_name:\n            Name of the storage account. If not specified, uses the account\n            specified when BlobService was initialized.\n        protocol:\n            Protocol to use: 'http' or 'https'. 
If not specified, uses the\n            protocol specified when BlobService was initialized.\n        host_base:\n            Live host base url.  If not specified, uses the host base specified\n            when BlobService was initialized.\n        '''\n        if not account_name:\n            account_name = self.account_name\n        if not protocol:\n            protocol = self.protocol\n        if not host_base:\n            host_base = self.host_base\n\n        return '{0}://{1}{2}/{3}/{4}'.format(protocol,\n                                             account_name,\n                                             host_base,\n                                             container_name,\n                                             blob_name)\n\n    def list_containers(self, prefix=None, marker=None, maxresults=None,\n                        include=None):\n        '''\n        The List Containers operation returns a list of the containers under\n        the specified account.\n\n        prefix:\n            Optional. Filters the results to return only containers whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list to\n            be returned with the next list operation.\n        maxresults:\n            Optional. Specifies the maximum number of containers to return.\n        include:\n            Optional. Include this parameter to specify that the container's\n            metadata be returned as part of the response body. set this\n            parameter to string 'metadata' to get container's metadata.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(response,\n                                        ContainerEnumResults,\n                                        \"Containers\",\n                                        Container)\n\n    def create_container(self, container_name, x_ms_meta_name_values=None,\n                         x_ms_blob_public_access=None, fail_on_exist=False):\n        '''\n        Creates a new container under the specified account. If the container\n        with the same name already exists, the operation fails.\n\n        container_name: Name of container to create.\n        x_ms_meta_name_values:\n            Optional. A dict with name_value pairs to associate with the\n            container as metadata. Example:{'Category':'test'}\n        x_ms_blob_public_access:\n            Optional. 
Possible values include: container, blob\n        fail_on_exist:\n            specify whether to throw an exception when the container exists.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_container_properties(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata and system properties for the\n        specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_properties only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def get_container_metadata(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata for the specified container. 
The\n        metadata will be in returned dictionary['x-ms-meta-(name)'].\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_container_metadata(self, container_name,\n                               x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets one or more user-defined name-value pairs for the specified\n        container.\n\n        container_name: Name of existing container.\n        x_ms_meta_name_values:\n            A dict containing name, value for metadata.\n            Example: {'category':'test'}\n        x_ms_lease_id:\n            If specified, set_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_container_acl(self, container_name, x_ms_lease_id=None):\n        '''\n        Gets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, SignedIdentifiers)\n\n    def set_container_acl(self, container_name, signed_identifiers=None,\n                          x_ms_blob_public_access=None, 
x_ms_lease_id=None):\n        '''\n        Sets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        signed_identifiers: SignedIdentifiers instance\n        x_ms_blob_public_access:\n            Optional. Possible values include: container, blob\n        x_ms_lease_id:\n            If specified, set_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.body = _get_request_body(\n            _convert_class_to_xml(signed_identifiers))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_container(self, container_name, fail_not_exist=False,\n                         x_ms_lease_id=None):\n        '''\n        Marks the specified container for deletion.\n\n        container_name: Name of container to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the container doesn't\n            exist.\n        x_ms_lease_id: Required if the container has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n
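    # Illustrative lease cycle, not part of the original code (assumes\n    # blob_service is a BlobService instance and 'mycontainer' exists):\n    #\n    #     props = blob_service.lease_container('mycontainer', 'acquire')\n    #     lease_id = props['x-ms-lease-id']\n    #     blob_service.lease_container('mycontainer', 'release', lease_id)\n\n    def lease_container(self, container_name, x_ms_lease_action,\n                        x_ms_lease_id=None, x_ms_lease_duration=60,\n                        x_ms_lease_break_period=None,\n                        x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a lock on a container for delete operations.\n        The lock duration can be 15 to 60 seconds, or can be infinite.\n\n        container_name: Name of existing container.\n        x_ms_lease_action:\n            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the container has an active lease.\n        x_ms_lease_duration:\n            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n            between 15 and 60 seconds. 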
A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration of\n            seconds that the lease should continue before it is broken, between\n            0 and 60 seconds. This break period is only used if it is shorter\n            than the time remaining on the lease. If longer, the time remaining\n            on the lease is used. A new lease will not be available before the\n            break period has expired, but the lease may be held for longer than\n            the break period. If this header does not appear with a break\n            operation, a fixed-duration lease breaks after the remaining lease\n            period elapses, and an infinite lease breaks immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=lease'\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n            ('x-ms-lease-duration',\n             _str_or_none(\n                 x_ms_lease_duration if x_ms_lease_action == 'acquire'\\\n                     else None)),\n            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n\n    def list_blobs(self, container_name, prefix=None, marker=None,\n                   maxresults=None, include=None, delimiter=None):\n        '''\n        Returns the list of blobs under the specified container.\n\n        container_name: Name of existing container.\n        prefix:\n            Optional. Filters the results to return only blobs whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list\n            to be returned with the next list operation. The operation returns\n            a marker value within the response body if the list returned was\n            not complete. The marker value may then be used in a subsequent\n            call to request the next set of list items. The marker value is\n            opaque to the client.\n        maxresults:\n            Optional. Specifies the maximum number of blobs to return,\n            including all BlobPrefix elements. If the request does not specify\n            maxresults or specifies a value greater than 5,000, the server will\n            return up to 5,000 items. 
Setting maxresults to a value less than\n            or equal to zero results in error response code 400 (Bad Request).\n        include:\n            Optional. Specifies one or more datasets to include in the\n            response. To specify more than one of these options on the URI,\n            you must separate each option with a comma. Valid values are:\n                snapshots:\n                    Specifies that snapshots should be included in the\n                    enumeration. Snapshots are listed from oldest to newest in\n                    the response.\n                metadata:\n                    Specifies that blob metadata be returned in the response.\n                uncommittedblobs:\n                    Specifies that blobs for which blocks have been uploaded,\n                    but which have not been committed using Put Block List\n                    (REST API), be included in the response.\n                copy:\n                    Version 2012-02-12 and newer. Specifies that metadata\n                    related to any current or previous Copy Blob operation\n                    should be included in the response.\n        delimiter:\n            Optional. When the request includes this parameter, the operation\n            returns a BlobPrefix element in the response body that acts as a\n            placeholder for all blobs whose names begin with the same\n            substring up to the appearance of the delimiter character. The\n            delimiter may be a single character or a string.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('delimiter', _str_or_none(delimiter)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_blob_enum_results_list(response)\n\n    def set_blob_service_properties(self, storage_service_properties,\n                                    timeout=None):\n        '''\n        Sets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics. You can also use this operation to\n        set the default request version for all incoming requests that do not\n        have a version specified.\n\n        storage_service_properties: a StorageServiceProperties object.\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_blob_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def get_blob_properties(self, container_name, blob_name,\n                            x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata, standard HTTP properties, and\n        system properties for the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'HEAD'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def set_blob_properties(self, container_name, blob_name,\n                            x_ms_blob_cache_control=None,\n                            x_ms_blob_content_type=None,\n                            x_ms_blob_content_md5=None,\n                            x_ms_blob_content_encoding=None,\n                            x_ms_blob_content_language=None,\n                            x_ms_lease_id=None):\n        '''\n        Sets system properties on the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_blob_cache_control:\n            Optional. Modifies the cache control string for the blob.\n        x_ms_blob_content_type: Optional. 
Sets the blob's content type.\n        x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.\n        x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.\n        x_ms_blob_content_language: Optional. Sets the blob's content language.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=properties'\n        request.headers = [\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_blob(self, container_name, blob_name, blob, x_ms_blob_type,\n                 content_encoding=None, content_language=None,\n                 content_md5=None, cache_control=None,\n                 x_ms_blob_content_type=None, x_ms_blob_content_encoding=None,\n                 x_ms_blob_content_language=None, x_ms_blob_content_md5=None,\n                 x_ms_blob_cache_control=None, x_ms_meta_name_values=None,\n                 x_ms_lease_id=None, x_ms_blob_content_length=None,\n                 x_ms_blob_sequence_number=None):\n        '''\n        Creates a new block blob or page blob, or updates the content of an\n        existing block blob.\n\n        See put_block_blob_from_* and put_page_blob_from_* for high level\n        functions that handle the creation and upload of large blobs with\n        automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob:\n            For BlockBlob:\n                Content of blob as bytes (size < 64MB). For larger sizes, you\n                must call put_block and put_block_list to set content of blob.\n            For PageBlob:\n                Use None and call put_page to set content of blob.\n        x_ms_blob_type: Required. Either BlockBlob or PageBlob.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. 
When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_content_length:\n            Required for page blobs. This header specifies the maximum size\n            for the page blob, up to 1 TB. The page blob size must be aligned\n            to a 512-byte boundary.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_blob_type', x_ms_blob_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-blob-type', _str_or_none(x_ms_blob_type)),\n            ('Content-Encoding', _str_or_none(content_encoding)),\n            ('Content-Language', _str_or_none(content_language)),\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('Cache-Control', _str_or_none(cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-blob-content-length',\n             _str_or_none(x_ms_blob_content_length)),\n            ('x-ms-blob-sequence-number',\n             _str_or_none(x_ms_blob_sequence_number))\n        ]\n        request.body = _get_request_body_bytes_only('blob', blob)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block_blob_from_path(self, container_name, blob_name, file_path,\n                                 content_encoding=None, content_language=None,\n                                 content_md5=None, cache_control=None,\n                                 x_ms_blob_content_type=None,\n                
                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from a file path, or updates the content of an\n        existing block blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. 
Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n\n        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n                                          stream,\n                                          count,\n                                          content_encoding,\n                                          content_language,\n                                          content_md5,\n                                          cache_control,\n                                          x_ms_blob_content_type,\n                                          x_ms_blob_content_encoding,\n                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n                                          x_ms_blob_cache_control,\n                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n                                          progress_callback)\n
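\n    # A minimal usage sketch (illustrative, not part of the original module):\n    # upload a local file as a block blob with progress reporting. The\n    # BlobService instance `blob_service`, the container and the paths are\n    # hypothetical placeholders.\n    #\n    #     def report(current, total):\n    #         print('{0}/{1} bytes'.format(current, total))\n    #\n    #     blob_service.put_block_blob_from_path(\n    #         'mycontainer', 'myblob.bin', '/tmp/myblob.bin',\n    #         progress_callback=report)\n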
\n    def put_block_blob_from_file(self, container_name, blob_name, stream,\n                                 count=None, content_encoding=None,\n                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from a file/stream, or updates the content of\n        an existing block blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        stream: Opened file/stream to upload as the blob content.\n        count:\n            Number of bytes to read from the stream. This is optional, but\n            should be supplied for optimal performance.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n\n        if count and count < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, count)\n\n            data = stream.read(count)\n            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            if progress_callback:\n                progress_callback(count, count)\n        else:\n            if progress_callback:\n                progress_callback(0, count)\n\n            self.put_blob(container_name,\n                          blob_name,\n                          None,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            remain_bytes = count\n            block_ids = []\n            block_index = 0\n            index = 0\n            while True:\n                request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\\\n                    if remain_bytes is None else min(\n                        remain_bytes,\n                        self._BLOB_MAX_CHUNK_DATA_SIZE)\n                data = stream.read(request_count)\n                if data:\n                    length = len(data)\n                    index += length\n   
                 remain_bytes = remain_bytes - \\\n                        length if remain_bytes else None\n                    block_id = '{0:08d}'.format(block_index)\n                    self.put_block(container_name, blob_name,\n                                   data, block_id, x_ms_lease_id=x_ms_lease_id)\n                    block_ids.append(block_id)\n                    block_index += 1\n                    if progress_callback:\n                        progress_callback(index, count)\n                else:\n                    break\n\n            self.put_block_list(container_name, blob_name, block_ids,\n                                content_md5, x_ms_blob_cache_control,\n                                x_ms_blob_content_type,\n                                x_ms_blob_content_encoding,\n                                x_ms_blob_content_language,\n                                x_ms_blob_content_md5,\n                                x_ms_meta_name_values,\n                                x_ms_lease_id)\n\n    def put_block_blob_from_bytes(self, container_name, blob_name, blob,\n                                  index=0, count=None, content_encoding=None,\n                                  content_language=None, content_md5=None,\n                                  cache_control=None,\n                                  x_ms_blob_content_type=None,\n                                  x_ms_blob_content_encoding=None,\n                                  x_ms_blob_content_language=None,\n                                  x_ms_blob_content_md5=None,\n                                  x_ms_blob_cache_control=None,\n                                  x_ms_meta_name_values=None,\n                                  x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from an array of bytes, or updates the content\n        of an existing block blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of bytes.\n        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or negative value to upload\n            all bytes starting from index.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. 
Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('blob', blob)\n        _validate_not_none('index', index)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n        if count is None or count < 0:\n            count = len(blob) - index\n\n        if count < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, count)\n\n            data = blob[index: index + count]\n            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            if progress_callback:\n                progress_callback(count, count)\n        else:\n            stream = BytesIO(blob)\n            stream.seek(index)\n\n            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n                                          stream,\n                                          count,\n                                          content_encoding,\n                                          content_language,\n                                          content_md5,\n                                          cache_control,\n                                          x_ms_blob_content_type,\n                                          x_ms_blob_content_encoding,\n                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n                                          x_ms_blob_cache_control,\n                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n                                          progress_callback)\n\n    def put_block_blob_from_text(self, container_name, blob_name, text,\n                                 text_encoding='utf-8',\n                                 content_encoding=None, content_language=None,\n                                 content_md5=None, cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 
x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from str/unicode, or updates the content of an\n        existing block blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        text: Text to upload to the blob.\n        text_encoding: Encoding to use to convert the text to bytes.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. 
Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text', text)\n\n        if not isinstance(text, bytes):\n            _validate_not_none('text_encoding', text_encoding)\n            text = text.encode(text_encoding)\n\n        self.put_block_blob_from_bytes(container_name,\n                                       blob_name,\n                                       text,\n                                       0,\n                                       len(text),\n                                       content_encoding,\n                                       content_language,\n                                       content_md5,\n                                       cache_control,\n                                       x_ms_blob_content_type,\n                                       x_ms_blob_content_encoding,\n                                       x_ms_blob_content_language,\n                                       x_ms_blob_content_md5,\n                                       x_ms_blob_cache_control,\n                                       x_ms_meta_name_values,\n                                       x_ms_lease_id,\n                                       progress_callback)\n
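\n    # Usage sketch (illustrative; `blob_service` and the names are\n    # hypothetical): upload unicode text. The text is encoded to bytes with\n    # text_encoding and routed through put_block_blob_from_bytes.\n    #\n    #     blob_service.put_block_blob_from_text(\n    #         'mycontainer', 'hello.txt', u'hello world', text_encoding='utf-8')\n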
\n    def put_page_blob_from_path(self, container_name, blob_name, file_path,\n                                content_encoding=None, content_language=None,\n                                content_md5=None, cache_control=None,\n                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n                                x_ms_blob_content_language=None,\n                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n                                x_ms_meta_name_values=None,\n                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n                                progress_callback=None):\n        '''\n        Creates a new page blob from a file path, or updates the content of an\n        existing page blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n\n        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n            self.put_page_blob_from_file(container_name,\n                                         blob_name,\n                                         stream,\n                                         count,\n                                         content_encoding,\n                                         content_language,\n                                         content_md5,\n                                         cache_control,\n                                         x_ms_blob_content_type,\n                                         x_ms_blob_content_encoding,\n                                         x_ms_blob_content_language,\n                                         x_ms_blob_content_md5,\n                                         x_ms_blob_cache_control,\n                                         x_ms_meta_name_values,\n                                         x_ms_lease_id,\n                                         x_ms_blob_sequence_number,\n                                         progress_callback)\n
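\n    # Usage sketch (illustrative; `blob_service` and the names are\n    # hypothetical): upload a local file as a page blob. The file size must\n    # be a multiple of 512 bytes, or the upload is rejected.\n    #\n    #     blob_service.put_page_blob_from_path(\n    #         'mycontainer', 'disk.vhd', '/tmp/disk.vhd')\n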
\n    def put_page_blob_from_file(self, container_name, blob_name, stream, count,\n                                content_encoding=None, content_language=None,\n                                content_md5=None, cache_control=None,\n                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n                                x_ms_blob_content_language=None,\n                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n                                x_ms_meta_name_values=None,\n                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n                                progress_callback=None):\n        '''\n        Creates a new page blob from a file/stream, or updates the content of an\n        existing page blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        stream: Opened file/stream to upload as the blob content.\n        count:\n            Number of bytes to read from the stream. This is required; a page\n            blob cannot be created if the count is unknown.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. 
The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n        _validate_not_none('count', count)\n\n        if count < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))\n\n        if count % _PAGE_SIZE != 0:\n            raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))\n\n        if progress_callback:\n            progress_callback(0, count)\n\n        self.put_blob(container_name,\n                      blob_name,\n                      b'',\n                      'PageBlob',\n                      content_encoding,\n                      content_language,\n                      content_md5,\n                      cache_control,\n                      x_ms_blob_content_type,\n                      x_ms_blob_content_encoding,\n                      x_ms_blob_content_language,\n                      x_ms_blob_content_md5,\n                      x_ms_blob_cache_control,\n                      x_ms_meta_name_values,\n                      x_ms_lease_id,\n                      count,\n                      x_ms_blob_sequence_number)\n\n        remain_bytes = count\n        page_start = 0\n        while True:\n            request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE)\n            data = stream.read(request_count)\n            if data:\n                length = len(data)\n                remain_bytes = remain_bytes - length\n                page_end = page_start + length - 1\n                self.put_page(container_name,\n                              blob_name,\n                              data,\n                              'bytes={0}-{1}'.format(page_start, page_end),\n                              'update',\n                              x_ms_lease_id=x_ms_lease_id)\n                page_start = page_start + length\n\n                if progress_callback:\n                    progress_callback(page_start, count)\n            else:\n                break\n\n    def put_page_blob_from_bytes(self, container_name, blob_name, blob,\n                                 index=0, count=None, content_encoding=None,\n                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None,\n                                 x_ms_blob_sequence_number=None,\n                                 progress_callback=None):\n        '''\n        Creates a new page blob from an array of bytes, or updates the content\n        of an existing page blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of 
bytes.\n        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or negative value to upload\n            all bytes starting from index.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. 
The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('blob', blob)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n        if count is None or count < 0:\n            count = len(blob) - index\n\n        stream = BytesIO(blob)\n        stream.seek(index)\n\n        self.put_page_blob_from_file(container_name,\n                                     blob_name,\n                                     stream,\n                                     count,\n                                     content_encoding,\n                                     content_language,\n                                     content_md5,\n                                     cache_control,\n                                     x_ms_blob_content_type,\n                                     x_ms_blob_content_encoding,\n                                     x_ms_blob_content_language,\n                                     x_ms_blob_content_md5,\n                                     x_ms_blob_cache_control,\n                                     x_ms_meta_name_values,\n                                     x_ms_lease_id,\n                                     x_ms_blob_sequence_number,\n                                     progress_callback)\n
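\n    # Usage sketch (illustrative; `blob_service` and the names are\n    # hypothetical): page blob content must be aligned to 512 bytes, so the\n    # payload is padded before the upload.\n    #\n    #     data = b'hello world'.ljust(512, b'\\x00')\n    #     blob_service.put_page_blob_from_bytes('mycontainer', 'pages.bin', data)\n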
\n    def get_blob(self, container_name, blob_name, snapshot=None,\n                 x_ms_range=None, x_ms_lease_id=None,\n                 x_ms_range_get_content_md5=None):\n        '''\n        Reads or downloads a blob from the system, including its metadata and\n        properties.\n\n        See get_blob_to_* for high level functions that handle the download\n        of large blobs with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_range:\n            Optional. Return only the bytes of the blob in the specified range.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_range_get_content_md5:\n            Optional. When this header is set to true and specified together\n            with the Range header, the service returns the MD5 hash for the\n            range, as long as the range is less than or equal to 4 MB in size.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-range-get-content-md5',\n             _str_or_none(x_ms_range_get_content_md5))\n        ]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request, None)\n\n        return _create_blob_result(response)\n\n    def get_blob_to_path(self, container_name, blob_name, file_path,\n                         open_mode='wb', snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob to a file path, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        file_path: Path of file to write to.\n        open_mode: Mode to use when opening the file.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n        _validate_not_none('open_mode', open_mode)\n\n        with open(file_path, open_mode) as stream:\n            self.get_blob_to_file(container_name,\n                                  blob_name,\n                                  stream,\n                                  snapshot,\n                                  x_ms_lease_id,\n                                  progress_callback)\n
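\n    # Usage sketch (illustrative; `blob_service` and the names are\n    # hypothetical): download a blob straight to a local file.\n    #\n    #     blob_service.get_blob_to_path(\n    #         'mycontainer', 'myblob.bin', '/tmp/downloaded.bin')\n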
\n    def get_blob_to_file(self, container_name, blob_name, stream,\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob to a file/stream, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        stream: Opened file/stream to write to.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n\n        props = self.get_blob_properties(container_name, blob_name)\n        blob_size = int(props['content-length'])\n\n        if blob_size < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, blob_size)\n\n            data = self.get_blob(container_name,\n                                 blob_name,\n                                 snapshot,\n                                 x_ms_lease_id=x_ms_lease_id)\n\n            stream.write(data)\n\n            if progress_callback:\n                progress_callback(blob_size, blob_size)\n        else:\n            if progress_callback:\n                progress_callback(0, blob_size)\n\n            index = 0\n            while index < blob_size:\n                chunk_range = 'bytes={0}-{1}'.format(\n                    index,\n                    index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1)\n                # Pass snapshot and lease id through so that chunked reads\n                # target the same blob version as the small-blob path above.\n                data = self.get_blob(\n                    container_name, blob_name, snapshot,\n                    x_ms_range=chunk_range, x_ms_lease_id=x_ms_lease_id)\n                length = len(data)\n                index += length\n                if length > 0:\n                    stream.write(data)\n                    if progress_callback:\n                        progress_callback(index, blob_size)\n                    if length < self._BLOB_MAX_CHUNK_DATA_SIZE:\n                        break\n                else:\n                    break\n
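\n    # The chunked loop above issues ranged get_blob calls; a standalone\n    # ranged read looks like this (illustrative; names are hypothetical).\n    # The x-ms-range header takes an inclusive byte range.\n    #\n    #     first_kb = blob_service.get_blob(\n    #         'mycontainer', 'myblob.bin', x_ms_range='bytes=0-1023')\n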
\n    def get_blob_to_bytes(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None, progress_callback=None):\n        '''\n        Downloads a blob as an array of bytes, with automatic chunking and\n        progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n\n        stream = BytesIO()\n        self.get_blob_to_file(container_name,\n                              blob_name,\n                              stream,\n                              snapshot,\n                              x_ms_lease_id,\n                              progress_callback)\n\n        return stream.getvalue()\n\n    def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8',\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob as unicode text, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        text_encoding: Encoding to use when decoding the blob data.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text_encoding', text_encoding)\n\n        result = self.get_blob_to_bytes(container_name,\n                                        blob_name,\n                                        snapshot,\n                                        x_ms_lease_id,\n                                        progress_callback)\n\n        return result.decode(text_encoding)\n
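\n    # Usage sketch (illustrative; names are hypothetical): a text round trip\n    # through the Blob service.\n    #\n    #     blob_service.put_block_blob_from_text('mycontainer', 'a.txt', u'hi')\n    #     assert blob_service.get_blob_to_text('mycontainer', 'a.txt') == u'hi'\n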
\n    def get_blob_metadata(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata for the specified blob or snapshot.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_blob_metadata(self, container_name, blob_name,\n                          x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets user-defined metadata for the specified blob as one or more\n        name-value pairs.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_meta_name_values: Dict containing name and value pairs.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n
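\n    # Usage sketch (illustrative; names are hypothetical, and the assumed\n    # metadata dict format uses plain names that the service stores under the\n    # x-ms-meta- prefix): set metadata, then read it back. Keys returned by\n    # get_blob_metadata keep the x-ms-meta prefix.\n    #\n    #     blob_service.set_blob_metadata('mycontainer', 'myblob.bin',\n    #                                    {'category': 'test'})\n    #     meta = blob_service.get_blob_metadata('mycontainer', 'myblob.bin')\n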
\n    def lease_blob(self, container_name, blob_name, x_ms_lease_action,\n                   x_ms_lease_id=None, x_ms_lease_duration=60,\n                   x_ms_lease_break_period=None, x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a lock on a blob for write and delete\n        operations. The lock duration can be 15 to 60 seconds, or can be\n        infinite.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_action:\n            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_lease_duration:\n            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n            between 15 and 60 seconds. A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration of\n            seconds that the lease should continue before it is broken, between\n            0 and 60 seconds. This break period is only used if it is shorter\n            than the time remaining on the lease. If longer, the time remaining\n            on the lease is used. A new lease will not be available before the\n            break period has expired, but the lease may be held for longer than\n            the break period. If this header does not appear with a break\n            operation, a fixed-duration lease breaks after the remaining lease\n            period elapses, and an infinite lease breaks immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=lease'\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n            ('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\\\n                if x_ms_lease_action == 'acquire' else None)),\n            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n
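\n    # Usage sketch (illustrative; names are hypothetical): acquire a lease,\n    # pass its id to write operations, then release it.\n    #\n    #     lease = blob_service.lease_blob('mycontainer', 'myblob.bin', 'acquire')\n    #     lease_id = lease['x-ms-lease-id']\n    #     # ... writes passing x_ms_lease_id=lease_id ...\n    #     blob_service.lease_blob('mycontainer', 'myblob.bin', 'release',\n    #                             lease_id)\n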
An ETag value.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=snapshot'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-snapshot', 'etag', 'last-modified'])\n\n    def copy_blob(self, container_name, blob_name, x_ms_copy_source,\n                  x_ms_meta_name_values=None,\n                  x_ms_source_if_modified_since=None,\n                  x_ms_source_if_unmodified_since=None,\n                  x_ms_source_if_match=None, x_ms_source_if_none_match=None,\n                  if_modified_since=None, if_unmodified_since=None,\n                  if_match=None, if_none_match=None, x_ms_lease_id=None,\n                  x_ms_source_lease_id=None):\n        '''\n        Copies a blob to a destination within the storage account.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_copy_source:\n            URL up to 2 KB in length that specifies a blob. A source blob in\n            the same account can be private, but a blob in another account\n            must be public or accept credentials included in this URL, such as\n            a Shared Access Signature. Examples:\n            https://myaccount.blob.core.windows.net/mycontainer/myblob\n            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>\n        x_ms_meta_name_values: Optional. Dict containing name and value pairs.\n        x_ms_source_if_modified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            copy the blob only if the source blob has been modified since the\n            specified date/time.\n        x_ms_source_if_unmodified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            copy the blob only if the source blob has not been modified since\n            the specified date/time.\n        x_ms_source_if_match:\n            Optional. An ETag value. Specify this conditional header to copy\n            the source blob only if its ETag matches the value specified.\n        x_ms_source_if_none_match:\n            Optional. An ETag value. Specify this conditional header to copy\n            the source blob only if its ETag does not match the value\n            specified.\n        if_modified_since: Optional. DateTime string.\n        if_unmodified_since: Optional. DateTime string.\n        if_match:\n            Optional. 
Copy the blob only if its ETag value matches the\n            value specified.\n        if_none_match: Optional. An ETag value.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_source_lease_id:\n            Optional. Specify this to perform the Copy Blob operation only if\n            the lease ID given matches the active lease ID of the source blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_source', x_ms_copy_source)\n\n        if x_ms_copy_source.startswith('/'):\n            # Backwards compatibility for earlier versions of the SDK where\n            # the copy source can be in the following formats:\n            # - Blob in named container:\n            #     /accountName/containerName/blobName\n            # - Snapshot in named container:\n            #     /accountName/containerName/blobName?snapshot=<DateTime>\n            # - Blob in root container:\n            #     /accountName/blobName\n            # - Snapshot in root container:\n            #     /accountName/blobName?snapshot=<DateTime>\n            account, _, source =\\\n                x_ms_copy_source.partition('/')[2].partition('/')\n            x_ms_copy_source = self.protocol + '://' + \\\n                account + self.host_base + '/' + source\n\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-copy-source', _str_or_none(x_ms_copy_source)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-source-if-modified-since',\n             _str_or_none(x_ms_source_if_modified_since)),\n            ('x-ms-source-if-unmodified-since',\n             _str_or_none(x_ms_source_if_unmodified_since)),\n            ('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),\n            ('x-ms-source-if-none-match',\n             _str_or_none(x_ms_source_if_none_match)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,\n                        x_ms_lease_id=None):\n        '''\n        Aborts a pending copy_blob operation, and leaves a destination blob\n        with zero length and full metadata.\n\n        container_name: Name of destination container.\n        blob_name: Name of destination blob.\n        x_ms_copy_id:\n            Copy identifier provided in the x-ms-copy-id of the original\n            copy_blob operation.\n        x_ms_lease_id:\n            Required if the destination blob has an active infinite lease.\n        '''\n        _validate_not_none('container_name', container_name)\n     
   _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_id', x_ms_copy_id)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + \\\n            _str(blob_name) + '?comp=copy&copyid=' + \\\n            _str(x_ms_copy_id)\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-copy-action', 'abort'),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_blob(self, container_name, blob_name, snapshot=None,\n                    x_ms_lease_id=None):\n        '''\n        Marks the specified blob or snapshot for deletion. The blob is later\n        deleted during garbage collection.\n\n        To mark a specific snapshot for deletion provide the date/time of the\n        snapshot via the snapshot parameter.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to delete.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block(self, container_name, blob_name, block, blockid,\n                  content_md5=None, x_ms_lease_id=None):\n        '''\n        Creates a new block to be committed as part of a blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block: Content of the block.\n        blockid:\n            Required. A value that identifies the block. The string must be\n            less than or equal to 64 bytes in size.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the blob during transport. 
When this\n            header is specified, the storage service checks the hash that has\n            arrived with the one that was sent.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block', block)\n        _validate_not_none('blockid', blockid)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=block'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('blockid', _encode_base64(_str_or_none(blockid)))]\n        request.body = _get_request_body_bytes_only('block', block)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block_list(self, container_name, blob_name, block_list,\n                       content_md5=None, x_ms_blob_cache_control=None,\n                       x_ms_blob_content_type=None,\n                       x_ms_blob_content_encoding=None,\n                       x_ms_blob_content_language=None,\n                       x_ms_blob_content_md5=None, x_ms_meta_name_values=None,\n                       x_ms_lease_id=None):\n        '''\n        Writes a blob by specifying the list of block IDs that make up the\n        blob. In order to be written as part of a blob, a block must have been\n        successfully written to the server in a prior Put Block (REST API)\n        operation.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block_list: A str list containing the block ids.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent.\n        x_ms_blob_cache_control:\n            Optional. Sets the blob's cache control. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_type:\n            Optional. Sets the blob's content type. If specified, this property\n            is stored with the blob and returned with a read request.\n        x_ms_blob_content_encoding:\n            Optional. Sets the blob's content encoding. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_language:\n            Optional. Set the blob's content language. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_md5:\n            Optional. An MD5 hash of the blob content. Note that this hash is\n            not validated, as the hashes for the individual blocks were\n            validated when each was uploaded.\n        x_ms_meta_name_values: Optional. 
Dict containing name and value pairs.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block_list', block_list)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.body = _get_request_body(\n            _convert_block_list_to_xml(block_list))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_block_list(self, container_name, blob_name, snapshot=None,\n                       blocklisttype=None, x_ms_lease_id=None):\n        '''\n        Retrieves the list of blocks that have been uploaded as part of a\n        block blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. Datetime to determine the time to retrieve the blocks.\n        blocklisttype:\n            Specifies whether to return the list of committed blocks, the list\n            of uncommitted blocks, or both lists together. 
Valid values are:\n            committed, uncommitted, or all.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [\n            ('snapshot', _str_or_none(snapshot)),\n            ('blocklisttype', _str_or_none(blocklisttype))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _convert_response_to_block_list(response)\n\n    def put_page(self, container_name, blob_name, page, x_ms_range,\n                 x_ms_page_write, timeout=None, content_md5=None,\n                 x_ms_lease_id=None, x_ms_if_sequence_number_lte=None,\n                 x_ms_if_sequence_number_lt=None,\n                 x_ms_if_sequence_number_eq=None,\n                 if_modified_since=None, if_unmodified_since=None,\n                 if_match=None, if_none_match=None):\n        '''\n        Writes a range of pages to a page blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        page: Content of the page.\n        x_ms_range:\n            Required. Specifies the range of bytes to be written as a page.\n            Both the start and end of the range must be specified. Must be in\n            format: bytes=startByte-endByte. Given that pages must be aligned\n            with 512-byte boundaries, the start offset must be a multiple of\n            512 and the end offset must be one less than a multiple of 512.\n            Examples of valid byte ranges are 0-511, 512-1023, etc.\n        x_ms_page_write:\n            Required. You may specify one of the following options:\n                update (lower case):\n                    Writes the bytes specified by the request body into the\n                    specified range. The Range and Content-Length headers must\n                    match to perform the update.\n                clear (lower case):\n                    Clears the specified range and releases the space used in\n                    storage for that range. To clear a range, set the\n                    Content-Length header to zero, and the Range header to a\n                    value that indicates the range to clear, up to maximum\n                    blob size.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        content_md5:\n            Optional. An MD5 hash of the page content. This hash is used to\n            verify the integrity of the page during transport. When this header\n            is specified, the storage service compares the hash of the content\n            that has arrived with the header value that was sent. If the two\n            hashes do not match, the operation will fail with error code 400\n            (Bad Request).\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_if_sequence_number_lte:\n            Optional. 
If the blob's sequence number is less than or equal to\n            the specified value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_lt:\n            Optional. If the blob's sequence number is less than the specified\n            value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_eq:\n            Optional. If the blob's sequence number is equal to the specified\n            value, the request proceeds; otherwise it fails.\n        if_modified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has been modified since the\n            specified date/time. If the blob has not been modified, the Blob\n            service fails.\n        if_unmodified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has not been modified since the\n            specified date/time. If the blob has been modified, the Blob\n            service fails.\n        if_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value matches the\n            value specified. If the values do not match, the Blob service fails.\n        if_none_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value does not\n            match the value specified. If the values are identical, the Blob\n            service fails.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('page', page)\n        _validate_not_none('x_ms_range', x_ms_range)\n        _validate_not_none('x_ms_page_write', x_ms_page_write)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=page'\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-page-write', _str_or_none(x_ms_page_write)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-if-sequence-number-le',\n             _str_or_none(x_ms_if_sequence_number_lte)),\n            ('x-ms-if-sequence-number-lt',\n             _str_or_none(x_ms_if_sequence_number_lt)),\n            ('x-ms-if-sequence-number-eq',\n             _str_or_none(x_ms_if_sequence_number_eq)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match))\n        ]\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body_bytes_only('page', page)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_page_ranges(self, container_name, blob_name, snapshot=None,\n                        range=None, x_ms_range=None, x_ms_lease_id=None):\n        '''\n        Retrieves the page ranges for a 
blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve information\n            from.\n        range:\n            Optional. Specifies the range of bytes over which to list ranges,\n            inclusively. If omitted, then all ranges for the blob are returned.\n        x_ms_range:\n            Optional. Specifies the range of bytes over which to list ranges.\n            Both the start and end of the range must be specified. Must be in\n            format: bytes=startByte-endByte. Given that pages must be aligned\n            with 512-byte boundaries, the start offset must be a multiple of\n            512 and the end offset must be one less than a multiple of 512.\n            Examples of valid byte ranges are 0-511, 512-1023, etc.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=pagelist'\n        request.headers = [\n            ('Range', _str_or_none(range)),\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_simple_list(response, PageList, PageRange, \"page_ranges\")\n"
  },
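A quick usage sketch for the block-blob primitives defined above (put_block and put_block_list). This is illustrative only: the account credentials, container name and blob name are placeholders, and real code would use far larger blocks.

from azure.storage.blobservice import BlobService

blob_service = BlobService(account_name='myaccount', account_key='mykey')

data = b'x' * 12288
chunk_size = 4096  # deliberately tiny chunks, purely for illustration
block_ids = []
for offset in range(0, len(data), chunk_size):
    # Block ids must be <= 64 bytes; put_block base64-encodes the id for
    # the query string itself, and fixed-width ids keep them all the same
    # length as the service requires.
    block_id = '{0:08d}'.format(offset // chunk_size)
    blob_service.put_block('mycontainer', 'myblob',
                           data[offset:offset + chunk_size], block_id)
    block_ids.append(block_id)

# Blocks stay uncommitted until the block list is committed; committing
# the ids in order materializes the blob.
blob_service.put_block_list('mycontainer', 'myblob', block_ids)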
  {
    "path": "CustomScript/azure/storage/cloudstorageaccount.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.queueservice import QueueService\n\n\nclass CloudStorageAccount(object):\n\n    \"\"\"\n    Provides a factory for creating the blob, queue, and table services\n    with a common account name and account key.  Users can either use the\n    factory or can construct the appropriate service directly.\n    \"\"\"\n\n    def __init__(self, account_name=None, account_key=None):\n        self.account_name = account_name\n        self.account_key = account_key\n\n    def create_blob_service(self):\n        return BlobService(self.account_name, self.account_key)\n\n    def create_table_service(self):\n        return TableService(self.account_name, self.account_key)\n\n    def create_queue_service(self):\n        return QueueService(self.account_name, self.account_key)\n"
  },
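The factory above is thin but convenient when several services share one set of credentials. A minimal sketch, with placeholder credentials:

from azure.storage.cloudstorageaccount import CloudStorageAccount

account = CloudStorageAccount(account_name='myaccount', account_key='mykey')

# Each service is constructed with the same account name and key.
blob_service = account.create_blob_service()
table_service = account.create_table_service()
queue_service = account.create_queue_service()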
  {
    "path": "CustomScript/azure/storage/queueservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureConflictError,\n    WindowsAzureError,\n    DEV_QUEUE_HOST,\n    QUEUE_SERVICE_HOST_BASE,\n    xml_escape,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    _ERROR_CONFLICT,\n    )\nfrom azure.http import (\n    HTTPRequest,\n    HTTP_RESPONSE_NO_CONTENT,\n    )\nfrom azure.storage import (\n    Queue,\n    QueueEnumResults,\n    QueueMessagesList,\n    StorageServiceProperties,\n    _update_storage_queue_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass QueueService(_StorageClient):\n\n    '''\n    This is the main class managing queue resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        super(QueueService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def get_queue_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Queue Service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def list_queues(self, prefix=None, marker=None, maxresults=None,\n                    include=None):\n        '''\n        Lists all of the queues in a given storage account.\n\n        prefix:\n            Filters the results to return only queues with names that begin\n            with the specified prefix.\n        marker:\n            A string value that identifies the portion of the list to be\n            returned with the next list operation. The operation returns a\n            NextMarker element within the response body if the list returned\n            was not complete. This value may then be used as a query parameter\n            in a subsequent call to request the next portion of the list of\n            queues. The marker value is opaque to the client.\n        maxresults:\n            Specifies the maximum number of queues to return. If maxresults is\n            not specified, the server will return up to 5,000 items.\n        include:\n            Optional. Include this parameter to specify that the queue's\n            metadata be returned as part of the response body.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(\n            response, QueueEnumResults, \"Queues\", Queue)\n\n    def create_queue(self, queue_name, x_ms_meta_name_values=None,\n                     fail_on_exist=False):\n        '''\n        Creates a queue under the given account.\n\n        queue_name: Name of the queue.\n        x_ms_meta_name_values:\n            Optional. 
A dict containing name-value pairs to associate with the\n            queue as metadata.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                response = self._perform_request(request)\n                if response.status == HTTP_RESPONSE_NO_CONTENT:\n                    return False\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            response = self._perform_request(request)\n            if response.status == HTTP_RESPONSE_NO_CONTENT:\n                raise WindowsAzureConflictError(\n                    _ERROR_CONFLICT.format(response.message))\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Permanently deletes the specified queue.\n\n        queue_name: Name of the queue.\n        fail_not_exist:\n            Specify whether to throw an exception when the queue doesn't exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue_metadata(self, queue_name):\n        '''\n        Retrieves user-defined metadata and queue properties on the specified\n        queue. Metadata is associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(\n            response,\n            prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])\n\n    def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):\n        '''\n        Sets user-defined metadata on the specified queue. 
Metadata is\n        associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        x_ms_meta_name_values:\n            Optional. A dict containing name-value pairs to associate with the\n            queue as metadata.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_message(self, queue_name, message_text, visibilitytimeout=None,\n                    messagettl=None):\n        '''\n        Adds a new message to the back of the message queue. A visibility\n        timeout can also be specified to make the message invisible until the\n        visibility timeout expires. A message must be in a format that can be\n        included in an XML request with UTF-8 encoding. The encoded message can\n        be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size\n        for previous versions.\n\n        queue_name: Name of the queue.\n        message_text: Message content.\n        visibilitytimeout:\n            Optional. If not specified, the default value is 0. Specifies the\n            new visibility timeout value, in seconds, relative to server time.\n            The new value must be larger than or equal to 0, and cannot be\n            larger than 7 days. The visibility timeout of a message cannot be\n            set to a value later than the expiry time. visibilitytimeout\n            should be set to a value smaller than the time-to-live value.\n        messagettl:\n            Optional. Specifies the time-to-live interval for the message, in\n            seconds. The maximum time-to-live allowed is 7 days. If this\n            parameter is omitted, the default time-to-live is 7 days.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_text', message_text)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('visibilitytimeout', _str_or_none(visibilitytimeout)),\n            ('messagettl', _str_or_none(messagettl))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_messages(self, queue_name, numofmessages=None,\n                     visibilitytimeout=None):\n        '''\n        Retrieves one or more messages from the front of the queue.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. 
A nonzero integer value that specifies the number of\n            messages to retrieve from the queue, up to a maximum of 32. If\n            fewer are visible, the visible messages are returned. By default,\n            a single message is retrieved from the queue with this operation.\n        visibilitytimeout:\n            Specifies the new visibility timeout value, in seconds, relative\n            to server time. The new value must be larger than or equal to 1\n            second, and cannot be larger than 7 days, or larger than 2 hours\n            on REST protocol versions prior to version 2011-08-18. The\n            visibility timeout of a message can be set to a value later than\n            the expiry time.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('numofmessages', _str_or_none(numofmessages)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def peek_messages(self, queue_name, numofmessages=None):\n        '''\n        Retrieves one or more messages from the front of the queue, but does\n        not alter the visibility of the message.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. A nonzero integer value that specifies the number of\n            messages to peek from the queue, up to a maximum of 32. By default,\n            a single message is peeked from the queue with this operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages?peekonly=true'\n        request.query = [('numofmessages', _str_or_none(numofmessages))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def delete_message(self, queue_name, message_id, popreceipt):\n        '''\n        Deletes the specified message.\n\n        queue_name: Name of the queue.\n        message_id: Message to delete.\n        popreceipt:\n            Required. 
A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('popreceipt', popreceipt)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [('popreceipt', _str_or_none(popreceipt))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def clear_messages(self, queue_name):\n        '''\n        Deletes all messages from the specified queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def update_message(self, queue_name, message_id, message_text, popreceipt,\n                       visibilitytimeout):\n        '''\n        Updates the visibility timeout of a message. You can also use this\n        operation to update the contents of a message.\n\n        queue_name: Name of the queue.\n        message_id: Message to update.\n        message_text: Content of message.\n        popreceipt:\n            Required. A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        visibilitytimeout:\n            Required. Specifies the new visibility timeout value, in seconds,\n            relative to server time. The new value must be larger than or equal\n            to 0, and cannot be larger than 7 days. The visibility timeout of a\n            message cannot be set to a value later than the expiry time. 
A\n            message can be updated until it has been deleted or has expired.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('message_text', message_text)\n        _validate_not_none('popreceipt', popreceipt)\n        _validate_not_none('visibilitytimeout', visibilitytimeout)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [\n            ('popreceipt', _str_or_none(popreceipt)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])\n\n    def set_queue_service_properties(self, storage_service_properties,\n                                     timeout=None):\n        '''\n        Sets the properties of a storage account's Queue service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n"
  },
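A produce/consume sketch for the queue API above. The queue name and credentials are placeholders; the message attributes (message_id, pop_receipt, message_text) follow the legacy SDK's QueueMessage model assumed here.

from azure.storage.queueservice import QueueService

queue_service = QueueService(account_name='myaccount', account_key='mykey')
queue_service.create_queue('tasks')
queue_service.put_message('tasks', 'process item 42')

# get_messages hides each returned message for the visibility timeout;
# delete it with its pop receipt before the window ends, or it becomes
# visible (and processable) again.
for message in queue_service.get_messages('tasks', numofmessages=1,
                                          visibilitytimeout=30):
    print(message.message_text)
    queue_service.delete_message('tasks', message.message_id,
                                 message.pop_receipt)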
  {
    "path": "CustomScript/azure/storage/sharedaccesssignature.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import _sign_string, url_quote\nfrom azure.storage import X_MS_VERSION\n\n#-------------------------------------------------------------------------\n# Constants for the share access signature\nSIGNED_START = 'st'\nSIGNED_EXPIRY = 'se'\nSIGNED_RESOURCE = 'sr'\nSIGNED_PERMISSION = 'sp'\nSIGNED_IDENTIFIER = 'si'\nSIGNED_SIGNATURE = 'sig'\nSIGNED_VERSION = 'sv'\nRESOURCE_BLOB = 'b'\nRESOURCE_CONTAINER = 'c'\nSIGNED_RESOURCE_TYPE = 'resource'\nSHARED_ACCESS_PERMISSION = 'permission'\n\n#--------------------------------------------------------------------------\n\n\nclass WebResource(object):\n\n    '''\n    Class that stands for the resource to get the share access signature\n\n    path: the resource path.\n    properties: dict of name and values. Contains 2 item: resource type and\n            permission\n    request_url: the url of the webresource include all the queries.\n    '''\n\n    def __init__(self, path=None, request_url=None, properties=None):\n        self.path = path\n        self.properties = properties or {}\n        self.request_url = request_url\n\n\nclass Permission(object):\n\n    '''\n    Permission class. Contains the path and query_string for the path.\n\n    path: the resource path\n    query_string: dict of name, values. Contains SIGNED_START, SIGNED_EXPIRY\n            SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER,\n            SIGNED_SIGNATURE name values.\n    '''\n\n    def __init__(self, path=None, query_string=None):\n        self.path = path\n        self.query_string = query_string\n\n\nclass SharedAccessPolicy(object):\n\n    ''' SharedAccessPolicy class. 
'''\n\n    def __init__(self, access_policy, signed_identifier=None):\n        self.id = signed_identifier\n        self.access_policy = access_policy\n\n\nclass SharedAccessSignature(object):\n\n    '''\n    The main class used to do the signing and generate the signature.\n\n    account_name:\n        the storage account name used to generate the shared access signature\n    account_key: the access key used to generate the shared access signature\n    permission_set: the permission cache used to sign the request url.\n    '''\n\n    def __init__(self, account_name, account_key, permission_set=None):\n        self.account_name = account_name\n        self.account_key = account_key\n        self.permission_set = permission_set\n\n    def generate_signed_query_string(self, path, resource_type,\n                                     shared_access_policy,\n                                     version=X_MS_VERSION):\n        '''\n        Generates the query string for path, resource type and shared access\n        policy.\n\n        path: the resource path\n        resource_type: could be blob or container\n        shared_access_policy: shared access policy\n        version:\n            x-ms-version for storage service, or None to get a signed query\n            string compatible with pre 2012-02-12 clients, where the version\n            is not included in the query string.\n        '''\n\n        query_string = {}\n        if shared_access_policy.access_policy.start:\n            query_string[\n                SIGNED_START] = shared_access_policy.access_policy.start\n\n        if version:\n            query_string[SIGNED_VERSION] = version\n        query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry\n        query_string[SIGNED_RESOURCE] = resource_type\n        query_string[\n            SIGNED_PERMISSION] = shared_access_policy.access_policy.permission\n\n        if shared_access_policy.id:\n            query_string[SIGNED_IDENTIFIER] = shared_access_policy.id\n\n        query_string[SIGNED_SIGNATURE] = self._generate_signature(\n            path, shared_access_policy, version)\n        return query_string\n\n    def sign_request(self, web_resource):\n        ''' sign request to generate request_url with sharedaccesssignature\n        info for web_resource.'''\n\n        if self.permission_set:\n            for shared_access_signature in self.permission_set:\n                if self._permission_matches_request(\n                        shared_access_signature, web_resource,\n                        web_resource.properties[\n                            SIGNED_RESOURCE_TYPE],\n                        web_resource.properties[SHARED_ACCESS_PERMISSION]):\n                    if web_resource.request_url.find('?') == -1:\n                        web_resource.request_url += '?'\n                    else:\n                        web_resource.request_url += '&'\n\n                    web_resource.request_url += self._convert_query_string(\n                        shared_access_signature.query_string)\n                    break\n        return web_resource\n\n    def _convert_query_string(self, query_string):\n        ''' Converts query string to str. 
The order of name, values is very\n        important and can't be wrong.'''\n\n        convert_str = ''\n        if SIGNED_START in query_string:\n            convert_str += SIGNED_START + '=' + \\\n                url_quote(query_string[SIGNED_START]) + '&'\n        convert_str += SIGNED_EXPIRY + '=' + \\\n            url_quote(query_string[SIGNED_EXPIRY]) + '&'\n        convert_str += SIGNED_PERMISSION + '=' + \\\n            query_string[SIGNED_PERMISSION] + '&'\n        convert_str += SIGNED_RESOURCE + '=' + \\\n            query_string[SIGNED_RESOURCE] + '&'\n\n        if SIGNED_IDENTIFIER in query_string:\n            convert_str += SIGNED_IDENTIFIER + '=' + \\\n                query_string[SIGNED_IDENTIFIER] + '&'\n        if SIGNED_VERSION in query_string:\n            convert_str += SIGNED_VERSION + '=' + \\\n                query_string[SIGNED_VERSION] + '&'\n        convert_str += SIGNED_SIGNATURE + '=' + \\\n            url_quote(query_string[SIGNED_SIGNATURE]) + '&'\n        return convert_str\n\n    def _generate_signature(self, path, shared_access_policy, version):\n        ''' Generates signature for a given path and shared access policy. '''\n\n        def get_value_to_append(value, no_new_line=False):\n            return_value = ''\n            if value:\n                return_value = value\n            if not no_new_line:\n                return_value += '\\n'\n            return return_value\n\n        if path[0] != '/':\n            path = '/' + path\n\n        canonicalized_resource = '/' + self.account_name + path\n\n        # Form the string to sign from shared_access_policy and canonicalized\n        # resource. The order of values is important.\n        string_to_sign = \\\n            (get_value_to_append(shared_access_policy.access_policy.permission) +\n             get_value_to_append(shared_access_policy.access_policy.start) +\n             get_value_to_append(shared_access_policy.access_policy.expiry) +\n             get_value_to_append(canonicalized_resource))\n\n        if version:\n            string_to_sign += get_value_to_append(shared_access_policy.id)\n            string_to_sign += get_value_to_append(version, True)\n        else:\n            string_to_sign += get_value_to_append(shared_access_policy.id, True)\n\n        return self._sign(string_to_sign)\n\n    def _permission_matches_request(self, shared_access_signature,\n                                    web_resource, resource_type,\n                                    required_permission):\n        ''' Check whether requested permission matches given\n        shared_access_signature, web_resource and resource type. '''\n\n        required_resource_type = resource_type\n        if required_resource_type == RESOURCE_BLOB:\n            required_resource_type += RESOURCE_CONTAINER\n\n        for name, value in shared_access_signature.query_string.items():\n            if name == SIGNED_RESOURCE and \\\n                required_resource_type.find(value) == -1:\n                return False\n            elif name == SIGNED_PERMISSION and \\\n                required_permission.find(value) == -1:\n                return False\n\n        return web_resource.path.find(shared_access_signature.path) != -1\n\n    def _sign(self, string_to_sign):\n        ''' use HMAC-SHA256 to sign the string and convert it as base64\n        encoded string. '''\n\n        return _sign_string(self.account_key, string_to_sign)\n"
  },
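A sketch of generate_signed_query_string for a read-only blob SAS. It assumes the AccessPolicy class exported by azure.storage (with start, expiry and permission fields); the dates, names and key are placeholders.

from azure.storage import AccessPolicy
from azure.storage.sharedaccesssignature import (
    RESOURCE_BLOB,
    SharedAccessPolicy,
    SharedAccessSignature,
)

sas = SharedAccessSignature(account_name='myaccount', account_key='mykey')
policy = SharedAccessPolicy(AccessPolicy(start='2013-06-01',
                                         expiry='2013-06-02',
                                         permission='r'))

# Returns a dict of SAS query parameters (se, sp, sr, sig, ...) that a
# client appends to the blob URL to read it without the account key.
query = sas.generate_signed_query_string('mycontainer/myblob',
                                         RESOURCE_BLOB, policy)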
  {
    "path": "CustomScript/azure/storage/storageclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\nimport sys\n\nfrom azure import (\n    WindowsAzureError,\n    DEV_ACCOUNT_NAME,\n    DEV_ACCOUNT_KEY,\n    _ERROR_STORAGE_MISSING_INFO,\n    )\nfrom azure.http import HTTPError\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import _storage_error_handler\n\n#--------------------------------------------------------------------------\n# constants for azure app setting environment variables\nAZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'\nAZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'\nEMULATED = 'EMULATED'\n\n#--------------------------------------------------------------------------\n\n\nclass _StorageClient(object):\n\n    '''\n    This is the base class for BlobManager, TableManager and QueueManager.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base='', dev_host=''):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self.account_name = account_name\n        self.account_key = account_key\n        self.requestid = None\n        self.protocol = protocol\n        self.host_base = host_base\n        self.dev_host = dev_host\n\n        # the app is not run in azure emulator or use default development\n        # storage account and key if app is run in emulator.\n        self.use_local_storage = False\n\n        # check whether it is run in emulator.\n        if EMULATED in os.environ:\n            self.is_emulated = os.environ[EMULATED].lower() != 'false'\n        else:\n            self.is_emulated = False\n\n        # get account_name and account key. 
If they are not set when\n        # constructing, read them from environment variables; if the app is\n        # running in the emulator, fall back to the default development\n        # storage account and key.\n        if not self.account_name or not self.account_key:\n            if self.is_emulated:\n                self.account_name = DEV_ACCOUNT_NAME\n                self.account_key = DEV_ACCOUNT_KEY\n                self.protocol = 'http'\n                self.use_local_storage = True\n            else:\n                self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)\n                self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)\n\n        if not self.account_name or not self.account_key:\n            raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)\n\n        self._httpclient = _HTTPClient(\n            service_instance=self,\n            account_key=self.account_key,\n            account_name=self.account_name,\n            protocol=self.protocol)\n        self._batchclient = None\n        self._filter = self._perform_request_worker\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = type(self)(self.account_name, self.account_key, self.protocol)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def _get_host(self):\n        if self.use_local_storage:\n            return self.dev_host\n        else:\n            return self.account_name + self.host_base\n\n    def _perform_request_worker(self, request):\n        return self._httpclient.perform_request(request)\n\n    def _perform_request(self, request, text_encoding='utf-8'):\n        '''\n        Sends the request and returns the response. Catches HTTPError and\n        hands it to the error handler.\n        '''\n        try:\n            if self._batchclient is not None:\n                return self._batchclient.insert_request_to_batch(request)\n            else:\n                resp = self._filter(request)\n\n            if sys.version_info >= (3,) and isinstance(resp, bytes) and \\\n                text_encoding:\n                resp = resp.decode(text_encoding)\n\n        except HTTPError as ex:\n            _storage_error_handler(ex)\n\n        return resp\n"
  },
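An illustrative retry filter for the `with_filter()` hook documented above: per the docstring, a filter receives the HTTPRequest and the next filter in the chain, so it can re-issue the whole request. `make_retry_filter` and its counts and delays are arbitrary placeholders, not part of the SDK:

```python
# Sketch of a retrying filter for _StorageClient.with_filter().
import time

def make_retry_filter(max_attempts=3, delay_seconds=2):
    def retry_filter(request, next_filter):
        for attempt in range(1, max_attempts + 1):
            try:
                # Hand the request to the next filter in the chain.
                return next_filter(request)
            except Exception:  # real code would catch specific errors
                if attempt == max_attempts:
                    raise
                time.sleep(delay_seconds)
    return retry_filter

# Usage (hypothetical credentials):
# service = TableService('account', 'key').with_filter(make_retry_filter())
```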
  {
    "path": "CustomScript/azure/storage/tableservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    TABLE_SERVICE_HOST_BASE,\n    DEV_TABLE_HOST,\n    _convert_class_to_xml,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.http.batchclient import _BatchClient\nfrom azure.storage import (\n    StorageServiceProperties,\n    _convert_entity_to_xml,\n    _convert_response_to_entity,\n    _convert_table_to_xml,\n    _convert_xml_to_entity,\n    _convert_xml_to_table,\n    _sign_storage_table_request,\n    _update_storage_table_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass TableService(_StorageClient):\n\n    '''\n    This is the main class managing Table resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=TABLE_SERVICE_HOST_BASE, dev_host=DEV_TABLE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. 
Defaults to localhost.\n        '''\n        super(TableService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def begin_batch(self):\n        if self._batchclient is None:\n            self._batchclient = _BatchClient(\n                service_instance=self,\n                account_key=self.account_key,\n                account_name=self.account_name)\n        return self._batchclient.begin_batch()\n\n    def commit_batch(self):\n        try:\n            ret = self._batchclient.commit_batch()\n        finally:\n            self._batchclient = None\n        return ret\n\n    def cancel_batch(self):\n        self._batchclient = None\n\n    def get_table_service_properties(self):\n        '''\n        Gets the properties of a storage account's Table service, including\n        Windows Azure Storage Analytics.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def set_table_service_properties(self, storage_service_properties):\n        '''\n        Sets the properties of a storage account's Table Service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def query_tables(self, table_name=None, top=None, next_table_name=None):\n        '''\n        Returns a list of tables under the specified account.\n\n        table_name: Optional.  The specific table to query.\n        top: Optional. Maximum number of tables to return.\n        next_table_name:\n            Optional. 
When top is used, the next table name is stored in\n            result.x_ms_continuation['NextTableName']\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        if table_name is not None:\n            uri_part_table_name = \"('\" + table_name + \"')\"\n        else:\n            uri_part_table_name = \"\"\n        request.path = '/Tables' + uri_part_table_name\n        request.query = [\n            ('$top', _int_or_none(top)),\n            ('NextTableName', _str_or_none(next_table_name))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_table)\n
    def create_table(self, table, fail_on_exist=False):\n        '''\n        Creates a new table in the storage account.\n\n        table:\n            Name of the table to create. Table name may contain only\n            alphanumeric characters and cannot begin with a numeric character.\n            It is case-insensitive and must be from 3 to 63 characters long.\n        fail_on_exist: Specify whether to throw an exception if the table\n            already exists.\n        '''\n        _validate_not_none('table', table)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/Tables'\n        request.body = _get_request_body(_convert_table_to_xml(table))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n
    def delete_table(self, table_name, fail_not_exist=False):\n        '''\n        table_name: Name of the table to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the table does not exist.\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/Tables(\\'' + _str(table_name) + '\\')'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n
    def get_entity(self, table_name, partition_key, row_key, select=''):\n        '''\n        Get an entity in a table; includes the $select options.\n\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        select: Property names to select.\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('select', select)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + \\\n            '(PartitionKey=\\'' + _str(partition_key) + \\\n            '\\',RowKey=\\'' + \\\n            _str(row_key) + '\\')?$select=' + \\\n            _str(select)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n
    def query_entities(self, table_name, filter=None, select=None, top=None,\n                       next_partition_key=None, next_row_key=None):\n        '''\n        Get entities in a table; includes the $filter and $select options.\n\n        table_name: Table to query.\n        filter:\n            Optional. Filter as described at\n            http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx\n        select: Optional. Property names to select from the entities.\n        top: Optional. Maximum number of entities to return.\n        next_partition_key:\n            Optional. When top is used, the next partition key is stored in\n            result.x_ms_continuation['NextPartitionKey']\n        next_row_key:\n            Optional. When top is used, the next row key is stored in\n            result.x_ms_continuation['NextRowKey']\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + '()'\n        request.query = [\n            ('$filter', _str_or_none(filter)),\n            ('$select', _str_or_none(select)),\n            ('$top', _int_or_none(top)),\n            ('NextPartitionKey', _str_or_none(next_partition_key)),\n            ('NextRowKey', _str_or_none(next_row_key))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_entity)\n
    def insert_entity(self, table_name, entity,\n                      content_type='application/atom+xml'):\n        '''\n        Inserts a new entity into a table.\n\n        table_name: Table name.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name)\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n
    def update_entity(self, table_name, partition_key, row_key, entity,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity in a table. The Update Entity operation\n        replaces the entire entity and can be used to remove properties.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to update. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the update should be\n            performed. To force an unconditional update, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n
    def merge_entity(self, table_name, partition_key, row_key, entity,\n                     content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity by updating the entity's properties. This\n        operation does not replace the existing entity as the Update Entity\n        operation does.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to merge. Can be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the merge should be\n            performed. 
To force an unconditional merge, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def delete_entity(self, table_name, partition_key, row_key,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Deletes an existing entity in a table.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the delete should be\n            performed. To force an unconditional delete, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('content_type', content_type)\n        _validate_not_none('if_match', if_match)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        self._perform_request(request)\n\n    def insert_or_replace_entity(self, table_name, partition_key, row_key,\n                                 entity, content_type='application/atom+xml'):\n        '''\n        Replaces an existing entity or inserts a new entity if it does not\n        exist in the table. Because this operation can insert or update an\n        entity, it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. 
Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def insert_or_merge_entity(self, table_name, partition_key, row_key,\n                               entity, content_type='application/atom+xml'):\n        '''\n        Merges an existing entity or inserts a new entity if it does not exist\n        in the table. Because this operation can insert or update an entity,\n        it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def _perform_request_worker(self, request):\n        auth = _sign_storage_table_request(request,\n                                           self.account_name,\n                                           self.account_key)\n        request.headers.append(('Authorization', auth))\n        return self._httpclient.perform_request(request)\n"
  },
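A hypothetical usage sketch of the batch API defined above; account, table, and entity values are placeholders. Per the docstrings, entities may be plain dicts, and operations issued between `begin_batch()` and `commit_batch()` are buffered by the `_BatchClient` and submitted together:

```python
# Sketch only: assumes valid credentials and the azure SDK on sys.path.
from azure.storage.tableservice import TableService

ts = TableService(account_name='myaccount', account_key='<base64-key>')
ts.create_table('tasks')

# Batched inserts share the same partition key, as Azure table batches require.
ts.begin_batch()
ts.insert_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '1', 'text': 'a'})
ts.insert_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '2', 'text': 'b'})
ts.commit_batch()
```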
  {
    "path": "CustomScript/customscript.py",
    "content": "#!/usr/bin/env python\n#\n# CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport os.path\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport traceback\n\nfrom azure.storage import BlobService\nfrom codecs import *\nfrom Utils.WAAgentUtil import waagent\n\nimport Utils.HandlerUtil as Util\nimport Utils.ScriptUtil as ScriptUtil\n\nif sys.version_info[0] == 3:\n    import urllib.request as urllib\n    from urllib.parse import urlparse\n\nelif sys.version_info[0] == 2:\n    import urllib2 as urllib\n    from urlparse import urlparse\n\nExtensionShortName = 'CustomScriptForLinux'\n\n# Global Variables\nDownloadDirectory = 'download'\n\n# CustomScript-specific Operation\nDownloadOp = \"Download\"\nRunScriptOp = \"RunScript\"\n\n# Change permission of log path\next_log_path = '/var/log/azure/'\nif os.path.exists(ext_log_path):\n    os.chmod('/var/log/azure/', 0o700)\n\n#Main function is the only entrence to this extension handler\ndef main():\n    #Global Variables definition\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n    hutil = None\n\n    try:\n        for a in sys.argv[1:]:\n            if re.match(\"^([-/]*)(disable)\", a):\n                dummy_command(\"Disable\", \"success\", \"Disable succeeded\")\n            elif re.match(\"^([-/]*)(uninstall)\", a):\n                dummy_command(\"Uninstall\", \"success\", \"Uninstall succeeded\")\n            elif re.match(\"^([-/]*)(install)\", a):\n                dummy_command(\"Install\", \"success\", \"Install succeeded\")\n            elif re.match(\"^([-/]*)(enable)\", a):\n                hutil = parse_context(\"Enable\")\n                enable(hutil)\n            elif re.match(\"^([-/]*)(daemon)\", a):\n                hutil = parse_context(\"Executing\")\n                daemon(hutil)\n            elif re.match(\"^([-/]*)(update)\", a):\n                dummy_command(\"Update\", \"success\", \"Update succeeded\")\n    except Exception as e:\n        err_msg = \"Failed with error: {0}, {1}\".format(e, traceback.format_exc())\n        waagent.Error(err_msg)\n\n        if hutil is not None:\n            hutil.error(err_msg)\n            hutil.do_exit(1, 'Enable','failed','0',\n                          'Enable failed: {0}'.format(err_msg))\n\n\ndef dummy_command(operation, status, msg):\n    hutil = parse_context(operation)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error, ExtensionShortName, console_logger=waagent.LogToConsole, file_logger=waagent.LogToFile)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\ndef enable(hutil):\n    \"\"\"\n    Ensure the same configuration is executed only once\n    If the previous enable failed, we do not have retry logic here,\n    since the custom script may not work in an intermediate 
state.\n    \"\"\"\n    hutil.exit_if_enabled()\n\n    start_daemon(hutil)\n\n\ndef download_files_with_retry(hutil, retry_count, wait):\n    hutil.log((\"Will try to download files, \"\n               \"number of retries = {0}, \"\n               \"wait SECONDS between retrievals = {1}s\").format(retry_count, wait))\n    for download_retry_count in range(0, retry_count + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception as e:\n            error_msg = \"{0}, retry = {1}, maxRetry = {2}.\".format(e, download_retry_count, retry_count)\n            hutil.error(error_msg)\n            if download_retry_count < retry_count:\n                hutil.log(\"Sleep {0} seconds\".format(wait))\n                time.sleep(wait)\n            else:\n                waagent.AddExtensionEvent(name=ExtensionShortName,\n                                          op=DownloadOp,\n                                          isSuccess=False,\n                                          version=hutil.get_extension_version(),\n                                          message=\"(01100)\"+error_msg)\n                raise\n\n    msg = (\"Succeeded to download files, \"\n           \"retry count = {0}\").format(download_retry_count)\n    hutil.log(msg)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=DownloadOp,\n                              isSuccess=True,\n                              version=hutil.get_extension_version(),\n                              message=\"(01303)\"+msg)\n    return retry_count - download_retry_count\n\n\ndef check_idns_with_retry(hutil, retry_count, wait):\n    is_idns_ready = False\n    for check_idns_retry_count in range(0, retry_count + 1):\n        is_idns_ready = check_idns()\n        if is_idns_ready:\n            break\n        else:\n            if check_idns_retry_count < retry_count:\n                hutil.error(\"Internal DNS is not ready, retry to check.\")\n                hutil.log(\"Sleep {0} seconds\".format(wait))\n                time.sleep(wait)\n\n    if is_idns_ready:\n        msg = (\"Internal DNS is ready, \"\n               \"retry count = {0}\").format(check_idns_retry_count)\n        hutil.log(msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=\"CheckIDNS\",\n                                  isSuccess=True,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01306)\"+msg)\n    else:\n        error_msg = (\"Internal DNS is not ready, \"\n                     \"retry count = {0}, ignore it.\").format(check_idns_retry_count)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=\"CheckIDNS\",\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01306)\"+error_msg)\n\n\ndef check_idns():\n    ret = waagent.Run(\"host $(hostname)\")\n    return not ret\n\n\ndef download_files(hutil):\n    public_settings = hutil.get_public_settings()\n    if public_settings is None:\n        raise ValueError(\"Public configuration couldn't be None.\")\n    cmd = get_command_to_execute(hutil)\n    blob_uris = public_settings.get('fileUris')\n\n    protected_settings = hutil.get_protected_settings()\n    storage_account_name = None\n    storage_account_key = None\n    if protected_settings:\n   
     storage_account_name = protected_settings.get(\"storageAccountName\")\n        storage_account_key = protected_settings.get(\"storageAccountKey\")\n        if storage_account_name is not None:\n            storage_account_name = storage_account_name.strip()\n        if storage_account_key is not None:\n            storage_account_key = storage_account_key.strip()\n\n    if (not blob_uris or not isinstance(blob_uris, list) or len(blob_uris) == 0):\n        error_msg = \"fileUris value provided is empty or invalid.\"\n        hutil.log(error_msg + \" Continue with executing command...\")\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=DownloadOp,\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01001)\"+error_msg)\n        return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading files...')\n\n    if storage_account_name and storage_account_key:\n        hutil.log(\"Downloading scripts from azure storage...\")\n        download_blobs(storage_account_name,\n                       storage_account_key,\n                       blob_uris,\n                       cmd,\n                       hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log(\"No azure storage account and key specified in protected \"\n                  \"settings. Downloading scripts from external links...\")\n        download_external_files(blob_uris, cmd, hutil)\n    else:\n        #Storage account and key should appear in pairs\n        error_msg = \"Azure storage account and key should appear in pairs.\"\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=DownloadOp,\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01000)\"+error_msg)\n        raise ValueError(error_msg)\n\n\ndef start_daemon(hutil):\n    cmd = get_command_to_execute(hutil)\n    if cmd:\n        args = [os.path.join(os.getcwd(), \"shim.sh\"), \"-daemon\"]\n\n        # This process will start a new background process by calling\n        #     shim.sh -daemon\n        # to run the script and will exit itself immediately.\n\n        # Redirect stdout and stderr to /dev/null. 
Otherwise daemon process\n        # will throw Broke pipe exception when parent process exit.\n        devnull = open(os.devnull, 'w')\n        subprocess.Popen(args, stdout=devnull, stderr=devnull)\n        hutil.do_exit(0, 'Enable', 'transitioning', '0',\n                      'Launching the script...')\n    else:\n        error_msg = \"commandToExecute is empty or invalid\"\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=RunScriptOp,\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01002)\"+error_msg)\n        raise ValueError(error_msg)\n\n\ndef daemon(hutil):\n    retry_count = 10\n    wait = 20\n    enable_idns_check = True\n\n    public_settings = hutil.get_public_settings()\n    if public_settings:\n        if 'retrycount' in public_settings:\n            retry_count = public_settings.get('retrycount')\n        if 'wait' in public_settings:\n            wait = public_settings.get('wait')\n        if 'enableInternalDNSCheck' in public_settings:\n            # removed strtobool/distutils dependency, implementation is based on strtobool specification\n            enable_idns_check_setting = public_settings.get('enableInternalDNSCheck')\n            enable_idns_check = True if ((enable_idns_check_setting.lower() == \"yes\") |\n                                 (enable_idns_check_setting.lower() == \"y\") |\n                                 (enable_idns_check_setting.lower() == \"true\") |\n                                 (enable_idns_check_setting.lower() == \"t\") |\n                                 (enable_idns_check_setting.lower() == \"on\") |\n                                 (enable_idns_check_setting.lower() == \"1\")) else False\n\n    prepare_download_dir(hutil.get_seq_no())\n    retry_count = download_files_with_retry(hutil, retry_count, wait)\n\n    # The internal DNS needs some time to be ready.\n    # Wait and retry to check if there is time in retry window.\n    # The check may be removed safely if iDNS is always ready.\n    if enable_idns_check:\n        check_idns_with_retry(hutil, retry_count, wait)\n\n    cmd = get_command_to_execute(hutil)\n    args = ScriptUtil.parse_args(cmd)\n    if args:\n        ScriptUtil.run_command(hutil, args, prepare_download_dir(hutil.get_seq_no()), 'Daemon', ExtensionShortName, hutil.get_extension_version())\n    else:\n        error_msg = \"commandToExecute is empty or invalid.\"\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=RunScriptOp,\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=\"(01002)\"+error_msg)\n        raise ValueError(error_msg)\n\n\ndef download_blobs(storage_account_name, storage_account_key,\n                   blob_uris, command, hutil):\n    for blob_uri in blob_uris:\n        if blob_uri:\n            download_blob(storage_account_name,\n                          storage_account_key,\n                          blob_uri,\n                          command,\n                          hutil)\n\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, command, hutil):\n    try:\n        seqNo = hutil.get_seq_no()\n        download_dir = prepare_download_dir(seqNo)\n        result = 
download_and_save_blob(storage_account_name,\n                                        storage_account_key,\n                                        blob_uri,\n                                        download_dir,\n                                        hutil)\n        blob_name, _, _, download_path = result\n        preprocess_files(download_path, hutil)\n        if command and blob_name in command:\n            os.chmod(download_path, 0o100)\n    except Exception as e:\n        error_msg = \"Failed to download blob with uri: {0} with error {1}\".format(blob_uri, e)\n        raise Exception(error_msg)\n\n\ndef download_and_save_blob(storage_account_name,\n                           storage_account_key,\n                           blob_uri,\n                           download_dir,\n                           hutil):\n    container_name = get_container_name_from_uri(blob_uri, hutil)\n    blob_name = get_blob_name_from_uri(blob_uri, hutil)\n    host_base = get_host_base_from_uri(blob_uri)\n    # If blob_name is a path, extract the file_name\n    last_sep = blob_name.rfind('/')\n    if last_sep != -1:\n        file_name = blob_name[last_sep+1:]\n    else:\n        file_name = blob_name\n    download_path = os.path.join(download_dir, file_name)\n    # Guest agent already ensure the plugin is enabled one after another.\n    # The blob download will not conflict.\n    blob_service = BlobService(storage_account_name,\n                               storage_account_key,\n                               host_base=host_base)\n    blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    return blob_name, container_name, host_base, download_path\n\n\ndef download_external_files(uris, command, hutil):\n    for uri in uris:\n        if uri:\n            download_external_file(uri, command, hutil)\n\n\ndef download_external_file(uri, command, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    path = get_path_from_uri(uri)\n    file_name = path.split('/')[-1]\n    file_path = os.path.join(download_dir, file_name)\n    try:\n        download_and_save_file(uri, file_path)\n        preprocess_files(file_path, hutil)\n        if command and file_name in command:\n            os.chmod(file_path, 0o100)\n    except Exception as e:\n        error_msg = (\"Failed to download external file with uri: {0} \"\n                     \"with error {1}\").format(uri, e)\n        raise Exception(error_msg)\n\n\ndef download_and_save_file(uri, file_path, timeout=30, buf_size=1024):\n    src = urllib.urlopen(uri, timeout=timeout)\n    with open(file_path, 'wb') as dest:\n        buf = src.read(buf_size)\n        while(buf):\n            dest.write(buf)\n            buf = src.read(buf_size)\n\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        The file is preprocessed if it satisfies any of the following\n        condistions:\n            the file's extension is '.sh' or '.py'\n            the content of the file starts with '#!'\n    \"\"\"\n    ret = to_process(file_path)\n    if ret:\n        dos2unix(file_path)\n        hutil.log(\"Converting {0} from DOS to Unix formats: Done\".format(file_path))\n        remove_bom(file_path)\n        hutil.log(\"Removing BOM of {0}: Done\".format(file_path))\n\n\ndef to_process(file_path, extensions=['.sh', \".py\"]):\n    for extension in extensions:\n        if file_path.endswith(extension):\n            return True\n    with open(file_path, 'rb') as f:\n        contents = f.read(64)\n    if b'#!' 
in contents:\n        return True\n    return False\n\n\ndef dos2unix(file_path):\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    temp_file_path = file_path + \".tmp\"\n    with open(temp_file_path, 'wb') as f_temp:\n        f_temp.write(contents.encode())\n    shutil.move(temp_file_path, file_path)\n\n\ndef remove_bom(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    bom_list = [BOM, BOM_BE, BOM_LE, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF8]\n    for bom in bom_list:\n        if contents.startswith(bom):\n            break\n    else:\n        return\n    new_contents = None\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            new_contents = contents.decode(encoding).encode('utf-8')\n            break\n        except UnicodeDecodeError:\n            continue\n    if new_contents is not None:\n        temp_file_path = file_path + \".tmp\"\n        with open(temp_file_path, 'wb') as f_temp:\n            f_temp.write(new_contents)\n        shutil.move(temp_file_path, file_path)\n\n\ndef get_blob_name_from_uri(uri, hutil):\n    return get_properties_from_uri(uri, hutil)['blob_name']\n\n\ndef get_container_name_from_uri(uri, hutil):\n    return get_properties_from_uri(uri, hutil)['container_name']\n\n\ndef get_host_base_from_uri(blob_uri):\n    uri = urlparse(blob_uri)\n    netloc = uri.netloc\n    if netloc is None:\n        return None\n    return netloc[netloc.find('.'):]\n\n\ndef get_properties_from_uri(uri, hutil):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.error(\"Failed to extract container, blob, from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse(uriStr)\n    return uri.path\n\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"create directory if no exists\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n\ndef get_command_to_execute(hutil):\n    public_settings = hutil.get_public_settings()\n    protected_settings = hutil.get_protected_settings()\n    cmd_public = public_settings.get('commandToExecute')\n    cmd_protected = None\n    if protected_settings is not None:\n        cmd_protected = protected_settings.get('commandToExecute')\n    if cmd_public and cmd_protected:\n        err_msg = (\"commandToExecute was specified both in public settings \"\n            \"and protected settings. It can only be specified in one of them.\")\n        hutil.error(err_msg)\n        hutil.do_exit(1, 'Enable','failed','0',\n            'Enable failed: {0}'.format(err_msg))\n\n    if cmd_public:\n        hutil.log(\"Command to execute:\" + cmd_public)\n        return cmd_public\n    else:\n        return cmd_protected\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
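A hypothetical example of the settings consumed by customscript.py above; all values are placeholders. The keys match what `download_files()`, `daemon()`, and `get_command_to_execute()` read from the public and protected settings:

```python
# Illustrative handler settings for CustomScriptForLinux (values made up).
public_settings = {
    "fileUris": ["https://myaccount.blob.core.windows.net/scripts/hello.sh"],
    "commandToExecute": "sh hello.sh",
    "retrycount": 10,                  # optional; default 10
    "wait": 20,                        # optional; default 20 seconds
    "enableInternalDNSCheck": "true",  # optional; default enabled
}
protected_settings = {
    # Provide both or neither; the extension rejects unpaired values.
    "storageAccountName": "myaccount",
    "storageAccountKey": "<base64-key>",
}
```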
  {
    "path": "CustomScript/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n  <Type>CustomScriptForLinux</Type>\n  <Version>1.5.5</Version>\n  <Label>Microsoft Azure Custom Script Extension for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Please consider using Microsoft.Azure.Extensions.CustomScript instead.</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "CustomScript/references",
    "content": "Utils/\n"
  },
  {
    "path": "CustomScript/shim.sh",
    "content": "#!/usr/bin/env bash\n\n# The shim scripts provide a single entry point for CSE and will invoke the customscript.py entry point using the\n# appropriate python interpreter version.\n# Arguments passed to the shim layer are redirected to the invoked script without any validation.\n\nCOMMAND=\"./customscript.py\"\nPYTHON=\"\"\nARG=\"$@\"\n\nfunction find_python(){\n    local python_exec_command=$1\n\n    # Check if there is python defined.\n    if command -v python >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python\"\n    else\n        # Python was not found. Searching for Python3 now.\n        if command -v python3 >/dev/null 2>&1 ; then\n            eval ${python_exec_command}=\"python3\"\n        fi\n    fi\n}\n\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ]\nthen\n   echo \"No Python interpreter found on the box\" >&2\n   exit 51 # Not Supported\nelse\n    echo \"Found: `${PYTHON} --version`\"\nfi\n\n${PYTHON} ${COMMAND} ${ARG}\nexit $?\n\n# DONE"
  },
  {
    "path": "CustomScript/test/HandlerEnvironment.json",
    "content": "[{  \"name\": \"Microsoft.OSTCExtensions.CustomScriptForLinuxTest\", \"seqNo\": \"0\", \"version\": 1.0,  \"handlerEnvironment\": {    \"logFolder\": \"/root/CustomScriptForLinuxTest\",    \"configFolder\": \"/root/CustomScriptForLinuxTest/config\",    \"statusFolder\": \"/root/CustomScriptForLinuxTest/status\",    \"heartbeatFile\": \"/root/CustomScriptForLinuxTest/heartbeat.log\"}}]\n"
  },
  {
    "path": "CustomScript/test/MockUtil.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nclass MockUtil:\n    def __init__(self, test):\n        self.test = test\n\n    def get_log_dir(self):\n        return \"/tmp\"\n\n    def log(self, msg):\n        print(msg)\n\n    def error(self, msg):\n        print(msg)\n\n    def get_seq_no(self):\n        return \"0\"\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.test.assertNotEqual(None, message)\n\n    def do_exit(self,exit_code,operation,status,code,message):\n        self.test.assertNotEqual(None, message)\n"
  },
  {
    "path": "CustomScript/test/create_test_blob.py",
    "content": "import blob\nimport blob_mooncake\nimport customscript as cs\nfrom azure.storage import BlobService\n\ndef create_blob(blob, txt):\n    uri = blob.uri\n    host_base = cs.get_host_base_from_uri(uri)\n    service = BlobService(blob.name,\n                          blob.key,\n                          host_base=host_base)\n    \n    container_name = cs.get_container_name_from_uri(uri)\n    blob_name = cs.get_blob_name_from_uri(uri)\n    service.put_block_blob_from_text(container_name,\n                                     blob_name,\n                                     txt)\n\nif __name__ == \"__main__\":\n    create_blob(blob, \"public azure\\n\") \n    create_blob(blob_mooncake, \"mooncake\\n\") \n"
  },
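The tests and helpers above import `blob` and `blob_mooncake` modules that are not checked in (they carry storage credentials). Based on the attributes used (`blob.name`, `blob.key`, `blob.uri`), a stand-in `blob.py` would only need three module-level values; everything below is a placeholder:

```python
# Hypothetical blob.py test fixture (all values are placeholders).
name = "mystorageaccount"                # storage account name
key = "<base64-account-key>"             # storage account key
uri = ("https://mystorageaccount.blob.core.windows.net"
       "/testcontainer/test.txt")        # full uri of a blob to download
```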
  {
    "path": "CustomScript/test/env.py",
    "content": "#!/usr/bin/env python\n#\n# CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n"
  },
  {
    "path": "CustomScript/test/run_all.sh",
    "content": "#!/bin/bash\n#\n# This script is used to set up a test env for extensions\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nscript=$(dirname $0)\nroot=$script\ncd $root\nroot=`pwd`\n\necho \"Run unit test:\"\nls test_*.py\nls test_*.py | sed -e 's/\\.py//'|xargs python -m unittest\n"
  },
  {
    "path": "CustomScript/test/test_blob_download.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport customscript as cs\nimport blob as test_blob\nimport blob_mooncake as test_blob_mooncake\n\nclass TestBlobDownload(unittest.TestCase):\n    def test_download_blob(self):\n        download_dir = \"/tmp\"\n        cs.download_and_save_blob(test_blob.name, \n                                  test_blob.key, \n                                  test_blob.uri,\n                                  download_dir)\n        \n        cs.download_and_save_blob(test_blob_mooncake.name, \n                                  test_blob_mooncake.key, \n                                  test_blob_mooncake.uri,\n                                  download_dir)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "CustomScript/test/test_file_download.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport os\nimport tempfile\nimport customscript as cs\n\nclass TestFileDownload(unittest.TestCase):\n    def test_download_blob(self):\n        pass\n\n    def download_to_tmp(self, uri):\n        tmpFile = tempfile.TemporaryFile()\n        file_path = os.path.abspath(tmpFile.name)\n        cs.download_and_save_file(uri, file_path)\n        file_size = os.path.getsize(file_path)\n        self.assertNotEqual(file_size, 0)\n        tmpFile.close()\n        os.unlink(tmpFile.name)\n        \n    def test_download_bin_file(self):\n        uri = \"http://www.bing.com/rms/Homepage$HPBottomBrand_default/ic/1f76acf2/d3a8cfeb.png\"\n        self.download_to_tmp(uri)\n\n    def test_download_text_file(self):\n        uri = \"http://www.bing.com/\"\n        self.download_to_tmp(uri)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "CustomScript/test/test_preprocess_file.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport os\nimport zipfile\nimport codecs\nimport shutil\n\nfrom MockUtil import MockUtil\nimport customscript as cs\n\nclass TestPreprocessFile(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        try:\n            os.remove('master.zip')\n            shutil.rmtree('encoding')\n        except:\n            pass\n        os.system('wget https://github.com/bingosummer/scripts/archive/master.zip')\n        zipFile = zipfile.ZipFile('master.zip')\n        zipFile.extractall()\n        zipFile.close()\n        shutil.move('scripts-master', 'encoding')\n\n    def test_bin_file(self):\n        print(\"\\nTest: Is it a binary file\")\n        file_path = \"encoding/mslogo.png\"\n        self.assertFalse(cs.is_text_file(file_path)[0])\n\n    def test_text_file(self):\n        print(\"\\nTest: Is it a text file\")\n        files = [file for file in os.listdir('encoding') if file.endswith('py') or file.endswith('sh') or file.endswith('txt')]\n        for file in files:\n            file_path = os.path.join('encoding', file)\n            try:\n                self.assertTrue(cs.is_text_file(file_path)[0])\n            except:\n                print(file)\n                raise\n\n    def test_bom(self):\n        print(\"\\nTest: Remove BOM\")\n        hutil = MockUtil(self)\n        files = [file for file in os.listdir('encoding') if 'bom' in file]\n        for file in files:\n            file_path = os.path.join('encoding', file)\n            cs.preprocess_files(file_path, hutil)\n            with open(file_path, 'r') as f:\n                contents = f.read()\n            if \"utf8\" in file:\n                self.assertFalse(contents.startswith(codecs.BOM_UTF8))\n            if \"utf16_le\" in file:\n                self.assertFalse(contents.startswith(codecs.BOM_LE))\n            if \"utf16_be\" in file:\n                self.assertFalse(contents.startswith(codecs.BOM_BE))\n\n    def test_windows_line_break(self):\n        print(\"\\nTest: Convert text files from DOS to Unix formats\")\n        hutil = MockUtil(self)\n        files = [file for file in os.listdir('encoding') if 'dos' in file]\n        for file in files:\n            file_path = os.path.join('encoding', file)\n            cs.preprocess_files(file_path, hutil)\n            with open(file_path, 'r') as f:\n                contents = f.read()\n            self.assertFalse(\"\\r\\n\" in contents)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "CustomScript/test/test_uri_utils.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport customscript as cs\n\n\nclass TestUriUtils(unittest.TestCase):\n    def test_get_path_from_uri(self):\n        uri = \"http://qingfu2.blob.core.windows.net/vhds/abc.sh?st=2014-06-27Z&se=2014-06-27&sr=c&sp=r&sig=KBwcWOx\"\n        path = cs.get_path_from_uri(uri)\n        self.assertEqual(path, \"/vhds/abc.sh\")\n\n    def test_get_blob_name_from_uri(self):\n        uri = \"http://qingfu2.blob.core.windows.net/vhds/abc.sh?st=2014-06-27Z&se=2014-06-27&sr=c&sp=r&sig=KBwcWOx\"\n        blob = cs.get_blob_name_from_uri(uri)\n        self.assertEqual(blob, \"abc.sh\")\n\n    def test_get_container_name_from_uri(self):\n        uri = \"http://qingfu2.blob.core.windows.net/vhds/abc.sh?st=2014-06-27Z&se=2014-06-27&sr=c&sp=r&sig=KBwcWOx\"\n        container = cs.get_container_name_from_uri(uri)\n        self.assertEqual(container, \"vhds\")\n\n    def test_get_host_base_from_uri(self):\n        blob_uri = \"http://qingfu2.blob.core.windows.net/vhds/abc.sh?st=2014-06-27Z&se=2014-06-27&sr=c&sp=r&sig=KBwcWOx\"\n        host_base = cs.get_host_base_from_uri(blob_uri)\n        self.assertEqual(host_base, \".blob.core.windows.net\")\n        blob_uri = \"https://yue.blob.core.chinacloudapi.cn/\"\n        host_base = cs.get_host_base_from_uri(blob_uri)\n        self.assertEqual(host_base, \".blob.core.chinacloudapi.cn\")\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "CustomScript/test/timeout.sh",
    "content": "#!/bin/bash\n\nfor i in $(seq 1500)\ndo\n    echo `date` + The script is running...\n    >&2 echo `date` + ERROR:The script is running...\n    sleep 1\ndone\n"
  },
  {
    "path": "DSC/HandlerManifest.json",
    "content": "[\n  {\n    \"name\" : \"DSCForLinux\",   \n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"disableCommand\": \"./extension_shim.sh -c ./dsc.py -d\",\n      \"enableCommand\": \"./extension_shim.sh -c ./dsc.py -e\",\n      \"installCommand\": \"./extension_shim.sh -c ./dsc.py -i\",\n      \"uninstallCommand\": \"./extension_shim.sh -c ./dsc.py -u\",\n      \"updateCommand\": \"./extension_shim.sh -c ./dsc.py -p\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false\n    }\n  }\n]\n"
  },
  {
    "path": "DSC/Makefile",
    "content": "all: package\n\nSOURCES = \\\n\thttpclientfactory.py \\\n    subprocessfactory.py \\\n\tcurlhttpclient.py \\\n    serializerfactory.py \\\n    httpclient.py \\\n    urllib2httpclient.py \\\n\turllib3httpclient.py \\\n    dsc.py \\\n\ttest \\\n\tHandlerManifest.json \\\n\tmanifest.xml \\\n\tazure \\\n\tpackages \\\n\t../Utils \\\n    ../Common/WALinuxAgent-2.0.16/waagent\n\nclean:\n\trm -rf output\n\npackage: $(SOURCES) \n\tmkdir -p output\n\tcp -t output -r $(SOURCES)\n         \n\tcd output && zip -r ../DSC.zip * > /dev/null\n        \n.PHONY: all clean package\n\n"
  },
  {
    "path": "DSC/README.md",
    "content": "# DSCForLinux Extension\nAllow the owner of the Azure Virtual Machines to configure the VM using Desired State Configuration (DSC) for Linux.\n\nLatest version is 2.71\n\nAbout how to create MOF document, please refer to below documents.\n* [Get started with Desired State Configuration (DSC) for Linux](https://technet.microsoft.com/en-us/library/mt126211.aspx)\n* [Built-In Desired State Configuration Resources for Linux](https://msdn.microsoft.com/en-us/powershell/dsc/lnxbuiltinresources)\n* [DSC for Linux releases] (https://github.com/Microsoft/PowerShell-DSC-for-Linux/releases)\n\nDSCForLinux Extension can:\n* Register the Linux VM to Azure Automation account in order to pull configurations from Azure Automation service (Register ExtensionAction)\n* Push MOF configurations to the Linux VM (Push ExtensionAction)\n* Applies Meta MOF configuration to the Linux VM to configure Pull Server in order to pull Node Configuration (Pull ExtensionAction)\n* Install custom DSC modules to the Linux VM (Install ExtensionAction)\n* Remove custom DSC modules to the Linux VM (Remove ExtensionAction)\n\n# User Guide\n\n## 1. Configuration schema\n\n### 1.1. Public configuration\n\nHere're all the supported public configuration parameters:\n\n* `FileUri`: (optional, string) the uri of the MOF file/Meta MOF file/custom resource ZIP file.\n* `ResourceName`: (optional, string) the name of the custom resource module\n* `ExtensionAction`: (optional, string) Specifies what an extension does. valid values: Register, Push, Pull, Install, Remove. If not specified, it's considered as Push Action by default.\n* `NodeConfigurationName`: (optional, string) the name of a node configuration to apply.\n* `RefreshFrequencyMins`: (optional, int) Specifies how often (in minutes) DSC attempts to obtain the configuration from the pull server. \n       If configuration on the pull server differs from the current one on the target node, it is copied to the pending store and applied.\n* `ConfigurationMode`: (optional, string) Specifies how DSC should apply the configuration. Valid values are: ApplyOnly, ApplyAndMonitor, ApplyAndAutoCorrect.\n* `ConfigurationModeFrequencyMins`: (optional, int) Specifies how often (in minutes) DSC ensures that the configuration is in the desired state.\n\n> **NOTE:** If you are using a version < 2.3, mode parameter is same as ExtensionAction. Mode seems to be a overloaded term. Therefore to avoid the confusion, ExtensionAction is being used from 2.3 version onwards. For backward compatibility, the extension supports both mode and ExtensionAction. \n\n### 1.2 Protected configuration\n\nHere're all the supported protected configuration parameters:\n\n* `StorageAccountName`: (optional, string) the name of the storage account that contains the file\n* `StorageAccountKey`: (optional, string) the key of the storage account that contains the file\n* `RegistrationUrl`: (optional, string) the URL of the Azure Automation account\n* `RegistrationKey`: (optional, string) the access key of the Azure Automation account\n\n## 2. Deploying the Extension to a VM\n\nYou can deploy it using Azure CLI, Azure PowerShell and ARM template.\n\n### 2.1. Using [**Azure CLI**][azure-cli]\nBefore deploying DSCForLinux Extension, you should configure your `public.json` and `protected.json`, according to the different scenarios in section 3.\n\n#### 2.1.1. Classic\nThe Classic mode is also called Azure Service Management mode. 
You can switch to it by running:\n```\n$ azure config mode asm\n```\n\nYou can deploy DSCForLinux Extension by running:\n```\n$ azure vm extension set <vm-name> DSCForLinux Microsoft.OSTCExtensions <version> \\\n--private-config-path protected.json --public-config-path public.json\n```\n\nTo learn the latest extension version available, run:\n```\n$ azure vm extension list\n```\n\n#### 2.1.2. Resource Manager\nYou can switch to Azure Resource Manager mode by running:\n```\n$ azure config mode arm\n```\n\nYou can deploy DSCForLinux Extension by running:\n```\n$ azure vm extension set <resource-group> <vm-name> \\\nDSCForLinux Microsoft.OSTCExtensions <version> \\\n--private-config-path protected.json --public-config-path public.json\n```\n\n> **NOTE:** In ARM mode, `azure vm extension list` is not available for now.\n\n### 2.2. Using [**Azure PowerShell**][azure-powershell]\n\n#### 2.2.1. Classic\n\nYou can log in to your Azure account (Azure Service Management mode) by running:\n\n```powershell\nAdd-AzureAccount\n```\n\nAnd deploy DSCForLinux Extension by running:\n\n```powershell\n$vmname = '<vm-name>'\n$vm = Get-AzureVM -ServiceName $vmname -Name $vmname\n\n$extensionName = 'DSCForLinux'\n$publisher = 'Microsoft.OSTCExtensions'\n$version = '<version>'\n\n# You need to change the content of the $privateConfig and $publicConfig\n# according to different scenarios in section 3\n$privateConfig = '{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}'\n\n$publicConfig = '{\n  \"ExtensionAction\": \"Push\",\n  \"FileUri\": \"<mof-file-uri>\"\n}'\n\nSet-AzureVMExtension -ExtensionName $extensionName -VM $vm -Publisher $publisher `\n  -Version $version -PrivateConfiguration $privateConfig `\n  -PublicConfiguration $publicConfig | Update-AzureVM\n```\n\n#### 2.2.2. Resource Manager\n\nYou can log in to your Azure account (Azure Resource Manager mode) by running:\n\n```powershell\nLogin-AzureRmAccount\n```\n\nClick [**HERE**](https://azure.microsoft.com/en-us/documentation/articles/powershell-azure-resource-manager/) to learn more about how to use Azure PowerShell with Azure Resource Manager.\n\nYou can deploy DSCForLinux Extension by running:\n\n```powershell\n$rgName = '<resource-group-name>'\n$vmName = '<vm-name>'\n$location = '<location>'\n\n$extensionName = 'DSCForLinux'\n$publisher = 'Microsoft.OSTCExtensions'\n$version = '<version>'\n\n# You need to change the content of the $privateConfig and $publicConfig\n# according to different scenarios in section 3\n$privateConfig = '{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}'\n\n$publicConfig = '{\n  \"ExtensionAction\": \"Push\",\n  \"FileUri\": \"<mof-file-uri>\"\n}'\n\nSet-AzureRmVMExtension -ResourceGroupName $rgName -VMName $vmName -Location $location `\n  -Name $extensionName -Publisher $publisher -ExtensionType $extensionName `\n  -TypeHandlerVersion $version -SettingString $publicConfig -ProtectedSettingString $privateConfig\n```\n\n
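After the deployment completes, you can also inspect the extension from PowerShell; for example (a quick check, assuming the same AzureRM session and variables as above):\n\n```powershell\n# Shows the provisioning state of the deployed extension\nGet-AzureRmVMExtension -ResourceGroupName $rgName -VMName $vmName -Name $extensionName\n```\n\n### 2.3. 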
Using [**ARM Template**][arm-template]\n\nSample ARM templates are [201-dsc-linux-azure-storage-on-ubuntu](https://github.com/Azure/azure-quickstart-templates/tree/master/201-dsc-linux-azure-storage-on-ubuntu) and [201-dsc-linux-public-storage-on-ubuntu](https://github.com/Azure/azure-quickstart-templates/tree/master/201-dsc-linux-public-storage-on-ubuntu).\n\nFor more details about ARM templates, please visit [Authoring Azure Resource Manager templates](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/).\n\n## 3. Scenarios\n\n### 3.1. Register with an Azure Automation account\nprotected.json\n```json\n{\n  \"RegistrationUrl\": \"<azure-automation-account-url>\",\n  \"RegistrationKey\": \"<azure-automation-account-key>\"\n}\n```\npublic.json\n```json\n{\n  \"ExtensionAction\": \"Register\",\n  \"NodeConfigurationName\": \"<node-configuration-name>\",\n  \"RefreshFrequencyMins\": \"<value>\",\n  \"ConfigurationMode\": \"<ApplyAndMonitor | ApplyAndAutoCorrect | ApplyOnly>\",\n  \"ConfigurationModeFrequencyMins\": \"<value>\"\n}\n```\n\npowershell format\n```powershell\n$privateConfig = '{\n  \"RegistrationUrl\": \"<azure-automation-account-url>\",\n  \"RegistrationKey\": \"<azure-automation-account-key>\"\n}'\n\n$publicConfig = '{\n  \"ExtensionAction\": \"Register\",\n  \"NodeConfigurationName\": \"<node-configuration-name>\",\n  \"RefreshFrequencyMins\": \"<value>\",\n  \"ConfigurationMode\": \"<ApplyAndMonitor | ApplyAndAutoCorrect | ApplyOnly>\",\n  \"ConfigurationModeFrequencyMins\": \"<value>\"\n}'\n```\n\n### 3.2. Apply a MOF configuration file (in Azure Storage Account) to the VM\n\nprotected.json\n```json\n{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}\n```\n\npublic.json\n```json\n{\n  \"FileUri\": \"<mof-file-uri>\",\n  \"ExtensionAction\": \"Push\"\n}\n```\n\npowershell format\n```powershell\n$privateConfig = '{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}'\n\n$publicConfig = '{\n  \"FileUri\": \"<mof-file-uri>\",\n  \"ExtensionAction\": \"Push\"\n}'\n```\n\n### 3.3. Apply a MOF configuration file (in public storage) to the VM\n\npublic.json\n```json\n{\n  \"FileUri\": \"<mof-file-uri>\"\n}\n```\n\npowershell format\n```powershell\n$publicConfig = '{\n  \"FileUri\": \"<mof-file-uri>\"\n}'\n```\n\n### 3.4. Apply a meta MOF configuration file (in Azure Storage Account) to the VM\n\nprotected.json\n```json\n{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}\n```\n\npublic.json\n```json\n{\n  \"ExtensionAction\": \"Pull\",\n  \"FileUri\": \"<meta-mof-file-uri>\"\n}\n```\n\npowershell format\n```powershell\n$privateConfig = '{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}'\n\n$publicConfig = '{\n  \"ExtensionAction\": \"Pull\",\n  \"FileUri\": \"<meta-mof-file-uri>\"\n}'\n```\n\n### 3.5. Apply a meta MOF configuration file (in public storage) to the VM\npublic.json\n```json\n{\n  \"FileUri\": \"<meta-mof-file-uri>\",\n  \"ExtensionAction\": \"Pull\"\n}\n```\npowershell format\n```powershell\n$publicConfig = '{\n  \"FileUri\": \"<meta-mof-file-uri>\",\n  \"ExtensionAction\": \"Pull\"\n}'\n```\n\n
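The `public.json`/`protected.json` pair from each scenario is what you pass to the deployment commands from section 2; for example, in ARM mode:\n\n```\n$ azure vm extension set <resource-group> <vm-name> \\\nDSCForLinux Microsoft.OSTCExtensions <version> \\\n--private-config-path protected.json --public-config-path public.json\n```\n\n### 3.6. 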
Install a custom resource module (ZIP file in Azure Storage Account) to the VM\nprotected.json\n```json\n{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}\n```\npublic.json\n```json\n{\n  \"ExtensionAction\": \"Install\",\n  \"FileUri\": \"<resource-zip-file-uri>\"\n}\n```\n\npowershell format\n```powershell\n$privateConfig = '{\n  \"StorageAccountName\": \"<storage-account-name>\",\n  \"StorageAccountKey\": \"<storage-account-key>\"\n}'\n\n$publicConfig = '{\n  \"ExtensionAction\": \"Install\",\n  \"FileUri\": \"<resource-zip-file-uri>\"\n}'\n```\n\n### 3.7. Install a custom resource module (ZIP file in public storage) to the VM\npublic.json\n```json\n{\n  \"ExtensionAction\": \"Install\",\n  \"FileUri\": \"<resource-zip-file-uri>\"\n}\n```\npowershell format\n```powershell\n$publicConfig = '{\n  \"ExtensionAction\": \"Install\",\n  \"FileUri\": \"<resource-zip-file-uri>\"\n}'\n```\n\n### 3.8. Remove a custom resource module from the VM\npublic.json\n```json\n{\n  \"ResourceName\": \"<resource-name>\",\n  \"ExtensionAction\": \"Remove\"\n}\n```\npowershell format\n```powershell\n$publicConfig = '{\n  \"ResourceName\": \"<resource-name>\",\n  \"ExtensionAction\": \"Remove\"\n}'\n```\n\n## 4. Supported Linux Distributions\n- Ubuntu 14.04 LTS, 16.04 LTS, 18.04 LTS and 20.04 LTS\n- Debian 8, 9 and 10\n- Oracle Linux 6 and 7\n- CentOS 6, 7 and 8\n- RHEL 6, 7 and 8\n- SUSE Linux Enterprise Server 12 and 15\n\n## 5. Debug\n* The status of the extension is reported back to Azure so that the user can see the status in the Azure Portal.\n* The operation log of the extension is the `/var/log/azure/<extension-name>/<version>/extension.log` file.\n\n## 6. Known issue\n* To distribute MOF configurations to the Linux VM with Pull Servers, you need to make sure the cron service is running in the VM.\n\n## Changelog\n\n```\n# 2.5 (2017-05-25)\n- Added support for Oracle distros\n# 2.4 (2017-05-14)\n- Added more logging\n# 2.3 (2017-05-08)\n- Update to OMI v1.1.0-8 and Linux DSC v1.1.1-294\n- Added optional public.json parameters: 'NodeConfigurationName', 'RefreshFrequencyMins', 'ConfigurationMode' and 'ConfigurationModeFrequencyMins'.\n- Added a new parameter 'ExtensionAction' to replace 'mode' to avoid confusion with DSC terminology: push/pull mode.\n- Supports the mode parameter for backward compatibility.\n\n# 2.0 (2016-03-10)\n- Pick up Linux DSC v1.1.1\n- Add function to register with Azure Automation\n- Refine extension configurations\n\n# 1.0 (2015-09-24)\n- Initial version\n```\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/\n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n"
  },
  {
    "path": "DSC/azure/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport base64\nimport hashlib\nimport hmac\nimport sys\nimport types\nimport warnings\nimport inspect\nif sys.version_info < (3,):\n    from urllib2 import quote as url_quote\n    from urllib2 import unquote as url_unquote\n    _strtype = basestring\nelse:\n    from urllib.parse import quote as url_quote\n    from urllib.parse import unquote as url_unquote\n    _strtype = str\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom xml.sax.saxutils import escape as xml_escape\n\n#--------------------------------------------------------------------------\n# constants\n\n__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'\n__version__ = '0.8.4'\n\n# Live ServiceClient URLs\nBLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'\nQUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'\nTABLE_SERVICE_HOST_BASE = '.table.core.windows.net'\nSERVICE_BUS_HOST_BASE = '.servicebus.windows.net'\nMANAGEMENT_HOST = 'management.core.windows.net'\n\n# Development ServiceClient URLs\nDEV_BLOB_HOST = '127.0.0.1:10000'\nDEV_QUEUE_HOST = '127.0.0.1:10001'\nDEV_TABLE_HOST = '127.0.0.1:10002'\n\n# Default credentials for Development Storage Service\nDEV_ACCOUNT_NAME = 'devstoreaccount1'\nDEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='\n\n# All of our error messages\n_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'\n_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'\n_ERROR_INCORRECT_TABLE_IN_BATCH = \\\n    'Table should be the same in a batch operations'\n_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \\\n    'Partition Key should be the same in a batch operations'\n_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \\\n    'Row Keys should not be the same in a batch operations'\n_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \\\n    'Message is not peek locked and cannot be deleted.'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \\\n    'Message is not peek locked and cannot be unlocked.'\n_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'\n_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'\n_ERROR_CONFLICT = 'Conflict ({0})'\n_ERROR_NOT_FOUND = 'Not found ({0})'\n_ERROR_UNKNOWN = 'Unknown error ({0})'\n_ERROR_SERVICEBUS_MISSING_INFO = \\\n    'You need to provide servicebus namespace, access key and Issuer'\n_ERROR_STORAGE_MISSING_INFO = \\\n    'You need to provide both account name and access key'\n_ERROR_ACCESS_POLICY = \\\n    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \\\n    'instance'\n_WARNING_VALUE_SHOULD_BE_BYTES = \\\n    'Warning: {0} must be bytes data type. 
It will be converted ' + \\\n    'automatically, with utf-8 text encoding.'\n_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'\n_ERROR_VALUE_NONE = '{0} should not be None.'\n_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'\n_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \\\n    'Cannot serialize the specified value ({0}) to an entity.  Please use ' + \\\n    'an EntityProperty (which can specify custom types), int, str, bool, ' + \\\n    'or datetime.'\n_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \\\n    'Invalid page blob size: {0}. ' + \\\n    'The size must be aligned to a 512-byte boundary.'\n\n_USER_AGENT_STRING = 'pyazure/' + __version__\n\nMETADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'\n\n\nclass WindowsAzureData(object):\n\n    ''' This is the base data class.\n    It is only used for isinstance checks. '''\n    pass\n\n\nclass WindowsAzureError(Exception):\n\n    ''' WindowsAzure Exception base class. '''\n\n    def __init__(self, message):\n        super(WindowsAzureError, self).__init__(message)\n\n\nclass WindowsAzureConflictError(WindowsAzureError):\n\n    '''Indicates that the resource could not be created because it already\n    exists'''\n\n    def __init__(self, message):\n        super(WindowsAzureConflictError, self).__init__(message)\n\n\nclass WindowsAzureMissingResourceError(WindowsAzureError):\n\n    '''Indicates that a request for a resource (queue, table,\n    container, etc...) failed because the specified resource does not exist'''\n\n    def __init__(self, message):\n        super(WindowsAzureMissingResourceError, self).__init__(message)\n\n\nclass WindowsAzureBatchOperationError(WindowsAzureError):\n\n    '''Indicates that a batch operation failed'''\n\n    def __init__(self, message, code):\n        super(WindowsAzureBatchOperationError, self).__init__(message)\n        self.code = code\n\n\nclass Feed(object):\n    pass\n\n\nclass _Base64String(str):\n    pass\n\n\nclass HeaderDict(dict):\n\n    def __getitem__(self, index):\n        return super(HeaderDict, self).__getitem__(index.lower())\n\n\ndef _encode_base64(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    encoded = base64.b64encode(data)\n    return encoded.decode('utf-8')\n\n\ndef _decode_base64_to_bytes(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    return base64.b64decode(data)\n\n\ndef _decode_base64_to_text(data):\n    decoded_bytes = _decode_base64_to_bytes(data)\n    return decoded_bytes.decode('utf-8')\n\n\ndef _get_readable_id(id_name, id_prefix_to_skip):\n    \"\"\"simplifies an id to be more friendly for humans\"\"\"\n    # id_name is in the form 'https://namespace.host.suffix/name'\n    # where name may contain a forward slash!\n    pos = id_name.find('//')\n    if pos != -1:\n        pos += 2\n        if id_prefix_to_skip:\n            pos = id_name.find(id_prefix_to_skip, pos)\n            if pos != -1:\n                pos += len(id_prefix_to_skip)\n        pos = id_name.find('/', pos)\n        if pos != -1:\n            return id_name[pos + 1:]\n    return id_name\n\n\ndef _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):\n    ''' get properties from entry xml '''\n    properties = {}\n\n    etag = entry.getAttributeNS(METADATA_NS, 'etag')\n    if etag:\n        properties['etag'] = etag\n    for updated in _get_child_nodes(entry, 'updated'):\n        properties['updated'] = updated.firstChild.nodeValue\n    for name in _get_children_from_path(entry, 'author', 'name'):\n        if name.firstChild is not None:\n            properties['author'] = name.firstChild.nodeValue\n\n    if include_id:\n        if use_title_as_id:\n            for title in _get_child_nodes(entry, 'title'):\n                properties['name'] = title.firstChild.nodeValue\n        else:\n            for id in _get_child_nodes(entry, 'id'):\n                properties['name'] = _get_readable_id(\n                    id.firstChild.nodeValue, id_prefix_to_skip)\n\n    return properties\n\n\ndef _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):\n    ''' get properties from entry xml '''\n    xmldoc = minidom.parseString(xmlstr)\n    properties = {}\n\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        properties.update(_get_entry_properties_from_node(entry, include_id, id_prefix_to_skip))\n\n    return properties\n\n\ndef _get_first_child_node_value(parent_node, node_name):\n    xml_attrs = _get_child_nodes(parent_node, node_name)\n    if xml_attrs:\n        xml_attr = xml_attrs[0]\n        if xml_attr.firstChild:\n            value = xml_attr.firstChild.nodeValue\n            return value\n\n\ndef _get_child_nodes(node, tagName):\n    return [childNode for childNode in node.getElementsByTagName(tagName)\n            if childNode.parentNode == node]\n\n\ndef _get_children_from_path(node, *path):\n    '''descends through a hierarchy of nodes returning the list of children\n    at the innermost level.  Only returns children who share a common parent,\n    not cousins.'''\n    cur = node\n    for index, child in enumerate(path):\n        if isinstance(child, _strtype):\n            next = _get_child_nodes(cur, child)\n        else:\n            next = _get_child_nodesNS(cur, *child)\n        if index == len(path) - 1:\n            return next\n        elif not next:\n            break\n\n        cur = next[0]\n    return []\n\n\ndef _get_child_nodesNS(node, ns, tagName):\n    return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)\n            if childNode.parentNode == node]\n\n\ndef _create_entry(entry_body):\n    ''' Adds common part of entry to a given entry body and returns the whole\n    xml. 
'''\n    updated_str = datetime.utcnow().isoformat()\n    if datetime.utcnow().utcoffset() is None:\n        updated_str += '+00:00'\n\n    entry_start = '''<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\" >\n<title /><updated>{updated}</updated><author><name /></author><id />\n<content type=\"application/xml\">\n    {body}</content></entry>'''\n    return entry_start.format(updated=updated_str, body=entry_body)\n\n\ndef _to_datetime(strtime):\n    return datetime.strptime(strtime, \"%Y-%m-%dT%H:%M:%S.%f\")\n\n_KNOWN_SERIALIZATION_XFORMS = {\n    'include_apis': 'IncludeAPIs',\n    'message_id': 'MessageId',\n    'content_md5': 'Content-MD5',\n    'last_modified': 'Last-Modified',\n    'cache_control': 'Cache-Control',\n    'account_admin_live_email_id': 'AccountAdminLiveEmailId',\n    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',\n    'subscription_id': 'SubscriptionID',\n    'fqdn': 'FQDN',\n    'private_id': 'PrivateID',\n    'os_virtual_hard_disk': 'OSVirtualHardDisk',\n    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',\n    'logical_size_in_gb': 'LogicalSizeInGB',\n    'os': 'OS',\n    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',\n    'copy_id': 'CopyId',\n    }\n\n\ndef _get_serialization_name(element_name):\n    \"\"\"converts a Python name into a serializable name\"\"\"\n    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)\n    if known is not None:\n        return known\n\n    if element_name.startswith('x_ms_'):\n        return element_name.replace('_', '-')\n    if element_name.endswith('_id'):\n        element_name = element_name.replace('_id', 'ID')\n    for name in ['content_', 'last_modified', 'if_', 'cache_control']:\n        if element_name.startswith(name):\n            element_name = element_name.replace('_', '-_')\n\n    return ''.join(name.capitalize() for name in element_name.split('_'))\n\nif sys.version_info < (3,):\n    _unicode_type = unicode\n\n    def _str(value):\n        if isinstance(value, unicode):\n            return value.encode('utf-8')\n\n        return str(value)\nelse:\n    _str = str\n    _unicode_type = str\n\n\ndef _str_or_none(value):\n    if value is None:\n        return None\n\n    return _str(value)\n\n\ndef _int_or_none(value):\n    if value is None:\n        return None\n\n    return str(int(value))\n\n\ndef _bool_or_none(value):\n    if value is None:\n        return None\n\n    if isinstance(value, bool):\n        if value:\n            return 'true'\n        else:\n            return 'false'\n\n    return str(value)\n\n\ndef _convert_class_to_xml(source, xml_prefix=True):\n    if source is None:\n        return ''\n\n    xmlstr = ''\n    if xml_prefix:\n        xmlstr = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n\n    if isinstance(source, list):\n        for value in source:\n            xmlstr += _convert_class_to_xml(value, False)\n    elif isinstance(source, WindowsAzureData):\n        class_name = source.__class__.__name__\n        xmlstr += '<' + class_name + '>'\n        for name, value in vars(source).items():\n            if value is not None:\n                if isinstance(value, list) or \\\n                    isinstance(value, WindowsAzureData):\n                    xmlstr += _convert_class_to_xml(value, False)\n                else:\n                    xmlstr += ('<' + 
_get_serialization_name(name) + '>' +\n                               xml_escape(str(value)) + '</' +\n                               _get_serialization_name(name) + '>')\n        xmlstr += '</' + class_name + '>'\n    return xmlstr\n\n\ndef _find_namespaces_from_child(parent, child, namespaces):\n    \"\"\"Recursively searches from the parent to the child,\n    gathering all the applicable namespaces along the way\"\"\"\n    for cur_child in parent.childNodes:\n        if cur_child is child:\n            return True\n        if _find_namespaces_from_child(cur_child, child, namespaces):\n            # we are the parent node\n            for key in cur_child.attributes.keys():\n                if key.startswith('xmlns:') or key == 'xmlns':\n                    namespaces[key] = cur_child.attributes[key]\n            break\n    return False\n\n\ndef _find_namespaces(parent, child):\n    res = {}\n    for key in parent.documentElement.attributes.keys():\n        if key.startswith('xmlns:') or key == 'xmlns':\n            res[key] = parent.documentElement.attributes[key]\n    _find_namespaces_from_child(parent, child, res)\n    return res\n\n\ndef _clone_node_with_namespaces(node_to_clone, original_doc):\n    clone = node_to_clone.cloneNode(True)\n\n    for key, value in _find_namespaces(original_doc, node_to_clone).items():\n        clone.attributes[key] = value\n\n    return clone\n\n\ndef _convert_response_to_feeds(response, convert_callback):\n    if response is None:\n        return None\n\n    feeds = _list_of(Feed)\n\n    x_ms_continuation = HeaderDict()\n    for name, value in response.headers:\n        if 'x-ms-continuation' in name:\n            x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value\n    if x_ms_continuation:\n        setattr(feeds, 'x_ms_continuation', x_ms_continuation)\n\n    xmldoc = minidom.parseString(response.body)\n    xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')\n    if not xml_entries:\n        # in some cases, response contains only entry but no feed\n        xml_entries = _get_children_from_path(xmldoc, 'entry')\n    if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):\n        for xml_entry in xml_entries:\n            return_obj = convert_callback()\n            for node in _get_children_from_path(xml_entry,\n                                                'content',\n                                                convert_callback.__name__):\n                _fill_data_to_return_object(node, return_obj)\n            for name, value in _get_entry_properties_from_node(xml_entry,\n                                                               include_id=True,\n                                                               use_title_as_id=True).items():\n                setattr(return_obj, name, value)\n            feeds.append(return_obj)\n    else:\n        for xml_entry in xml_entries:\n            new_node = _clone_node_with_namespaces(xml_entry, xmldoc)\n            feeds.append(convert_callback(new_node.toxml('utf-8')))\n\n    return feeds\n\n\ndef _validate_type_bytes(param_name, param):\n    if not isinstance(param, bytes):\n        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _validate_not_none(param_name, param):\n    if param is None:\n        raise TypeError(_ERROR_VALUE_NONE.format(param_name))\n\n\ndef _fill_list_of(xmldoc, element_type, xml_element_name):\n    xmlelements = _get_child_nodes(xmldoc, xml_element_name)\n    return 
[_parse_response_body_from_xml_node(xmlelement, element_type) \\\n        for xmlelement in xmlelements]\n\n\ndef _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,\n                         xml_element_name):\n    '''Converts an xml fragment into a list of scalar types.  The parent xml\n    element contains a flat list of xml elements which are converted into the\n    specified scalar type and added to the list.\n    Example:\n    xmldoc=\n<Endpoints>\n    <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>\n</Endpoints>\n    element_type=str\n    parent_xml_element_name='Endpoints'\n    xml_element_name='Endpoint'\n    '''\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)\n        return [_get_node_value(xmlelement, element_type) \\\n            for xmlelement in xmlelements]\n\n\ndef _fill_dict(xmldoc, element_name):\n    xmlelements = _get_child_nodes(xmldoc, element_name)\n    if xmlelements:\n        return_obj = {}\n        for child in xmlelements[0].childNodes:\n            if child.firstChild:\n                return_obj[child.nodeName] = child.firstChild.nodeValue\n        return return_obj\n\n\ndef _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,\n                  key_xml_element_name, value_xml_element_name):\n    '''Converts an xml fragment into a dictionary. The parent xml element\n    contains a list of xml elements where each element has a child element for\n    the key, and another for the value.\n    Example:\n    xmldoc=\n<ExtendedProperties>\n    <ExtendedProperty>\n        <Name>Ext1</Name>\n        <Value>Val1</Value>\n    </ExtendedProperty>\n    <ExtendedProperty>\n        <Name>Ext2</Name>\n        <Value>Val2</Value>\n    </ExtendedProperty>\n</ExtendedProperties>\n    element_type=str\n    parent_xml_element_name='ExtendedProperties'\n    pair_xml_element_name='ExtendedProperty'\n    key_xml_element_name='Name'\n    value_xml_element_name='Value'\n    '''\n    return_obj = {}\n\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)\n        for pair in xmlelements:\n            keys = _get_child_nodes(pair, key_xml_element_name)\n            values = _get_child_nodes(pair, value_xml_element_name)\n            if keys and values:\n                key = keys[0].firstChild.nodeValue\n                value = values[0].firstChild.nodeValue\n                return_obj[key] = value\n\n    return return_obj\n\n\ndef _fill_instance_child(xmldoc, element_name, return_type):\n    '''Converts a child of the current dom element to the specified type.\n    '''\n    xmlelements = _get_child_nodes(\n        xmldoc, _get_serialization_name(element_name))\n\n    if not xmlelements:\n        return None\n\n    return_obj = return_type()\n    _fill_data_to_return_object(xmlelements[0], return_obj)\n\n    return return_obj\n\n\ndef _fill_instance_element(element, return_type):\n    \"\"\"Converts a DOM element into the specified object\"\"\"\n    return _parse_response_body_from_xml_node(element, return_type)\n\n\ndef _fill_data_minidom(xmldoc, element_name, data_member):\n    xmlelements = _get_child_nodes(\n        xmldoc, 
_get_serialization_name(element_name))\n\n    if not xmlelements or not xmlelements[0].childNodes:\n        return None\n\n    value = xmlelements[0].firstChild.nodeValue\n\n    if data_member is None:\n        return value\n    elif isinstance(data_member, datetime):\n        return _to_datetime(value)\n    elif type(data_member) is bool:\n        return value.lower() != 'false'\n    else:\n        return type(data_member)(value)\n\n\ndef _get_node_value(xmlelement, data_type):\n    value = xmlelement.firstChild.nodeValue\n    if data_type is datetime:\n        return _to_datetime(value)\n    elif data_type is bool:\n        return value.lower() != 'false'\n    else:\n        return data_type(value)\n\n\ndef _get_request_body_bytes_only(param_name, param_value):\n    '''Validates the request body passed in and converts it to bytes\n    if our policy allows it.'''\n    if param_value is None:\n        return b''\n\n    if isinstance(param_value, bytes):\n        return param_value\n\n    # Previous versions of the SDK allowed data types other than bytes to be\n    # passed in, and they would be auto-converted to bytes.  We preserve this\n    # behavior when running under 2.7, but issue a warning.\n    # Python 3 support is new, so we reject anything that's not bytes.\n    if sys.version_info < (3,):\n        warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))\n        return _get_request_body(param_value)\n\n    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _get_request_body(request_body):\n    '''Converts an object into a request body.  If it's None\n    we'll return an empty string, if it's one of our objects it'll\n    convert it to XML and return it.  Otherwise we just use the object\n    directly'''\n    if request_body is None:\n        return b''\n\n    if isinstance(request_body, WindowsAzureData):\n        request_body = _convert_class_to_xml(request_body)\n\n    if isinstance(request_body, bytes):\n        return request_body\n\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    request_body = str(request_body)\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    return request_body\n\n\ndef _parse_enum_results_list(response, return_type, resp_type, item_type):\n    \"\"\"resp_body is the XML we received\nresp_type is a string, such as Containers,\nreturn_type is the type we're constructing, such as ContainerEnumResults\nitem_type is the type object of the item to be created, such as Container\n\nThis function then returns a ContainerEnumResults object with the\ncontainers member populated with the results.\n\"\"\"\n\n    # parsing something like:\n    # <EnumerationResults ... 
>\n    #   <Queues>\n    #       <Queue>\n    #           <Something />\n    #           <SomethingElse />\n    #       </Queue>\n    #   </Queues>\n    # </EnumerationResults>\n    respbody = response.body\n    return_obj = return_type()\n    doc = minidom.parseString(respbody)\n\n    items = []\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        # path is something like Queues, Queue\n        for child in _get_children_from_path(enum_results,\n                                             resp_type,\n                                             resp_type[:-1]):\n            items.append(_fill_instance_element(child, item_type))\n\n        for name, value in vars(return_obj).items():\n            # queues, Queues, this is the list itself which we populated\n            # above\n            if name == resp_type.lower():\n                # the list itself.\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    setattr(return_obj, resp_type.lower(), items)\n    return return_obj\n\n\ndef _parse_simple_list(response, type, item_type, list_name):\n    respbody = response.body\n    res = type()\n    res_items = []\n    doc = minidom.parseString(respbody)\n    type_name = type.__name__\n    item_name = item_type.__name__\n    for item in _get_children_from_path(doc, type_name, item_name):\n        res_items.append(_fill_instance_element(item, item_type))\n\n    setattr(res, list_name, res_items)\n    return res\n\n\ndef _parse_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_xml_text(response.body, return_type)\n\ndef _parse_service_resources_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_service_resources_xml_text(response.body, return_type)\n\n\ndef _fill_data_to_return_object(node, return_obj):\n    members = dict(vars(return_obj))\n    for name, value in members.items():\n        if isinstance(value, _list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_list_of(node,\n                                  value.list_type,\n                                  value.xml_element_name))\n        elif isinstance(value, _scalar_list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_scalar_list_of(node,\n                                         value.list_type,\n                                         _get_serialization_name(name),\n                                         value.xml_element_name))\n        elif isinstance(value, _dict_of):\n            setattr(return_obj,\n                    name,\n                    _fill_dict_of(node,\n                                  _get_serialization_name(name),\n                                  value.pair_xml_element_name,\n                                  value.key_xml_element_name,\n                                  value.value_xml_element_name))\n        elif isinstance(value, _xml_attribute):\n            real_value = None\n            if node.hasAttribute(value.xml_element_name):\n                real_value = node.getAttribute(value.xml_element_name)\n            if real_value is not None:\n                setattr(return_obj, name, real_value)\n        elif isinstance(value, WindowsAzureData):\n            setattr(return_obj,\n                    name,\n                    _fill_instance_child(node, name, value.__class__))\n        elif isinstance(value, dict):\n            setattr(return_obj,\n                    name,\n                    _fill_dict(node, _get_serialization_name(name)))\n        elif isinstance(value, _Base64String):\n            value = _fill_data_minidom(node, name, '')\n            if value is not None:\n                value = _decode_base64_to_text(value)\n            # always set the attribute, so we don't end up returning an object\n            # with type _Base64String\n            setattr(return_obj, name, value)\n        else:\n            value = _fill_data_minidom(node, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n\ndef _parse_response_body_from_xml_node(node, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    return_obj = return_type()\n    _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\n\ndef _parse_response_body_from_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = return_type()\n    xml_name = return_type._xml_name if hasattr(return_type, '_xml_name') else return_type.__name__\n    for node in _get_child_nodes(doc, xml_name):\n        _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\ndef _parse_response_body_from_service_resources_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = _list_of(return_type)\n    for node in _get_children_from_path(doc, \"ServiceResources\", \"ServiceResource\"):\n        local_obj = return_type()\n        _fill_data_to_return_object(node, local_obj)\n        return_obj.append(local_obj)\n\n    return return_obj\n\nclass _dict_of(dict):\n\n    \"\"\"a dict which carries with it the xml element names for key,val.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, pair_xml_element_name, key_xml_element_name,\n                 value_xml_element_name):\n        self.pair_xml_element_name = pair_xml_element_name\n        self.key_xml_element_name = key_xml_element_name\n        self.value_xml_element_name = value_xml_element_name\n        super(_dict_of, self).__init__()\n\n\nclass _list_of(list):\n\n    \"\"\"a list which carries with it the type that's expected to go in it.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name=None):\n        self.list_type = list_type\n        if xml_element_name is None:\n            self.xml_element_name = list_type.__name__\n        else:\n            self.xml_element_name = xml_element_name\n        super(_list_of, self).__init__()\n\n\nclass _scalar_list_of(list):\n\n    \"\"\"a list of scalar types which carries with it the type that's\n    expected to go in it along with its xml element name.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name):\n        self.list_type = list_type\n        self.xml_element_name = xml_element_name\n        super(_scalar_list_of, self).__init__()\n\n\nclass _xml_attribute:\n\n    \"\"\"an accessor for an XML attribute,\n
    identified by its xml element name.\n    Used for deserialization and construction.\"\"\"\n\n    def __init__(self, xml_element_name):\n        self.xml_element_name = xml_element_name\n\n\ndef _update_request_uri_query_local_storage(request, use_local_storage):\n    ''' create correct uri and query for the request '''\n    uri, query = _update_request_uri_query(request)\n    if use_local_storage:\n        return '/' + DEV_ACCOUNT_NAME + uri, query\n    return uri, query\n\n\ndef _update_request_uri_query(request):\n    '''pulls the query string out of the URI and moves it into\n    the query portion of the request object.  If there are already\n    query parameters on the request the parameters in the URI will\n    appear after the existing parameters'''\n\n    if '?' in request.path:\n        request.path, _, query_string = request.path.partition('?')\n        if query_string:\n            query_params = query_string.split('&')\n            for query in query_params:\n                if '=' in query:\n                    name, _, value = query.partition('=')\n                    request.query.append((name, value))\n\n    request.path = url_quote(request.path, '/()$=\\',')\n\n    # add encoded queries to request.path.\n    if request.query:\n        request.path += '?'\n        for name, value in request.query:\n            if value is not None:\n                request.path += name + '=' + url_quote(value, '/()$=\\',') + '&'\n        request.path = request.path[:-1]\n\n    return request.path, request.query\n\n\ndef _dont_fail_on_exist(error):\n    ''' don't throw exception if the resource exists.\n    This is called by create_* APIs with fail_on_exist=False'''\n    if isinstance(error, WindowsAzureConflictError):\n        return False\n    else:\n        raise error\n\n\ndef _dont_fail_not_exist(error):\n    ''' don't throw exception if the resource doesn't exist.\n    This is called by create_* APIs with fail_on_exist=False'''\n    if isinstance(error, WindowsAzureMissingResourceError):\n        return False\n    else:\n        raise error\n\n\ndef _general_error_handler(http_error):\n    ''' Simple error handler for azure.'''\n    if http_error.status == 409:\n        raise WindowsAzureConflictError(\n            _ERROR_CONFLICT.format(str(http_error)))\n    elif http_error.status == 404:\n        raise WindowsAzureMissingResourceError(\n            _ERROR_NOT_FOUND.format(str(http_error)))\n    else:\n        if http_error.respbody is not None:\n            raise WindowsAzureError(\n                _ERROR_UNKNOWN.format(str(http_error)) + '\\n' + \\\n                    http_error.respbody.decode('utf-8'))\n        else:\n            raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))\n\n\ndef _parse_response_for_dict(response):\n    ''' Extracts name-values from response header. Filter out the standard\n    http headers.'''\n\n    if response is None:\n        return None\n    http_headers = ['server', 'date', 'location', 'host',\n                    'via', 'proxy-connection', 'connection']\n    return_dict = HeaderDict()\n    if response.headers:\n        for name, value in response.headers:\n            if not name.lower() in http_headers:\n                return_dict[name] = value\n\n    return return_dict\n\n\ndef _parse_response_for_dict_prefix(response, prefixes):\n    ''' Extracts name-values for names starting with prefix from response\n    header. 
Filter out the standard http headers.'''\n\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            for prefix_value in prefixes:\n                if name.lower().startswith(prefix_value.lower()):\n                    return_dict[name] = value\n                    break\n        return return_dict\n    else:\n        return None\n\n\ndef _parse_response_for_dict_filter(response, filter):\n    ''' Extracts name-values for names in filter from response header. Filter\n    out the standard http headers.'''\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            if name.lower() in filter:\n                return_dict[name] = value\n        return return_dict\n    else:\n        return None\n\n\ndef _sign_string(key, string_to_sign, key_is_base64=True):\n    if key_is_base64:\n        key = _decode_base64_to_bytes(key)\n    else:\n        if isinstance(key, _unicode_type):\n            key = key.encode('utf-8')\n    if isinstance(string_to_sign, _unicode_type):\n        string_to_sign = string_to_sign.encode('utf-8')\n    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)\n    digest = signed_hmac_sha256.digest()\n    encoded_digest = _encode_base64(digest)\n    return encoded_digest\n"
  },
  {
    "path": "DSC/azure/azure.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>{25b2c65a-0553-4452-8907-8b5b17544e68}</ProjectGuid>\n    <ProjectHome>\n    </ProjectHome>\n    <StartupFile>storage\\blobservice.py</StartupFile>\n    <SearchPath>..</SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>azure</Name>\n    <RootNamespace>azure</RootNamespace>\n    <IsWindowsApplication>False</IsWindowsApplication>\n    <LaunchProvider>Standard Python launcher</LaunchProvider>\n    <CommandLineArguments />\n    <InterpreterPath />\n    <InterpreterArguments />\n    <InterpreterId>{9a7a9026-48c1-4688-9d5d-e5699d47d074}</InterpreterId>\n    <InterpreterVersion>3.4</InterpreterVersion>\n    <SccProjectName>SAK</SccProjectName>\n    <SccProvider>SAK</SccProvider>\n    <SccAuxPath>SAK</SccAuxPath>\n    <SccLocalPath>SAK</SccLocalPath>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <ItemGroup>\n    <Compile Include=\"http\\batchclient.py\" />\n    <Compile Include=\"http\\httpclient.py\" />\n    <Compile Include=\"http\\winhttp.py\" />\n    <Compile Include=\"http\\__init__.py\" />\n    <Compile Include=\"servicemanagement\\servicebusmanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementclient.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\sqldatabasemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\websitemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\__init__.py\" />\n    <Compile Include=\"servicebus\\servicebusservice.py\" />\n    <Compile Include=\"storage\\blobservice.py\" />\n    <Compile Include=\"storage\\queueservice.py\" />\n    <Compile Include=\"storage\\cloudstorageaccount.py\" />\n    <Compile Include=\"storage\\tableservice.py\" />\n    <Compile Include=\"storage\\sharedaccesssignature.py\" />\n    <Compile Include=\"__init__.py\" />\n    <Compile Include=\"servicebus\\__init__.py\" />\n    <Compile Include=\"storage\\storageclient.py\" />\n    <Compile Include=\"storage\\__init__.py\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Folder Include=\"http\" />\n    <Folder Include=\"servicemanagement\" />\n    <Folder Include=\"servicebus\\\" />\n    <Folder Include=\"storage\" />\n  </ItemGroup>\n  <ItemGroup>\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.6\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.7\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.3\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.4\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\2.7\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.3\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.4\" 
/>\n  </ItemGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <VSToolsPath Condition=\"'$(VSToolsPath)' == ''\">$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)</VSToolsPath>\n    <PtvsTargetsFile>$(VSToolsPath)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <Import Condition=\"Exists($(PtvsTargetsFile))\" Project=\"$(PtvsTargetsFile)\" />\n  <Import Condition=\"!Exists($(PtvsTargetsFile))\" Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" />\n</Project>"
  },
  {
    "path": "DSC/azure/http/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\n\nHTTP_RESPONSE_NO_CONTENT = 204\n\n\nclass HTTPError(Exception):\n\n    ''' HTTP Exception when response status code >= 300 '''\n\n    def __init__(self, status, message, respheader, respbody):\n        '''Creates a new HTTPError with the specified status, message,\n        response headers and body'''\n        self.status = status\n        self.respheader = respheader\n        self.respbody = respbody\n        Exception.__init__(self, message)\n\n\nclass HTTPResponse(object):\n\n    \"\"\"Represents a response from an HTTP request.  An HTTPResponse has the\n    following attributes:\n\n    status: the status code of the response\n    message: the message\n    headers: the returned headers, as a list of (name, value) pairs\n    body: the body of the response\n    \"\"\"\n\n    def __init__(self, status, message, headers, body):\n        self.status = status\n        self.message = message\n        self.headers = headers\n        self.body = body\n\n\nclass HTTPRequest(object):\n\n    '''Represents an HTTP Request.  An HTTP Request consists of the following\n    attributes:\n\n    host: the host name to connect to\n    method: the method to use to connect (string such as GET, POST, PUT, etc.)\n    path: the uri fragment\n    query: query parameters specified as a list of (name, value) pairs\n    headers: header values specified as (name, value) pairs\n    body: the body of the request.\n    protocol_override:\n        specify to use this protocol instead of the global one stored in\n        _HTTPClient.\n    '''\n\n    def __init__(self):\n        self.host = ''\n        self.method = ''\n        self.path = ''\n        self.query = []      # list of (name, value)\n        self.headers = []    # list of (header name, header value)\n        self.body = ''\n        self.protocol_override = None\n"
  },
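The module above defines only plain data holders; the rest of the HTTP stack in this bundle passes these three types around. A brief illustrative sketch (not part of the repository sources; the host and header values are placeholders):

```python
# Sketch: the plain data holders from DSC/azure/http/__init__.py.
from azure.http import HTTPError, HTTPRequest, HTTPResponse

req = HTTPRequest()
req.host = 'myaccount.table.core.windows.net'  # placeholder host
req.method = 'GET'
req.path = '/Tables'
req.query.append(('$top', '10'))               # (name, value) pairs
req.headers.append(('Accept', 'application/atom+xml'))

# A response as a client would build it; HTTPError carries the pieces
# a caller needs to diagnose a failed request.
resp = HTTPResponse(404, 'Not Found',
                    [('content-type', 'application/xml')], b'')
if resp.status >= 300:
    raise HTTPError(resp.status, resp.message, resp.headers, resp.body)
```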
  {
    "path": "DSC/azure/http/batchclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport uuid\n\nfrom azure import (\n    _update_request_uri_query,\n    WindowsAzureError,\n    WindowsAzureBatchOperationError,\n    _get_children_from_path,\n    url_unquote,\n    _ERROR_CANNOT_FIND_PARTITION_KEY,\n    _ERROR_CANNOT_FIND_ROW_KEY,\n    _ERROR_INCORRECT_TABLE_IN_BATCH,\n    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,\n    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,\n    _ERROR_BATCH_COMMIT_FAIL,\n    )\nfrom azure.http import HTTPError, HTTPRequest, HTTPResponse\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import (\n    _update_storage_table_header,\n    METADATA_NS,\n    _sign_storage_table_request,\n    )\nfrom xml.dom import minidom\n\n_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'\n\nif sys.version_info < (3,):\n    def _new_boundary():\n        return str(uuid.uuid1())\nelse:\n    def _new_boundary():\n        return str(uuid.uuid1()).encode('utf-8')\n\n\nclass _BatchClient(_HTTPClient):\n\n    '''\n    This is the class that is used for batch operation for storage table\n    service. It only supports one changeset.\n    '''\n\n    def __init__(self, service_instance, account_key, account_name,\n                 protocol='http'):\n        _HTTPClient.__init__(self, service_instance, account_name=account_name,\n                             account_key=account_key, protocol=protocol)\n        self.is_batch = False\n        self.batch_requests = []\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n\n    def get_request_table(self, request):\n        '''\n        Extracts table name from request.uri. The request.uri has either\n        \"/mytable(...)\" or \"/mytable\" format.\n\n        request: the request to insert, update or delete entity\n        '''\n        if '(' in request.path:\n            pos = request.path.find('(')\n            return request.path[1:pos]\n        else:\n            return request.path[1:]\n\n    def get_request_partition_key(self, request):\n        '''\n        Extracts PartitionKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. 
Only insert operation request\n        is a POST request and the PartitionKey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            part_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'PartitionKey'))\n            if not part_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return part_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('PartitionKey=\\'')\n            pos2 = uri.find('\\',', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return uri[pos1 + len('PartitionKey=\\''):pos2]\n\n    def get_request_row_key(self, request):\n        '''\n        Extracts RowKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. Only insert operation request\n        is a POST request and the Rowkey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            row_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'RowKey'))\n            if not row_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            return row_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('RowKey=\\'')\n            pos2 = uri.find('\\')', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            row_key = uri[pos1 + len('RowKey=\\''):pos2]\n            return row_key\n\n    def validate_request_table(self, request):\n        '''\n        Validates that all requests have the same table name. Set the table\n        name if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_table:\n            if self.get_request_table(request) != self.batch_table:\n                raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)\n        else:\n            self.batch_table = self.get_request_table(request)\n\n    def validate_request_partition_key(self, request):\n        '''\n        Validates that all requests have the same PartitiionKey. 
Set the\n        PartitionKey if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_partition_key:\n            if self.get_request_partition_key(request) != \\\n                self.batch_partition_key:\n                raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)\n        else:\n            self.batch_partition_key = self.get_request_partition_key(request)\n\n    def validate_request_row_key(self, request):\n        '''\n        Validates that all requests have the different RowKey and adds RowKey\n        to existing RowKey list.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_row_keys:\n            if self.get_request_row_key(request) in self.batch_row_keys:\n                raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)\n        else:\n            self.batch_row_keys.append(self.get_request_row_key(request))\n\n    def begin_batch(self):\n        '''\n        Starts the batch operation. Intializes the batch variables\n\n        is_batch: batch operation flag.\n        batch_table: the table name of the batch operation\n        batch_partition_key: the PartitionKey of the batch requests.\n        batch_row_keys: the RowKey list of adding requests.\n        batch_requests: the list of the requests.\n        '''\n        self.is_batch = True\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n        self.batch_requests = []\n\n    def insert_request_to_batch(self, request):\n        '''\n        Adds request to batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        self.validate_request_table(request)\n        self.validate_request_partition_key(request)\n        self.validate_request_row_key(request)\n        self.batch_requests.append(request)\n\n    def commit_batch(self):\n        ''' Resets batch flag and commits the batch requests. '''\n        if self.is_batch:\n            self.is_batch = False\n            self.commit_batch_requests()\n\n    def commit_batch_requests(self):\n        ''' Commits the batch requests. 
'''\n\n        batch_boundary = b'batch_' + _new_boundary()\n        changeset_boundary = b'changeset_' + _new_boundary()\n\n        # Commits batch only the requests list is not empty.\n        if self.batch_requests:\n            request = HTTPRequest()\n            request.method = 'POST'\n            request.host = self.batch_requests[0].host\n            request.path = '/$batch'\n            request.headers = [\n                ('Content-Type', 'multipart/mixed; boundary=' + \\\n                    batch_boundary.decode('utf-8')),\n                ('Accept', 'application/atom+xml,application/xml'),\n                ('Accept-Charset', 'UTF-8')]\n\n            request.body = b'--' + batch_boundary + b'\\n'\n            request.body += b'Content-Type: multipart/mixed; boundary='\n            request.body += changeset_boundary + b'\\n\\n'\n\n            content_id = 1\n\n            # Adds each request body to the POST data.\n            for batch_request in self.batch_requests:\n                request.body += b'--' + changeset_boundary + b'\\n'\n                request.body += b'Content-Type: application/http\\n'\n                request.body += b'Content-Transfer-Encoding: binary\\n\\n'\n                request.body += batch_request.method.encode('utf-8')\n                request.body += b' http://'\n                request.body += batch_request.host.encode('utf-8')\n                request.body += batch_request.path.encode('utf-8')\n                request.body += b' HTTP/1.1\\n'\n                request.body += b'Content-ID: '\n                request.body += str(content_id).encode('utf-8') + b'\\n'\n                content_id += 1\n\n                # Add different headers for different type requests.\n                if not batch_request.method == 'DELETE':\n                    request.body += \\\n                        b'Content-Type: application/atom+xml;type=entry\\n'\n                    for name, value in batch_request.headers:\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n'\n                            break\n                    request.body += b'Content-Length: '\n                    request.body += str(len(batch_request.body)).encode('utf-8')\n                    request.body += b'\\n\\n'\n                    request.body += batch_request.body + b'\\n'\n                else:\n                    for name, value in batch_request.headers:\n                        # If-Match should be already included in\n                        # batch_request.headers, but in case it is missing,\n                        # just add it.\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n\\n'\n                            break\n                    else:\n                        request.body += b'If-Match: *\\n\\n'\n\n            request.body += b'--' + changeset_boundary + b'--' + b'\\n'\n            request.body += b'--' + batch_boundary + b'--'\n\n            request.path, request.query = _update_request_uri_query(request)\n            request.headers = _update_storage_table_header(request)\n            auth = _sign_storage_table_request(request,\n                                               self.account_name,\n                                               self.account_key)\n            
request.headers.append(('Authorization', auth))\n\n            # Submit the whole request as batch request.\n            response = self.perform_request(request)\n            if response.status >= 300:\n                raise HTTPError(response.status,\n                                _ERROR_BATCH_COMMIT_FAIL,\n                                self.respheader,\n                                response.body)\n\n            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/\n            # The body of a ChangeSet response is either a response for all the\n            # successfully processed change request within the ChangeSet,\n            # formatted exactly as it would have appeared outside of a batch, \n            # or a single response indicating a failure of the entire ChangeSet.\n            responses = self._parse_batch_response(response.body)\n            if responses and responses[0].status >= 300:\n                self._report_batch_error(responses[0])\n\n    def cancel_batch(self):\n        ''' Resets the batch flag. '''\n        self.is_batch = False\n\n    def _parse_batch_response(self, body):\n        parts = body.split(b'--changesetresponse_')\n\n        responses = []\n        for part in parts:\n            httpLocation = part.find(b'HTTP/')\n            if httpLocation > 0:\n                response = self._parse_batch_response_part(part[httpLocation:])\n                responses.append(response)\n\n        return responses\n\n    def _parse_batch_response_part(self, part):\n        lines = part.splitlines();\n\n        # First line is the HTTP status/reason\n        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')\n\n        # Followed by headers and body\n        headers = []\n        body = b''\n        isBody = False\n        for line in lines[1:]:\n            if line == b'' and not isBody:\n                isBody = True\n            elif isBody:\n                body += line\n            else:\n                headerName, _, headerVal = line.partition(b':')\n                headers.append((headerName.lower(), headerVal))\n\n        return HTTPResponse(int(status), reason.strip(), headers, body)\n\n    def _report_batch_error(self, response):\n        xml = response.body.decode('utf-8')\n        doc = minidom.parseString(xml)\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code')\n        code = n[0].firstChild.nodeValue if n and n[0].firstChild else ''\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message')\n        message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml\n\n        raise WindowsAzureBatchOperationError(message, code)\n"
  },
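For orientation, a rough sketch of the _BatchClient lifecycle defined above, assuming the bundled azure package is importable. In the real SDK the table service constructs the entity requests; the Atom entry here is a hand-written stand-in carrying just the PartitionKey and RowKey properties the validators parse, and the account name and key are placeholders (with real credentials, commit_batch would sign and submit the multipart request in one round trip):

```python
from azure.http import HTTPRequest
from azure.http.batchclient import _BatchClient

# Minimal Atom <entry> with the two properties the batch validators read.
ENTITY = (
    b"<entry xmlns='http://www.w3.org/2005/Atom' "
    b"xmlns:d='http://schemas.microsoft.com/ado/2007/08/dataservices' "
    b"xmlns:m='http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'>"
    b"<content type='application/xml'><m:properties>"
    b"<d:PartitionKey>pk1</d:PartitionKey><d:RowKey>rk1</d:RowKey>"
    b"</m:properties></content></entry>")

client = _BatchClient(None, 'placeholder-key', 'myaccount')

client.begin_batch()                 # resets table/partition/row-key state
req = HTTPRequest()
req.host = 'myaccount.table.core.windows.net'
req.method = 'POST'                  # POST == insert; keys live in the body
req.path = '/mytable'
req.body = ENTITY
client.insert_request_to_batch(req)  # validates table, PartitionKey, RowKey
client.commit_batch()                # builds the multipart body and submits it
```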
  {
    "path": "DSC/azure/http/httpclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport base64\nimport os\nimport sys\n\nif sys.version_info < (3,):\n    from httplib import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urlparse import urlparse\nelse:\n    from http.client import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urllib.parse import urlparse\n\nfrom azure.http import HTTPError, HTTPResponse\nfrom azure import _USER_AGENT_STRING, _update_request_uri_query\n\n\nclass _HTTPClient(object):\n\n    '''\n    Takes the request and sends it to cloud service and returns the response.\n    '''\n\n    def __init__(self, service_instance, cert_file=None, account_name=None,\n                 account_key=None, protocol='https'):\n        '''\n        service_instance: service client instance.\n        cert_file:\n            certificate file name/location. This is only used in hosted\n            service management.\n        account_name: the storage account.\n        account_key:\n            the storage account access key.\n        '''\n        self.service_instance = service_instance\n        self.status = None\n        self.respheader = None\n        self.message = None\n        self.cert_file = cert_file\n        self.account_name = account_name\n        self.account_key = account_key\n        self.protocol = protocol\n        self.proxy_host = None\n        self.proxy_port = None\n        self.proxy_user = None\n        self.proxy_password = None\n        self.use_httplib = self.should_use_httplib()\n\n    def should_use_httplib(self):\n        if sys.platform.lower().startswith('win') and self.cert_file:\n            # On Windows, auto-detect between Windows Store Certificate\n            # (winhttp) and OpenSSL .pem certificate file (httplib).\n            #\n            # We used to only support certificates installed in the Windows\n            # Certificate Store.\n            #   cert_file example: CURRENT_USER\\my\\CertificateName\n            #\n            # We now support using an OpenSSL .pem certificate file,\n            # for a consistent experience across all platforms.\n            #   cert_file example: account\\certificate.pem\n            #\n            # When using OpenSSL .pem certificate file on Windows, make sure\n            # you are on CPython 2.7.4 or later.\n\n            # If it's not an existing file on disk, then treat it as a path in\n            # the Windows Certificate Store, which means we can't use httplib.\n            if not os.path.isfile(self.cert_file):\n                return False\n\n        return True\n\n    def set_proxy(self, host, port, user, password):\n        '''\n        Sets the proxy server host and port for the 
HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self.proxy_host = host\n        self.proxy_port = port\n        self.proxy_user = user\n        self.proxy_password = password\n\n    def get_uri(self, request):\n        ''' Return the target uri for the request.'''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n        return protocol + '://' + request.host + ':' + str(port) + request.path\n\n    def get_connection(self, request):\n        ''' Create connection for the request. '''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        target_host = request.host\n        target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n\n        if not self.use_httplib:\n            import azure.http.winhttp\n            connection = azure.http.winhttp._HTTPConnection(\n                target_host, cert_file=self.cert_file, protocol=protocol)\n            proxy_host = self.proxy_host\n            proxy_port = self.proxy_port\n        else:\n            if ':' in target_host:\n                target_host, _, target_port = target_host.rpartition(':')\n            if self.proxy_host:\n                proxy_host = target_host\n                proxy_port = target_port\n                host = self.proxy_host\n                port = self.proxy_port\n            else:\n                host = target_host\n                port = target_port\n\n            if protocol == 'http':\n                connection = HTTPConnection(host, int(port))\n            else:\n                connection = HTTPSConnection(\n                    host, int(port), cert_file=self.cert_file)\n\n        if self.proxy_host:\n            headers = None\n            if self.proxy_user and self.proxy_password:\n                auth = base64.b64encode(\n                    \"{0}:{1}\".format(self.proxy_user,\n                                     self.proxy_password).encode('utf-8'))\n                headers = {'Proxy-Authorization':\n                           'Basic {0}'.format(auth.decode('utf-8'))}\n            connection.set_tunnel(proxy_host, int(proxy_port), headers)\n\n        return connection\n\n    def send_request_headers(self, connection, request_headers):\n        if self.use_httplib:\n            if self.proxy_host:\n                for i in connection._buffer:\n                    if i.startswith(\"Host: \"):\n                        connection._buffer.remove(i)\n                connection.putheader(\n                    'Host', \"{0}:{1}\".format(connection._tunnel_host,\n                                             connection._tunnel_port))\n\n        for name, value in request_headers:\n            if value:\n                connection.putheader(name, value)\n\n        connection.putheader('User-Agent', _USER_AGENT_STRING)\n        connection.endheaders()\n\n    def send_request_body(self, connection, request_body):\n        if request_body:\n            assert isinstance(request_body, bytes)\n            connection.send(request_body)\n        elif (not isinstance(connection, HTTPSConnection) and\n              not isinstance(connection, HTTPConnection)):\n            connection.send(None)\n\n    def perform_request(self, request):\n        ''' Sends the request to the cloud service server and returns the response. 
'''\n        connection = self.get_connection(request)\n        try:\n            connection.putrequest(request.method, request.path)\n\n            if not self.use_httplib:\n                if self.proxy_host and self.proxy_user:\n                    connection.set_proxy_credentials(\n                        self.proxy_user, self.proxy_password)\n\n            self.send_request_headers(connection, request.headers)\n            self.send_request_body(connection, request.body)\n\n            resp = connection.getresponse()\n            self.status = int(resp.status)\n            self.message = resp.reason\n            self.respheader = headers = resp.getheaders()\n\n            # for consistency across platforms, make header names lowercase\n            for i, value in enumerate(headers):\n                headers[i] = (value[0].lower(), value[1])\n\n            respbody = None\n            if resp.length is None:\n                respbody = resp.read()\n            elif resp.length > 0:\n                respbody = resp.read(resp.length)\n\n            response = HTTPResponse(\n                int(resp.status), resp.reason, headers, respbody)\n            if self.status == 307:\n                new_url = urlparse(dict(headers)['location'])\n                request.host = new_url.hostname\n                request.path = new_url.path\n                request.path, request.query = _update_request_uri_query(request)\n                return self.perform_request(request)\n            if self.status >= 300:\n                raise HTTPError(self.status, self.message,\n                                self.respheader, respbody)\n\n            return response\n        finally:\n            connection.close()\n"
  },
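A short sketch of driving _HTTPClient directly through an HTTP proxy; the account, key, and proxy values are placeholders, and real callers normally reach this class through the service wrappers, which add the signed Authorization header before perform_request runs:

```python
from azure.http import HTTPError, HTTPRequest
from azure.http.httpclient import _HTTPClient

client = _HTTPClient(None, account_name='myaccount',
                     account_key='placeholder-key', protocol='https')
client.set_proxy('192.168.0.100', 6000, 'proxyuser', 'proxypass')

req = HTTPRequest()
req.host = 'myaccount.blob.core.windows.net'
req.method = 'GET'
req.path = '/?comp=list'

try:
    # get_connection picks httplib or winhttp and tunnels through the proxy;
    # perform_request follows 307 redirects and raises HTTPError for >= 300.
    resp = client.perform_request(req)
    print(resp.status, resp.headers)
except HTTPError as err:
    print(err.status, err.respbody)
```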
  {
    "path": "DSC/azure/http/winhttp.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom ctypes import (\n    c_void_p,\n    c_long,\n    c_ulong,\n    c_longlong,\n    c_ulonglong,\n    c_short,\n    c_ushort,\n    c_wchar_p,\n    c_byte,\n    byref,\n    Structure,\n    Union,\n    POINTER,\n    WINFUNCTYPE,\n    HRESULT,\n    oledll,\n    WinDLL,\n    )\nimport ctypes\nimport sys\n\nif sys.version_info >= (3,):\n    def unicode(text):\n        return text\n\n#------------------------------------------------------------------------------\n#  Constants that are used in COM operations\nVT_EMPTY = 0\nVT_NULL = 1\nVT_I2 = 2\nVT_I4 = 3\nVT_BSTR = 8\nVT_BOOL = 11\nVT_I1 = 16\nVT_UI1 = 17\nVT_UI2 = 18\nVT_UI4 = 19\nVT_I8 = 20\nVT_UI8 = 21\nVT_ARRAY = 8192\n\nHTTPREQUEST_PROXYSETTING_PROXY = 2\nHTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1\n\nHTTPREQUEST_PROXY_SETTING = c_long\nHTTPREQUEST_SETCREDENTIALS_FLAGS = c_long\n#------------------------------------------------------------------------------\n# Com related APIs that are used.\n_ole32 = oledll.ole32\n_oleaut32 = WinDLL('oleaut32')\n_CLSIDFromString = _ole32.CLSIDFromString\n_CoInitialize = _ole32.CoInitialize\n_CoInitialize.argtypes = [c_void_p]\n\n_CoCreateInstance = _ole32.CoCreateInstance\n\n_SysAllocString = _oleaut32.SysAllocString\n_SysAllocString.restype = c_void_p\n_SysAllocString.argtypes = [c_wchar_p]\n\n_SysFreeString = _oleaut32.SysFreeString\n_SysFreeString.argtypes = [c_void_p]\n\n# SAFEARRAY*\n# SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG\n# cElements);\n_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector\n_SafeArrayCreateVector.restype = c_void_p\n_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong]\n\n# HRESULT\n# SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData);\n_SafeArrayAccessData = _oleaut32.SafeArrayAccessData\n_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)]\n\n# HRESULT\n# SafeArrayUnaccessData(_In_ SAFEARRAY *psa);\n_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData\n_SafeArrayUnaccessData.argtypes = [c_void_p]\n\n# HRESULT\n# SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG\n# *plUbound);\n_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound\n_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)]\n\n\n#------------------------------------------------------------------------------\n\nclass BSTR(c_wchar_p):\n\n    ''' BSTR class in python. '''\n\n    def __init__(self, value):\n        super(BSTR, self).__init__(_SysAllocString(value))\n\n    def __del__(self):\n        _SysFreeString(self)\n\n\nclass VARIANT(Structure):\n\n    '''\n    VARIANT structure in python. Does not match the definition in\n    MSDN exactly & it is only mapping the used fields.  
Field names are also\n    slightly different.\n    '''\n\n    class _tagData(Union):\n\n        class _tagRecord(Structure):\n            _fields_ = [('pvoid', c_void_p), ('precord', c_void_p)]\n\n        _fields_ = [('llval', c_longlong),\n                    ('ullval', c_ulonglong),\n                    ('lval', c_long),\n                    ('ulval', c_ulong),\n                    ('ival', c_short),\n                    ('boolval', c_ushort),\n                    ('bstrval', BSTR),\n                    ('parray', c_void_p),\n                    ('record', _tagRecord)]\n\n    _fields_ = [('vt', c_ushort),\n                ('wReserved1', c_ushort),\n                ('wReserved2', c_ushort),\n                ('wReserved3', c_ushort),\n                ('vdata', _tagData)]\n\n    @staticmethod\n    def create_empty():\n        variant = VARIANT()\n        variant.vt = VT_EMPTY\n        variant.vdata.llval = 0\n        return variant\n\n    @staticmethod\n    def create_safearray_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_ARRAY | VT_UI1\n\n        length = len(text)\n        variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length)\n        pvdata = c_void_p()\n        _SafeArrayAccessData(variant.vdata.parray, byref(pvdata))\n        ctypes.memmove(pvdata, text, length)\n        _SafeArrayUnaccessData(variant.vdata.parray)\n\n        return variant\n\n    @staticmethod\n    def create_bstr_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_BSTR\n        variant.vdata.bstrval = BSTR(text)\n        return variant\n\n    @staticmethod\n    def create_bool_false():\n        variant = VARIANT()\n        variant.vt = VT_BOOL\n        variant.vdata.boolval = 0\n        return variant\n\n    def is_safearray_of_bytes(self):\n        return self.vt == VT_ARRAY | VT_UI1\n\n    def str_from_safearray(self):\n        assert self.vt == VT_ARRAY | VT_UI1\n        pvdata = c_void_p()\n        count = c_long()\n        _SafeArrayGetUBound(self.vdata.parray, 1, byref(count))\n        count = c_long(count.value + 1)\n        _SafeArrayAccessData(self.vdata.parray, byref(pvdata))\n        text = ctypes.string_at(pvdata, count)\n        _SafeArrayUnaccessData(self.vdata.parray)\n        return text\n\n    def __del__(self):\n        _VariantClear(self)\n\n# HRESULT VariantClear(_Inout_ VARIANTARG *pvarg);\n_VariantClear = _oleaut32.VariantClear\n_VariantClear.argtypes = [POINTER(VARIANT)]\n\n\nclass GUID(Structure):\n\n    ''' GUID structure in python. '''\n\n    _fields_ = [(\"data1\", c_ulong),\n                (\"data2\", c_ushort),\n                (\"data3\", c_ushort),\n                (\"data4\", c_byte * 8)]\n\n    def __init__(self, name=None):\n        if name is not None:\n            _CLSIDFromString(unicode(name), byref(self))\n\n\nclass _WinHttpRequest(c_void_p):\n\n    '''\n    Maps the COM API to Python class functions. 
Not all methods in\n    IWinHttpWebRequest are mapped - only the methods we use.\n    '''\n    _AddRef = WINFUNCTYPE(c_long) \\\n        (1, 'AddRef')\n    _Release = WINFUNCTYPE(c_long) \\\n        (2, 'Release')\n    _SetProxy = WINFUNCTYPE(HRESULT,\n                            HTTPREQUEST_PROXY_SETTING,\n                            VARIANT,\n                            VARIANT) \\\n        (7, 'SetProxy')\n    _SetCredentials = WINFUNCTYPE(HRESULT,\n                                  BSTR,\n                                  BSTR,\n                                  HTTPREQUEST_SETCREDENTIALS_FLAGS) \\\n        (8, 'SetCredentials')\n    _Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT) \\\n        (9, 'Open')\n    _SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR) \\\n        (10, 'SetRequestHeader')\n    _GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p)) \\\n        (11, 'GetResponseHeader')\n    _GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (12, 'GetAllResponseHeaders')\n    _Send = WINFUNCTYPE(HRESULT, VARIANT) \\\n        (13, 'Send')\n    _Status = WINFUNCTYPE(HRESULT, POINTER(c_long)) \\\n        (14, 'Status')\n    _StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (15, 'StatusText')\n    _ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (16, 'ResponseText')\n    _ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (17, 'ResponseBody')\n    _ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (18, 'ResponseStream')\n    _WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort)) \\\n        (21, 'WaitForResponse')\n    _Abort = WINFUNCTYPE(HRESULT) \\\n        (22, 'Abort')\n    _SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long) \\\n        (23, 'SetTimeouts')\n    _SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR) \\\n        (24, 'SetClientCertificate')\n\n    def open(self, method, url):\n        '''\n        Opens the request.\n\n        method: the request VERB 'GET', 'POST', etc.\n        url: the url to connect\n        '''\n        _WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000)\n\n        flag = VARIANT.create_bool_false()\n        _method = BSTR(method)\n        _url = BSTR(url)\n        _WinHttpRequest._Open(self, _method, _url, flag)\n\n    def set_request_header(self, name, value):\n        ''' Sets the request header. '''\n\n        _name = BSTR(name)\n        _value = BSTR(value)\n        _WinHttpRequest._SetRequestHeader(self, _name, _value)\n\n    def get_all_response_headers(self):\n        ''' Gets back all response headers. '''\n\n        bstr_headers = c_void_p()\n        _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))\n        bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)\n        headers = bstr_headers.value\n        _SysFreeString(bstr_headers)\n        return headers\n\n    def send(self, request=None):\n        ''' Sends the request body. '''\n\n        # Sends VT_EMPTY if it is GET, HEAD request.\n        if request is None:\n            var_empty = VARIANT.create_empty()\n            _WinHttpRequest._Send(self, var_empty)\n        else:  # Sends request body as SAFEArray.\n            _request = VARIANT.create_safearray_from_str(request)\n            _WinHttpRequest._Send(self, _request)\n\n    def status(self):\n        ''' Gets status of response. 
'''\n\n        status = c_long()\n        _WinHttpRequest._Status(self, byref(status))\n        return int(status.value)\n\n    def status_text(self):\n        ''' Gets status text of response. '''\n\n        bstr_status_text = c_void_p()\n        _WinHttpRequest._StatusText(self, byref(bstr_status_text))\n        bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)\n        status_text = bstr_status_text.value\n        _SysFreeString(bstr_status_text)\n        return status_text\n\n    def response_body(self):\n        '''\n        Gets response body as a SAFEARRAY and converts the SAFEARRAY to str.\n        If it is an XML file, the body always carries a 3-byte UTF-8 BOM\n        before <?xml, so we remove it.\n        '''\n        var_respbody = VARIANT()\n        _WinHttpRequest._ResponseBody(self, byref(var_respbody))\n        if var_respbody.is_safearray_of_bytes():\n            respbody = var_respbody.str_from_safearray()\n            if respbody[3:].startswith(b'<?xml') and\\\n               respbody.startswith(b'\\xef\\xbb\\xbf'):\n                respbody = respbody[3:]\n            return respbody\n        else:\n            return ''\n\n    def set_client_certificate(self, certificate):\n        '''Sets client certificate for the request. '''\n        _certificate = BSTR(certificate)\n        _WinHttpRequest._SetClientCertificate(self, _certificate)\n\n    def set_tunnel(self, host, port):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''\n        url = host\n        if port:\n            url = url + u':' + port\n\n        var_host = VARIANT.create_bstr_from_str(url)\n        var_empty = VARIANT.create_empty()\n\n        _WinHttpRequest._SetProxy(\n            self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)\n\n    def set_proxy_credentials(self, user, password):\n        _WinHttpRequest._SetCredentials(\n            self, BSTR(user), BSTR(password),\n            HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)\n\n    def __del__(self):\n        if self.value is not None:\n            _WinHttpRequest._Release(self)\n\n\nclass _Response(object):\n\n    ''' Response class corresponding to the response returned from httplib\n    HTTPConnection. '''\n\n    def __init__(self, _status, _status_text, _length, _headers, _respbody):\n        self.status = _status\n        self.reason = _status_text\n        self.length = _length\n        self.headers = _headers\n        self.respbody = _respbody\n\n    def getheaders(self):\n        '''Returns response headers.'''\n        return self.headers\n\n    def read(self, _length):\n        '''Returns response body. '''\n        return self.respbody[:_length]\n\n\nclass _HTTPConnection(object):\n\n    ''' Class corresponding to httplib HTTPConnection class. '''\n\n    def __init__(self, host, cert_file=None, key_file=None, protocol='http'):\n        ''' Initializes the IWinHttpWebRequest COM object.'''\n        self.host = unicode(host)\n        self.cert_file = cert_file\n        self._httprequest = _WinHttpRequest()\n        self.protocol = protocol\n        clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')\n        iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')\n        _CoInitialize(None)\n        _CoCreateInstance(byref(clsid), 0, 1, byref(iid),\n                          byref(self._httprequest))\n\n    def close(self):\n        pass\n\n    def set_tunnel(self, host, port=None, headers=None):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. 
'''\n        self._httprequest.set_tunnel(unicode(host), unicode(str(port)))\n\n    def set_proxy_credentials(self, user, password):\n        self._httprequest.set_proxy_credentials(\n            unicode(user), unicode(password))\n\n    def putrequest(self, method, uri):\n        ''' Connects to host and sends the request. '''\n\n        protocol = unicode(self.protocol + '://')\n        url = protocol + self.host + unicode(uri)\n        self._httprequest.open(unicode(method), url)\n\n        # sets certificate for the connection if cert_file is set.\n        if self.cert_file is not None:\n            self._httprequest.set_client_certificate(unicode(self.cert_file))\n\n    def putheader(self, name, value):\n        ''' Sends the headers of request. '''\n        if sys.version_info < (3,):\n            name = str(name).decode('utf-8')\n            value = str(value).decode('utf-8')\n        self._httprequest.set_request_header(name, value)\n\n    def endheaders(self):\n        ''' No operation. Exists only to provide the same interface of httplib\n        HTTPConnection.'''\n        pass\n\n    def send(self, request_body):\n        ''' Sends request body. '''\n        if not request_body:\n            self._httprequest.send()\n        else:\n            self._httprequest.send(request_body)\n\n    def getresponse(self):\n        ''' Gets the response and generates the _Response object'''\n        status = self._httprequest.status()\n        status_text = self._httprequest.status_text()\n\n        resp_headers = self._httprequest.get_all_response_headers()\n        fixed_headers = []\n        for resp_header in resp_headers.split('\\n'):\n            if (resp_header.startswith('\\t') or\\\n                resp_header.startswith(' ')) and fixed_headers:\n                # append to previous header\n                fixed_headers[-1] += resp_header\n            else:\n                fixed_headers.append(resp_header)\n\n        headers = []\n        for resp_header in fixed_headers:\n            if ':' in resp_header:\n                pos = resp_header.find(':')\n                headers.append(\n                    (resp_header[:pos].lower(), resp_header[pos + 1:].strip()))\n\n        body = self._httprequest.response_body()\n        length = len(body)\n\n        return _Response(status, status_text, length, headers, body)\n"
  },
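The class above is the Windows-only fallback that lets _HTTPClient authenticate with a certificate from the Windows certificate store; because it mirrors httplib's HTTPConnection interface, it can be exercised the same way. A sketch with placeholder subscription and certificate values:

```python
import sys
from azure.http.winhttp import _HTTPConnection

if sys.platform.startswith('win'):
    conn = _HTTPConnection('management.core.windows.net',
                           cert_file='CURRENT_USER\\my\\CertificateName',
                           protocol='https')
    # Same call sequence httplib users expect: open, headers, body, response.
    conn.putrequest('GET', '/placeholder-subscription-id/services/hostedservices')
    conn.putheader('x-ms-version', '2012-03-01')
    conn.endheaders()
    conn.send(None)
    resp = conn.getresponse()
    print(resp.status, resp.reason, resp.read(resp.length))
```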
  {
    "path": "DSC/azure/servicebus/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport json\nimport sys\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    WindowsAzureError,\n    xml_escape,\n    _create_entry,\n    _general_error_handler,\n    _get_entry_properties,\n    _get_child_nodes,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK,\n    _ERROR_QUEUE_NOT_FOUND,\n    _ERROR_TOPIC_NOT_FOUND,\n    )\nfrom azure.http import HTTPError\n\n# default rule name for subscription\nDEFAULT_RULE_NAME = '$Default'\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE'\nAZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY'\nAZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER'\n\n# namespace used for converting rules to objects\nXML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class corresponding to Queue Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''\n\n    def __init__(self, lock_duration=None, max_size_in_megabytes=None,\n                 requires_duplicate_detection=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 duplicate_detection_history_time_window=None,\n                 max_delivery_count=None, enable_batched_operations=None,\n                 size_in_bytes=None, message_count=None):\n\n        self.lock_duration = lock_duration\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.max_delivery_count = max_delivery_count\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n        self.message_count = message_count\n\n\nclass Topic(WindowsAzureData):\n\n    ''' Topic class corresponding to Topic Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. 
'''\n\n    def __init__(self, default_message_time_to_live=None,\n                 max_size_in_megabytes=None, requires_duplicate_detection=None,\n                 duplicate_detection_history_time_window=None,\n                 enable_batched_operations=None, size_in_bytes=None):\n\n        self.default_message_time_to_live = default_message_time_to_live\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n\n    @property\n    def max_size_in_mega_bytes(self):\n        import warnings\n        warnings.warn(\n            'This attribute has been changed to max_size_in_megabytes.')\n        return self.max_size_in_megabytes\n\n    @max_size_in_mega_bytes.setter\n    def max_size_in_mega_bytes(self, value):\n        self.max_size_in_megabytes = value\n\n\nclass Subscription(WindowsAzureData):\n\n    ''' Subscription class corresponding to Subscription Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''\n\n    def __init__(self, lock_duration=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 dead_lettering_on_filter_evaluation_exceptions=None,\n                 enable_batched_operations=None, max_delivery_count=None,\n                 message_count=None):\n\n        self.lock_duration = lock_duration\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.dead_lettering_on_filter_evaluation_exceptions = \\\n            dead_lettering_on_filter_evaluation_exceptions\n        self.enable_batched_operations = enable_batched_operations\n        self.max_delivery_count = max_delivery_count\n        self.message_count = message_count\n\n\nclass Rule(WindowsAzureData):\n\n    ''' Rule class corresponding to Rule Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''\n\n    def __init__(self, filter_type=None, filter_expression=None,\n                 action_type=None, action_expression=None):\n        self.filter_type = filter_type\n        self.filter_expression = filter_expression\n        self.action_type = action_type\n        self.action_expression = action_expression\n\n\nclass Message(WindowsAzureData):\n\n    ''' Message class that is used in the send message/get message APIs. '''\n\n    def __init__(self, body=None, service_bus_service=None, location=None,\n                 custom_properties=None,\n                 type='application/atom+xml;type=entry;charset=utf-8',\n                 broker_properties=None):\n        self.body = body\n        self.location = location\n        self.broker_properties = broker_properties\n        self.custom_properties = custom_properties\n        self.type = type\n        self.service_bus_service = service_bus_service\n        self._topic_name = None\n        self._subscription_name = None\n        self._queue_name = None\n\n        if not service_bus_service:\n            return\n\n        # if location is set, then extract the queue name for a queue message,\n        # or the topic and subscription names for a topic message.\n        if location:\n            if '/subscriptions/' in location:\n                pos = location.find('/subscriptions/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._topic_name = location[pos1 + 1:pos]\n                pos += len('/subscriptions/')\n                pos1 = location.find('/', pos)\n                self._subscription_name = location[pos:pos1]\n            elif '/messages/' in location:\n                pos = location.find('/messages/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._queue_name = location[pos1 + 1:pos]\n\n    def delete(self):\n        ''' Deletes itself if the queue name, or the topic and subscription\n        names, can be found. '''\n        if self._queue_name:\n            self.service_bus_service.delete_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.delete_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)\n\n    def unlock(self):\n        ''' Unlocks itself if the queue name, or the topic and subscription\n        names, can be found. '''\n        if self._queue_name:\n            self.service_bus_service.unlock_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.unlock_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)\n\n    def add_headers(self, request):\n        ''' Adds additional headers to the request for a message request. '''\n\n        # Adds custom properties\n        if self.custom_properties:\n            for name, value in self.custom_properties.items():\n                if sys.version_info < (3,) and isinstance(value, unicode):\n                    request.headers.append(\n                        (name, '\"' + value.encode('utf-8') + '\"'))\n                elif isinstance(value, str):\n                    request.headers.append((name, '\"' + str(value) + '\"'))\n                elif isinstance(value, datetime):\n                    request.headers.append(\n                        (name, '\"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '\"'))\n                else:\n                    request.headers.append((name, str(value).lower()))\n\n        # Adds content-type\n        request.headers.append(('Content-Type', self.type))\n\n        # Adds BrokerProperties\n        if self.broker_properties:\n            request.headers.append(\n                ('BrokerProperties', str(self.broker_properties)))\n\n        return request.headers\n\n\ndef _create_message(response, service_instance):\n    ''' Create message from response.\n\n    response: response from service bus cloud server.\n    service_instance: the service bus client.\n    '''\n    respbody = response.body\n    custom_properties = {}\n    broker_properties = None\n    message_type = None\n    message_location = None\n\n    # gets all information from the response headers.\n    for name, value in response.headers:\n        if name.lower() == 'brokerproperties':\n            broker_properties = json.loads(value)\n        elif name.lower() == 'content-type':\n            message_type = value\n        elif name.lower() == 'location':\n            message_location = value\n        elif name.lower() not in ['content-type',\n                                  'brokerproperties',\n                                  'transfer-encoding',\n                                  'server',\n                                  'location',\n                                  'date']:\n            if '\"' in value:\n                value = value[1:-1]\n                try:\n                    custom_properties[name] = datetime.strptime(\n                        value, '%a, %d %b %Y %H:%M:%S GMT')\n                except ValueError:\n                    custom_properties[name] = value\n            else:  # only int, float or boolean\n                if value.lower() == 'true':\n                    custom_properties[name] = True\n                elif value.lower() == 'false':\n                    custom_properties[name] = False\n                # int('3.1') doesn't work so need to get float('3.14') first\n                elif str(int(float(value))) == value:\n                    custom_properties[name] = int(value)\n                else:\n                    custom_properties[name] = float(value)\n\n    if message_type is None:\n        message = Message(\n            respbody, service_instance, message_location, custom_properties,\n            'application/atom+xml;type=entry;charset=utf-8', broker_properties)\n    else:\n        message = Message(respbody, service_instance, message_location,\n                          custom_properties, message_type, broker_properties)\n    return message\n\n# convert functions\n\n\ndef _convert_response_to_rule(response):\n    return _convert_xml_to_rule(response.body)\n\n\ndef _convert_xml_to_rule(xmlstr):\n    ''' Converts response xml to rule object.\n\n    The format of xml for rule:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n<content type='application/xml'>\n<RuleDescription\n    xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n    xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n    <Filter i:type=\"SqlFilterExpression\">\n        <SqlExpression>MyProperty='XYZ'</SqlExpression>\n    </Filter>\n    <Action i:type=\"SqlFilterAction\">\n        <SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>\n    </Action>\n</RuleDescription>\n</content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    rule = Rule()\n\n    for rule_desc in _get_children_from_path(xmldoc,\n                                             'entry',\n                                             'content',\n                                             'RuleDescription'):\n        for xml_filter in _get_child_nodes(rule_desc, 'Filter'):\n            filter_type = xml_filter.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'filter_type', str(filter_type))\n            if xml_filter.childNodes:\n\n                for expr in _get_child_nodes(xml_filter, 'SqlExpression'):\n                    setattr(rule, 'filter_expression',\n                            expr.firstChild.nodeValue)\n\n        for xml_action in _get_child_nodes(rule_desc, 'Action'):\n            action_type = xml_action.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'action_type', str(action_type))\n            if xml_action.childNodes:\n                action_expression = xml_action.childNodes[0].firstChild\n                if action_expression:\n                    setattr(rule, 'action_expression',\n                            action_expression.nodeValue)\n\n    # extract id, updated and name values from the feed entry and set them\n    # on the rule.\n    for name, value in _get_entry_properties(xmlstr, True, '/rules').items():\n        setattr(rule, name, value)\n\n    return rule\n\n\ndef _convert_response_to_queue(response):\n    return _convert_xml_to_queue(response.body)\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\ndef _convert_xml_to_queue(xmlstr):\n    ''' Converts xml response to queue object.\n\n    The format of xml response for queue:\n<QueueDescription\n    xmlns=\\\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\\\">\n    <MaxSizeInBytes>10000</MaxSizeInBytes>\n    <DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive>\n    <LockDuration>PT2M</LockDuration>\n    <RequiresGroupedReceives>False</RequiresGroupedReceives>\n    <SupportsDuplicateDetection>False</SupportsDuplicateDetection>\n    ...\n</QueueDescription>\n\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    queue = Queue()\n\n    invalid_queue = True\n    # get a
node for each attribute in Queue class, if nothing found then the\n    # response is not valid xml for Queue.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'QueueDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            queue.lock_duration = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            queue.max_size_in_megabytes = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            queue.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'RequiresSession')\n        if node_value is not None:\n            queue.requires_session = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            queue.default_message_time_to_live = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            queue.dead_lettering_on_message_expiration = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            queue.duplicate_detection_history_time_window = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            queue.enable_batched_operations = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            queue.max_delivery_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MessageCount')\n        if node_value is not None:\n            queue.message_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            queue.size_in_bytes = int(node_value)\n            invalid_queue = False\n\n    if invalid_queue:\n        raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND)\n\n    # extract id, updated and name value from feed entry and set them of queue.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(queue, name, value)\n\n    return queue\n\n\ndef _convert_response_to_topic(response):\n    return _convert_xml_to_topic(response.body)\n\n\ndef _convert_xml_to_topic(xmlstr):\n    '''Converts xml response to topic\n\n    The xml format for topic:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <TopicDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n        
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <MaxSizeInMegabytes>1024</MaxSizeInMegabytes>\n        <RequiresDuplicateDetection>false</RequiresDuplicateDetection>\n        <DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </TopicDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    topic = Topic()\n\n    invalid_topic = True\n\n    # get the node for each attribute in the Topic class; if nothing is\n    # found, the response is not valid xml for a Topic.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'TopicDescription'):\n        invalid_topic = True\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            topic.default_message_time_to_live = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            topic.max_size_in_megabytes = int(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            topic.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            topic.duplicate_detection_history_time_window = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            topic.enable_batched_operations = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            topic.size_in_bytes = int(node_value)\n            invalid_topic = False\n\n    if invalid_topic:\n        raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND)\n\n    # extract the id, updated and name values from the feed entry and set\n    # them on the topic.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(topic, name, value)\n    return topic\n\n\ndef _convert_response_to_subscription(response):\n    return _convert_xml_to_subscription(response.body)\n\n\ndef _convert_xml_to_subscription(xmlstr):\n    '''Converts xml response to subscription.\n\n    The xml format for subscription:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <SubscriptionDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n        <LockDuration>PT5M</LockDuration>\n        <RequiresSession>false</RequiresSession>\n        <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </SubscriptionDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n  
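  # copy each optional SubscriptionDescription element onto the object\n  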
  subscription = Subscription()\n\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'SubscriptionDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            subscription.lock_duration = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresSession')\n        if node_value is not None:\n            subscription.requires_session = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            subscription.default_message_time_to_live = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnFilterEvaluationExceptions')\n        if node_value is not None:\n            subscription.dead_lettering_on_filter_evaluation_exceptions = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            subscription.dead_lettering_on_message_expiration = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            subscription.enable_batched_operations = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            subscription.max_delivery_count = int(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MessageCount')\n        if node_value is not None:\n            subscription.message_count = int(node_value)\n\n    for name, value in _get_entry_properties(xmlstr,\n                                             True,\n                                             '/subscriptions').items():\n        setattr(subscription, name, value)\n\n    return subscription\n\n\ndef _convert_subscription_to_xml(subscription):\n    '''\n    Converts a subscription object to xml to send.  
The order of each field of\n    subscription in xml is very important so we can't simply call\n    convert_class_to_xml.\n\n    subscription: the subscription object to be converted.\n    '''\n\n    subscription_body = '<SubscriptionDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if subscription:\n        if subscription.lock_duration is not None:\n            subscription_body += ''.join(\n                ['<LockDuration>',\n                 str(subscription.lock_duration),\n                 '</LockDuration>'])\n\n        if subscription.requires_session is not None:\n            subscription_body += ''.join(\n                ['<RequiresSession>',\n                 str(subscription.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if subscription.default_message_time_to_live is not None:\n            subscription_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(subscription.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if subscription.dead_lettering_on_message_expiration is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(subscription.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnFilterEvaluationExceptions>',\n                 str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(),\n                 '</DeadLetteringOnFilterEvaluationExceptions>'])\n\n        if subscription.enable_batched_operations is not None:\n            subscription_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(subscription.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if subscription.max_delivery_count is not None:\n            subscription_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(subscription.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if subscription.message_count is not None:\n            subscription_body += ''.join(\n                ['<MessageCount>',\n                 str(subscription.message_count),\n                 '</MessageCount>'])\n\n    subscription_body += '</SubscriptionDescription>'\n    return _create_entry(subscription_body)\n\n\ndef _convert_rule_to_xml(rule):\n    '''\n    Converts a rule object to xml to send.  
The order of each field of rule\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    rule: the rule object to be converted.\n    '''\n    rule_body = '<RuleDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if rule:\n        if rule.filter_type:\n            rule_body += ''.join(\n                ['<Filter i:type=\"',\n                 xml_escape(rule.filter_type),\n                 '\">'])\n            if rule.filter_type == 'CorrelationFilter':\n                rule_body += ''.join(\n                    ['<CorrelationId>',\n                     xml_escape(rule.filter_expression),\n                     '</CorrelationId>'])\n            else:\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.filter_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Filter>'\n        if rule.action_type:\n            rule_body += ''.join(\n                ['<Action i:type=\"',\n                 xml_escape(rule.action_type),\n                 '\">'])\n            if rule.action_type == 'SqlRuleAction':\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.action_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Action>'\n    rule_body += '</RuleDescription>'\n\n    return _create_entry(rule_body)\n\n\ndef _convert_topic_to_xml(topic):\n    '''\n    Converts a topic object to xml to send.  The order of each field of topic\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    topic: the topic object to be converted.\n    '''\n\n    topic_body = '<TopicDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if topic:\n        if topic.default_message_time_to_live is not None:\n            topic_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(topic.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if topic.max_size_in_megabytes is not None:\n            topic_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(topic.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if topic.requires_duplicate_detection is not None:\n            topic_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(topic.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if topic.duplicate_detection_history_time_window is not None:\n            topic_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(topic.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if topic.enable_batched_operations is not None:\n            topic_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(topic.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if topic.size_in_bytes is not None:\n            topic_body += ''.join(\n                ['<SizeInBytes>',\n   
              str(topic.size_in_bytes),\n                 '</SizeInBytes>'])\n\n    topic_body += '</TopicDescription>'\n\n    return _create_entry(topic_body)\n\n\ndef _convert_queue_to_xml(queue):\n    '''\n    Converts a queue object to xml to send.  The order of each field of queue\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    queue: the queue object to be converted.\n    '''\n    queue_body = '<QueueDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if queue:\n        if queue.lock_duration:\n            queue_body += ''.join(\n                ['<LockDuration>',\n                 str(queue.lock_duration),\n                 '</LockDuration>'])\n\n        if queue.max_size_in_megabytes is not None:\n            queue_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(queue.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if queue.requires_duplicate_detection is not None:\n            queue_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(queue.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if queue.requires_session is not None:\n            queue_body += ''.join(\n                ['<RequiresSession>',\n                 str(queue.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if queue.default_message_time_to_live is not None:\n            queue_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(queue.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if queue.dead_lettering_on_message_expiration is not None:\n            queue_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(queue.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if queue.duplicate_detection_history_time_window is not None:\n            queue_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(queue.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if queue.max_delivery_count is not None:\n            queue_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(queue.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if queue.enable_batched_operations is not None:\n            queue_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(queue.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if queue.size_in_bytes is not None:\n            queue_body += ''.join(\n                ['<SizeInBytes>',\n                 str(queue.size_in_bytes),\n                 '</SizeInBytes>'])\n\n        if queue.message_count is not None:\n            queue_body += ''.join(\n                ['<MessageCount>',\n                 str(queue.message_count),\n                 '</MessageCount>'])\n\n    queue_body += '</QueueDescription>'\n    return _create_entry(queue_body)\n\n\ndef _service_bus_error_handler(http_error):\n    ''' Simple error handler for service bus service. '''\n    return _general_error_handler(http_error)\n\nfrom azure.servicebus.servicebusservice import ServiceBusService\n"
  },
  {
    "path": "DSC/azure/servicebus/servicebusservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport datetime\nimport os\nimport time\n\nfrom azure import (\n    WindowsAzureError,\n    SERVICE_BUS_HOST_BASE,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _sign_string,\n    _str,\n    _unicode_type,\n    _update_request_uri_query,\n    url_quote,\n    url_unquote,\n    _validate_not_none,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicebus import (\n    AZURE_SERVICEBUS_NAMESPACE,\n    AZURE_SERVICEBUS_ACCESS_KEY,\n    AZURE_SERVICEBUS_ISSUER,\n    _convert_topic_to_xml,\n    _convert_response_to_topic,\n    _convert_queue_to_xml,\n    _convert_response_to_queue,\n    _convert_subscription_to_xml,\n    _convert_response_to_subscription,\n    _convert_rule_to_xml,\n    _convert_response_to_rule,\n    _convert_xml_to_queue,\n    _convert_xml_to_topic,\n    _convert_xml_to_subscription,\n    _convert_xml_to_rule,\n    _create_message,\n    _service_bus_error_handler,\n    )\n\n\nclass ServiceBusService(object):\n\n    def __init__(self, service_namespace=None, account_key=None, issuer=None,\n                 x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,\n                 shared_access_key_name=None, shared_access_key_value=None,\n                 authentication=None):\n        '''\n        Initializes the service bus service for a namespace with the specified\n        authentication settings (SAS or ACS).\n\n        service_namespace:\n            Service bus namespace, required for all operations. If None,\n            the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.\n        account_key:\n            ACS authentication account key. If None, the value is set to the\n            AZURE_SERVICEBUS_ACCESS_KEY env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        issuer:\n            ACS authentication issuer. If None, the value is set to the\n            AZURE_SERVICEBUS_ISSUER env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        x_ms_version: Unused. Kept for backwards compatibility.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. 
Override this\n            for on-premise.\n        shared_access_key_name:\n            SAS authentication key name.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        shared_access_key_value:\n            SAS authentication key value.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        authentication:\n            Instance of authentication class. If this is specified, then\n            ACS and SAS parameters are ignored.\n        '''\n        self.requestid = None\n        self.service_namespace = service_namespace\n        self.host_base = host_base\n\n        if not self.service_namespace:\n            self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)\n\n        if not self.service_namespace:\n            raise WindowsAzureError('You need to provide servicebus namespace')\n\n        if authentication:\n            self.authentication = authentication\n        else:\n            if not account_key:\n                account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)\n            if not issuer:\n                issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)\n\n            if shared_access_key_name and shared_access_key_value:\n                self.authentication = ServiceBusSASAuthentication(\n                    shared_access_key_name,\n                    shared_access_key_value)\n            elif account_key and issuer:\n                self.authentication = ServiceBusWrapTokenAuthentication(\n                    account_key,\n                    issuer)\n            else:\n                raise WindowsAzureError(\n                    'You need to provide servicebus access key and Issuer OR shared access key and value')\n\n        self._httpclient = _HTTPClient(service_instance=self)\n        self._filter = self._httpclient.perform_request\n\n    # Backwards compatibility:\n    # account_key and issuer used to be stored on the service class, they are\n    # now stored on the authentication class.\n    @property\n    def account_key(self):\n        return self.authentication.account_key\n\n    @account_key.setter\n    def account_key(self, value):\n        self.authentication.account_key = value\n\n    @property\n    def issuer(self):\n        return self.authentication.issuer\n\n    @issuer.setter\n    def issuer(self, value):\n        self.authentication.issuer = value\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = ServiceBusService(\n            service_namespace=self.service_namespace,\n            authentication=self.authentication)\n\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def create_queue(self, queue_name, queue=None, fail_on_exist=False):\n        '''\n        Creates a new queue. Once created, this queue's resource manifest is\n        immutable.\n\n        queue_name: Name of the queue to create.\n        queue: Queue object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.body = _get_request_body(_convert_queue_to_xml(queue))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Deletes an existing queue. This operation will also remove all\n        associated state including messages in the queue.\n\n        queue_name: Name of the queue to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the queue doesn't exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue(self, queue_name):\n        '''\n        Retrieves an existing queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_queue(response)\n\n    def list_queues(self):\n        '''\n        Enumerates the queues in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Queues'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_queue)\n\n    def 
create_topic(self, topic_name, topic=None, fail_on_exist=False):\n        '''\n        Creates a new topic. Once created, this topic's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic to create.\n        topic: Topic object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the topic exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.body = _get_request_body(_convert_topic_to_xml(topic))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_topic(self, topic_name, fail_not_exist=False):\n        '''\n        Deletes an existing topic. This operation will also remove all\n        associated state including associated subscriptions.\n\n        topic_name: Name of the topic to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the topic doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_topic(self, topic_name):\n        '''\n        Retrieves the description for the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_topic(response)\n\n    def list_topics(self):\n        '''\n        Retrieves the topics in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Topics'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_topic)\n\n    def create_rule(self, topic_name, subscription_name, rule_name, rule=None,\n                    fail_on_exist=False):\n        '''\n        Creates a new rule. 
Once created, this rule's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        fail_on_exist:\n            Specify whether to throw an exception when the rule exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.body = _get_request_body(_convert_rule_to_xml(rule))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_rule(self, topic_name, subscription_name, rule_name,\n                    fail_not_exist=False):\n        '''\n        Deletes an existing rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name:\n            Name of the rule to delete.  DEFAULT_RULE_NAME=$Default.\n            Use DEFAULT_RULE_NAME to delete default rule for the subscription.\n        fail_not_exist:\n            Specify whether throw exception when rule doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_rule(self, topic_name, subscription_name, rule_name):\n        '''\n        Retrieves the description for the specified rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        
request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_rule(response)\n\n    def list_rules(self, topic_name, subscription_name):\n        '''\n        Retrieves the rules that exist under the specified subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/rules/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_rule)\n\n    def create_subscription(self, topic_name, subscription_name,\n                            subscription=None, fail_on_exist=False):\n        '''\n        Creates a new subscription. Once created, this subscription's\n        resource manifest is immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        subscription: Subscription object to create.\n        fail_on_exist:\n            Specify whether to throw an exception if the subscription exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.body = _get_request_body(\n            _convert_subscription_to_xml(subscription))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_subscription(self, topic_name, subscription_name,\n                            fail_not_exist=False):\n        '''\n        Deletes an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the subscription\n            doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n  
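              # _dont_fail_not_exist re-raises any other error\n  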
              return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_subscription(self, topic_name, subscription_name):\n        '''\n        Gets an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_subscription(response)\n\n    def list_subscriptions(self, topic_name):\n        '''\n        Retrieves the subscriptions in the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response,\n                                          _convert_xml_to_subscription)\n\n    def send_topic_message(self, topic_name, message=None):\n        '''\n        Enqueues a message into the specified topic. The limit to the number\n        of messages which may be present in the topic is governed by the\n        message size in MaxTopicSizeInBytes. If this message causes the topic\n        to exceed its quota, a quota exceeded error is returned and the\n        message will be rejected.\n\n        topic_name: Name of the topic.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only(\n            'message.body', message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_subscription_message(self, topic_name, subscription_name,\n                                       timeout='60'):\n        '''\n        This operation is used to atomically retrieve and lock a message for\n        processing. The message is guaranteed not to be delivered to other\n        receivers (on the same subscription only) during the lock duration\n        period specified in the subscription description. Once the lock\n        expires, the message will be available to other receivers. In order to\n        complete processing of the message, the receiver should issue a delete\n        command with the lock ID received from this operation. To abandon\n        processing of the message and unlock it for other receivers, an Unlock\n        Message command should be issued, or the lock duration period can\n        expire.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Unlocks a message for processing by other receivers on a given\n        subscription. This operation deletes the lock object, causing the\n        message to be unlocked. A message must have first been locked by a\n        receiver before this operation is called.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_subscription_message(self, topic_name, subscription_name,\n                                         timeout='60'):\n        '''\n        Read and delete a message from a subscription as an atomic operation.\n        This operation should be used when a best-effort guarantee is\n        sufficient for an application; that is, using this operation it is\n        possible for messages to be lost if processing fails.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the\n        subscription. This operation should only be called after processing a\n        previously locked message is successful to maintain At-Least-Once\n        delivery assurances.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def send_queue_message(self, queue_name, message=None):\n        '''\n        Sends a message into the specified queue. The limit to the number of\n        messages which may be present in the queue is governed by the queue\n        size specified in MaxSizeInMegabytes. If this message causes the queue\n        to exceed its quota, a quota exceeded error is returned and the\n        message will be rejected.\n\n        queue_name: Name of the queue.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only('message.body',\n                                                    message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_queue_message(self, queue_name, timeout='60'):\n        '''\n        Atomically retrieves and locks a message from a queue for processing.\n        The message is guaranteed not to be delivered to other receivers\n        during the lock duration period specified in the queue description.\n        Once the lock expires, the message will be available to other\n        receivers. In order to complete processing of the message, the\n        receiver should issue a delete command with the lock ID received from\n        this operation. To abandon processing of the message and unlock it for\n        other receivers, an Unlock Message command should be issued, or the\n        lock duration period can expire.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Unlocks a message for processing by other receivers on a given\n        queue. This operation deletes the lock object, causing the\n        message to be unlocked. A message must have first been locked by a\n        receiver before this operation is called.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_queue_message(self, queue_name, timeout='60'):\n        '''\n        Reads and deletes a message from a queue as an atomic operation. This\n        operation should be used when a best-effort guarantee is sufficient\n        for an application; that is, using this operation it is possible for\n        messages to be lost if processing fails.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the\n        queue. This operation should only be called after processing a\n        previously locked message is successful to maintain At-Least-Once\n        delivery assurances.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def receive_queue_message(self, queue_name, peek_lock=True, 
timeout=60):\n        '''\n        Receive a message from a queue for processing.\n\n        queue_name: Name of the queue.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_queue_message(queue_name, timeout)\n        else:\n            return self.read_delete_queue_message(queue_name, timeout)\n\n    def receive_subscription_message(self, topic_name, subscription_name,\n                                     peek_lock=True, timeout=60):\n        '''\n        Receive a message from a subscription for processing.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_subscription_message(topic_name,\n                                                       subscription_name,\n                                                       timeout)\n        else:\n            return self.read_delete_subscription_message(topic_name,\n                                                         subscription_name,\n                                                         timeout)\n\n    def _get_host(self):\n        return self.service_namespace + self.host_base\n\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _service_bus_error_handler(ex)\n\n        return resp\n\n    def _update_service_bus_header(self, request):\n        ''' Add additional headers for service bus. '''\n\n        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n            request.headers.append(('Content-Length', str(len(request.body))))\n\n        # if it is not a GET or HEAD request, we must set the content-type.\n        if request.method not in ['GET', 'HEAD']:\n            for name, _ in request.headers:\n                if 'content-type' == name.lower():\n                    break\n            else:\n                request.headers.append(\n                    ('Content-Type',\n                     'application/atom+xml;type=entry;charset=utf-8'))\n\n        # Adds authorization header for authentication.\n        self.authentication.sign_request(request, self._httpclient)\n\n        return request.headers\n\n\n# Token cache for Authentication\n# Shared by the different instances of ServiceBusWrapTokenAuthentication\n_tokens = {}\n\n\nclass ServiceBusWrapTokenAuthentication:\n    def __init__(self, account_key, issuer):\n        self.account_key = account_key\n        self.issuer = issuer\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        ''' Return the signed string with token. '''\n        return 'WRAP access_token=\"' + \\\n                self._get_token(request.host, request.path, httpclient) + '\"'\n\n    def _token_is_expired(self, token):\n        ''' Check whether the token has expired. 
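The token is a form-encoded\n        string whose ExpiresOn field holds the expiry time in seconds since\n        the epoch; a 30 second safety margin is applied below. 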
'''\n        time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')\n        time_pos_end = token.find('&', time_pos_begin)\n        token_expire_time = int(token[time_pos_begin:time_pos_end])\n        time_now = time.mktime(time.localtime())\n\n        # Add a 30 second margin so the token will not have expired by the\n        # time it reaches the server.\n        return (token_expire_time - time_now) < 30\n\n    def _get_token(self, host, path, httpclient):\n        '''\n        Returns a token for the request.\n\n        host: the service bus request host.\n        path: the service bus request path.\n        '''\n        wrap_scope = 'http://' + host + path + self.issuer + self.account_key\n\n        # Return the cached token for this scope if it is still usable.\n        if wrap_scope in _tokens:\n            token = _tokens[wrap_scope]\n            if not self._token_is_expired(token):\n                return token\n\n        # get a token from the access control server\n        request = HTTPRequest()\n        request.protocol_override = 'https'\n        request.host = host.replace('.servicebus.', '-sb.accesscontrol.')\n        request.method = 'POST'\n        request.path = '/WRAPv0.9'\n        request.body = ('wrap_name=' + url_quote(self.issuer) +\n                        '&wrap_password=' + url_quote(self.account_key) +\n                        '&wrap_scope=' +\n                        url_quote('http://' + host + path)).encode('utf-8')\n        request.headers.append(('Content-Length', str(len(request.body))))\n        resp = httpclient.perform_request(request)\n\n        token = resp.body.decode('utf-8')\n        token = url_unquote(token[token.find('=') + 1:token.rfind('&')])\n        _tokens[wrap_scope] = token\n\n        return token\n\n\nclass ServiceBusSASAuthentication:\n    def __init__(self, key_name, key_value):\n        self.key_name = key_name\n        self.key_value = key_value\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        uri = httpclient.get_uri(request)\n        uri = url_quote(uri, '').lower()\n        expiry = str(self._get_expiry())\n\n        to_sign = uri + '\\n' + expiry\n        signature = url_quote(_sign_string(self.key_value, to_sign, False), '')\n\n        auth_format = 'SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'\n        auth = auth_format.format(signature, expiry, self.key_name, uri)\n\n        return auth\n\n    def _get_expiry(self):\n        '''Returns the UTC time, in seconds since the epoch, when this signed\n        request expires (5 minutes from now).'''\n        return int(round(time.time() + 300))\n"
  },
  {
    "path": "DSC/azure/servicemanagement/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    _Base64String,\n    _create_entry,\n    _dict_of,\n    _encode_base64,\n    _general_error_handler,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _list_of,\n    _scalar_list_of,\n    _str,\n    _xml_attribute,\n    )\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'\nAZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'\n\n# x-ms-version for service management.\nX_MS_VERSION = '2013-06-01'\n\n#-----------------------------------------------------------------------------\n# Data classes\n\n\nclass StorageServices(WindowsAzureData):\n\n    def __init__(self):\n        self.storage_services = _list_of(StorageService)\n\n    def __iter__(self):\n        return iter(self.storage_services)\n\n    def __len__(self):\n        return len(self.storage_services)\n\n    def __getitem__(self, index):\n        return self.storage_services[index]\n\n\nclass StorageService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = ''\n        self.service_name = ''\n        self.storage_service_properties = StorageAccountProperties()\n        self.storage_service_keys = StorageServiceKeys()\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass StorageAccountProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.affinity_group = u''\n        self.location = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.endpoints = _scalar_list_of(str, 'Endpoint')\n        self.geo_replication_enabled = False\n        self.geo_primary_region = u''\n        self.status_of_primary = u''\n        self.geo_secondary_region = u''\n        self.status_of_secondary = u''\n        self.last_geo_failover_time = u''\n        self.creation_time = u''\n\n\nclass StorageServiceKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.primary = u''\n        self.secondary = u''\n\n\nclass Locations(WindowsAzureData):\n\n    def __init__(self):\n        self.locations = _list_of(Location)\n\n    def __iter__(self):\n        return iter(self.locations)\n\n    def __len__(self):\n        return len(self.locations)\n\n    def __getitem__(self, index):\n        return self.locations[index]\n\n\nclass Location(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.display_name = u''\n        self.available_services = _scalar_list_of(str, 'AvailableService')\n\n\nclass 
AffinityGroup(WindowsAzureData):\n\n    def __init__(self):\n        self.name = ''\n        self.label = _Base64String()\n        self.description = u''\n        self.location = u''\n        self.hosted_services = HostedServices()\n        self.storage_services = StorageServices()\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass AffinityGroups(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_groups = _list_of(AffinityGroup)\n\n    def __iter__(self):\n        return iter(self.affinity_groups)\n\n    def __len__(self):\n        return len(self.affinity_groups)\n\n    def __getitem__(self, index):\n        return self.affinity_groups[index]\n\n\nclass HostedServices(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_services = _list_of(HostedService)\n\n    def __iter__(self):\n        return iter(self.hosted_services)\n\n    def __len__(self):\n        return len(self.hosted_services)\n\n    def __getitem__(self, index):\n        return self.hosted_services[index]\n\n\nclass HostedService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = u''\n        self.service_name = u''\n        self.hosted_service_properties = HostedServiceProperties()\n        self.deployments = Deployments()\n\n\nclass HostedServiceProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.location = u''\n        self.affinity_group = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.date_created = u''\n        self.date_last_modified = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass VirtualNetworkSites(WindowsAzureData):\n\n    def __init__(self):\n        self.virtual_network_sites = _list_of(VirtualNetworkSite)\n\n    def __iter__(self):\n        return iter(self.virtual_network_sites)\n\n    def __len__(self):\n        return len(self.virtual_network_sites)\n\n    def __getitem__(self, index):\n        return self.virtual_network_sites[index]\n\n\nclass VirtualNetworkSite(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.id = u''\n        self.affinity_group = u''\n        self.subnets = Subnets()\n\n\nclass Subnets(WindowsAzureData):\n\n    def __init__(self):\n        self.subnets = _list_of(Subnet)\n\n    def __iter__(self):\n        return iter(self.subnets)\n\n    def __len__(self):\n        return len(self.subnets)\n\n    def __getitem__(self, index):\n        return self.subnets[index]\n\n\nclass Subnet(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.address_prefix = u''\n\n\n\nclass Deployments(WindowsAzureData):\n\n    def __init__(self):\n        self.deployments = _list_of(Deployment)\n\n    def __iter__(self):\n        return iter(self.deployments)\n\n    def __len__(self):\n        return len(self.deployments)\n\n    def __getitem__(self, index):\n        return self.deployments[index]\n\n\nclass Deployment(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.deployment_slot = u''\n        self.private_id = u''\n        self.status = u''\n        self.label = _Base64String()\n        self.url = u''\n        self.configuration = _Base64String()\n        self.role_instance_list = RoleInstanceList()\n        self.upgrade_status = UpgradeStatus()\n        self.upgrade_domain_count = u''\n        self.role_list = RoleList()\n        self.sdk_version = u''\n        self.input_endpoint_list 
= InputEndpoints()\n        self.locked = False\n        self.rollback_allowed = False\n        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()\n        self.created_time = u''\n        self.virtual_network_name = u''\n        self.last_modified_time = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass RoleInstanceList(WindowsAzureData):\n\n    def __init__(self):\n        self.role_instances = _list_of(RoleInstance)\n\n    def __iter__(self):\n        return iter(self.role_instances)\n\n    def __len__(self):\n        return len(self.role_instances)\n\n    def __getitem__(self, index):\n        return self.role_instances[index]\n\n\nclass RoleInstance(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.instance_name = u''\n        self.instance_status = u''\n        self.instance_upgrade_domain = 0\n        self.instance_fault_domain = 0\n        self.instance_size = u''\n        self.instance_state_details = u''\n        self.instance_error_code = u''\n        self.ip_address = u''\n        self.instance_endpoints = InstanceEndpoints()\n        self.power_state = u''\n        self.fqdn = u''\n        self.host_name = u''\n\n\nclass InstanceEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.instance_endpoints = _list_of(InstanceEndpoint)\n\n    def __iter__(self):\n        return iter(self.instance_endpoints)\n\n    def __len__(self):\n        return len(self.instance_endpoints)\n\n    def __getitem__(self, index):\n        return self.instance_endpoints[index]\n\n\nclass InstanceEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.vip = u''\n        self.public_port = u''\n        self.local_port = u''\n        self.protocol = u''\n\n\nclass UpgradeStatus(WindowsAzureData):\n\n    def __init__(self):\n        self.upgrade_type = u''\n        self.current_upgrade_domain_state = u''\n        self.current_upgrade_domain = u''\n\n\nclass InputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(InputEndpoint)\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass InputEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.vip = u''\n        self.port = u''\n\n\nclass RoleList(WindowsAzureData):\n\n    def __init__(self):\n        self.roles = _list_of(Role)\n\n    def __iter__(self):\n        return iter(self.roles)\n\n    def __len__(self):\n        return len(self.roles)\n\n    def __getitem__(self, index):\n        return self.roles[index]\n\n\nclass Role(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass PersistentVMDowntimeInfo(WindowsAzureData):\n\n    def __init__(self):\n        self.start_time = u''\n        self.end_time = u''\n        self.status = u''\n\n\nclass Certificates(WindowsAzureData):\n\n    def __init__(self):\n        self.certificates = 
_list_of(Certificate)\n\n    def __iter__(self):\n        return iter(self.certificates)\n\n    def __len__(self):\n        return len(self.certificates)\n\n    def __getitem__(self, index):\n        return self.certificates[index]\n\n\nclass Certificate(WindowsAzureData):\n\n    def __init__(self):\n        self.certificate_url = u''\n        self.thumbprint = u''\n        self.thumbprint_algorithm = u''\n        self.data = u''\n\n\nclass OperationError(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.message = u''\n\n\nclass Operation(WindowsAzureData):\n\n    def __init__(self):\n        self.id = u''\n        self.status = u''\n        self.http_status_code = u''\n        self.error = OperationError()\n\n\nclass OperatingSystem(WindowsAzureData):\n\n    def __init__(self):\n        self.version = u''\n        self.label = _Base64String()\n        self.is_default = True\n        self.is_active = True\n        self.family = 0\n        self.family_label = _Base64String()\n\n\nclass OperatingSystems(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_systems = _list_of(OperatingSystem)\n\n    def __iter__(self):\n        return iter(self.operating_systems)\n\n    def __len__(self):\n        return len(self.operating_systems)\n\n    def __getitem__(self, index):\n        return self.operating_systems[index]\n\n\nclass OperatingSystemFamily(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.label = _Base64String()\n        self.operating_systems = OperatingSystems()\n\n\nclass OperatingSystemFamilies(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_system_families = _list_of(OperatingSystemFamily)\n\n    def __iter__(self):\n        return iter(self.operating_system_families)\n\n    def __len__(self):\n        return len(self.operating_system_families)\n\n    def __getitem__(self, index):\n        return self.operating_system_families[index]\n\n\nclass Subscription(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_id = u''\n        self.subscription_name = u''\n        self.subscription_status = u''\n        self.account_admin_live_email_id = u''\n        self.service_admin_live_email_id = u''\n        self.max_core_count = 0\n        self.max_storage_accounts = 0\n        self.max_hosted_services = 0\n        self.current_core_count = 0\n        self.current_hosted_services = 0\n        self.current_storage_accounts = 0\n        self.max_virtual_network_sites = 0\n        self.max_local_network_sites = 0\n        self.max_dns_servers = 0\n\n\nclass AvailabilityResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.result = False\n\n\nclass SubscriptionCertificates(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificates = _list_of(SubscriptionCertificate)\n\n    def __iter__(self):\n        return iter(self.subscription_certificates)\n\n    def __len__(self):\n        return len(self.subscription_certificates)\n\n    def __getitem__(self, index):\n        return self.subscription_certificates[index]\n\n\nclass SubscriptionCertificate(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificate_public_key = u''\n        self.subscription_certificate_thumbprint = u''\n        self.subscription_certificate_data = u''\n        self.created = u''\n\n\nclass Images(WindowsAzureData):\n\n    def __init__(self):\n        self.images = _list_of(OSImage)\n\n    def __iter__(self):\n        return 
iter(self.images)\n\n    def __len__(self):\n        return len(self.images)\n\n    def __getitem__(self, index):\n        return self.images[index]\n\n\nclass OSImage(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.category = u''\n        self.location = u''\n        self.logical_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.eula = u''\n        self.description = u''\n\n\nclass Disks(WindowsAzureData):\n\n    def __init__(self):\n        self.disks = _list_of(Disk)\n\n    def __iter__(self):\n        return iter(self.disks)\n\n    def __len__(self):\n        return len(self.disks)\n\n    def __getitem__(self, index):\n        return self.disks[index]\n\n\nclass Disk(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.attached_to = AttachedTo()\n        self.has_operating_system = u''\n        self.is_corrupted = u''\n        self.location = u''\n        self.logical_disk_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.source_image_name = u''\n\n\nclass AttachedTo(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_service_name = u''\n        self.deployment_name = u''\n        self.role_name = u''\n\n\nclass PersistentVMRole(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''  # undocumented\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass ConfigurationSets(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_sets = _list_of(ConfigurationSet)\n\n    def __iter__(self):\n        return iter(self.configuration_sets)\n\n    def __len__(self):\n        return len(self.configuration_sets)\n\n    def __getitem__(self, index):\n        return self.configuration_sets[index]\n\n\nclass ConfigurationSet(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_set_type = u'NetworkConfiguration'\n        self.role_type = u''\n        self.input_endpoints = ConfigurationSetInputEndpoints()\n        self.subnet_names = _scalar_list_of(str, 'SubnetName')\n\n\nclass ConfigurationSetInputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(\n            ConfigurationSetInputEndpoint, 'InputEndpoint')\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass ConfigurationSetInputEndpoint(WindowsAzureData):\n\n    '''\n    Initializes a network configuration input endpoint.\n\n    name: Specifies the name for the external endpoint.\n    protocol:\n        Specifies the transport protocol for the endpoint. 
Possible values are: TCP, UDP.\n    port: Specifies the external port to use for the endpoint.\n    local_port:\n        Specifies the internal port on which the virtual machine is listening\n        to serve the endpoint.\n    load_balanced_endpoint_set_name:\n        Specifies a name for a set of load-balanced endpoints. Specifying this\n        element for a given endpoint adds it to the set. If you are configuring\n        an endpoint used to connect to the virtual machine via Remote Desktop,\n        do not set this property.\n    enable_direct_server_return:\n        Specifies whether direct server return load balancing is enabled.\n    '''\n\n    def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'',\n                 load_balanced_endpoint_set_name=u'',\n                 enable_direct_server_return=False):\n        self.enable_direct_server_return = enable_direct_server_return\n        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name\n        self.local_port = local_port\n        self.name = name\n        self.port = port\n        self.load_balancer_probe = LoadBalancerProbe()\n        self.protocol = protocol\n\n\nclass WindowsConfigurationSet(WindowsAzureData):\n\n    def __init__(self, computer_name=None, admin_password=None,\n                 reset_password_on_first_logon=None,\n                 enable_automatic_updates=None, time_zone=None,\n                 admin_username=None):\n        self.configuration_set_type = u'WindowsProvisioningConfiguration'\n        self.computer_name = computer_name\n        self.admin_password = admin_password\n        self.admin_username = admin_username\n        self.reset_password_on_first_logon = reset_password_on_first_logon\n        self.enable_automatic_updates = enable_automatic_updates\n        self.time_zone = time_zone\n        self.domain_join = DomainJoin()\n        self.stored_certificate_settings = StoredCertificateSettings()\n        self.win_rm = WinRM()\n\n\nclass DomainJoin(WindowsAzureData):\n\n    def __init__(self):\n        self.credentials = Credentials()\n        self.join_domain = u''\n        self.machine_object_ou = u''\n\n\nclass Credentials(WindowsAzureData):\n\n    def __init__(self):\n        self.domain = u''\n        self.username = u''\n        self.password = u''\n\n\nclass StoredCertificateSettings(WindowsAzureData):\n\n    def __init__(self):\n        self.stored_certificate_settings = _list_of(CertificateSetting)\n\n    def __iter__(self):\n        return iter(self.stored_certificate_settings)\n\n    def __len__(self):\n        return len(self.stored_certificate_settings)\n\n    def __getitem__(self, index):\n        return self.stored_certificate_settings[index]\n\n\n
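# Illustrative sketch (not part of the original module): assembling the system\n# and network configuration sets for a VM role, assuming _list_of behaves like\n# a plain list; all values below are placeholders.\n#\n#     system_config = WindowsConfigurationSet(computer_name='myvm',\n#                                             admin_password='<password>',\n#                                             admin_username='<user>')\n#     network_config = ConfigurationSet()  # NetworkConfiguration by default\n#     network_config.input_endpoints.input_endpoints.append(\n#         ConfigurationSetInputEndpoint(name='web', protocol='tcp',\n#                                       port='80', local_port='80'))\n\n\n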
class CertificateSetting(WindowsAzureData):\n\n    '''\n    Initializes a certificate setting.\n\n    thumbprint:\n        Specifies the thumbprint of the certificate to be provisioned. The\n        thumbprint must specify an existing service certificate.\n    store_name:\n        Specifies the name of the certificate store from which to retrieve the\n        certificate.\n    store_location:\n        Specifies the target certificate store location on the virtual machine.\n        The only supported value is LocalMachine.\n    '''\n\n    def __init__(self, thumbprint=u'', store_name=u'', store_location=u''):\n        self.thumbprint = thumbprint\n        self.store_name = store_name\n        self.store_location = store_location\n\n\nclass WinRM(WindowsAzureData):\n\n    '''\n    Contains configuration settings for the Windows Remote Management service on\n    the Virtual Machine.\n    '''\n\n    def __init__(self):\n        self.listeners = Listeners()\n\n\nclass Listeners(WindowsAzureData):\n\n    def __init__(self):\n        self.listeners = _list_of(Listener)\n\n    def __iter__(self):\n        return iter(self.listeners)\n\n    def __len__(self):\n        return len(self.listeners)\n\n    def __getitem__(self, index):\n        return self.listeners[index]\n\n\nclass Listener(WindowsAzureData):\n\n    '''\n    Specifies the protocol and certificate information for the listener.\n\n    protocol:\n        Specifies the protocol of the listener. Possible values are: Http,\n        Https. The value is case sensitive.\n    certificate_thumbprint:\n        Optional. Specifies the certificate thumbprint for the secure\n        connection. If this value is not specified, a self-signed certificate is\n        generated and used for the Virtual Machine.\n    '''\n\n    def __init__(self, protocol=u'', certificate_thumbprint=u''):\n        self.protocol = protocol\n        self.certificate_thumbprint = certificate_thumbprint\n\n\nclass LinuxConfigurationSet(WindowsAzureData):\n\n    def __init__(self, host_name=None, user_name=None, user_password=None,\n                 disable_ssh_password_authentication=None):\n        self.configuration_set_type = u'LinuxProvisioningConfiguration'\n        self.host_name = host_name\n        self.user_name = user_name\n        self.user_password = user_password\n        self.disable_ssh_password_authentication =\\\n            disable_ssh_password_authentication\n        self.ssh = SSH()\n\n\nclass SSH(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = PublicKeys()\n        self.key_pairs = KeyPairs()\n\n\nclass PublicKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = _list_of(PublicKey)\n\n    def __iter__(self):\n        return iter(self.public_keys)\n\n    def __len__(self):\n        return len(self.public_keys)\n\n    def __getitem__(self, index):\n        return self.public_keys[index]\n\n\nclass PublicKey(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass KeyPairs(WindowsAzureData):\n\n    def __init__(self):\n        self.key_pairs = _list_of(KeyPair)\n\n    def __iter__(self):\n        return iter(self.key_pairs)\n\n    def __len__(self):\n        return len(self.key_pairs)\n\n    def __getitem__(self, index):\n        return self.key_pairs[index]\n\n\nclass KeyPair(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass LoadBalancerProbe(WindowsAzureData):\n\n    def __init__(self):\n        self.path = u''\n        self.port = u''\n        self.protocol = u''\n\n\nclass 
DataVirtualHardDisks(WindowsAzureData):\n\n    def __init__(self):\n        self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)\n\n    def __iter__(self):\n        return iter(self.data_virtual_hard_disks)\n\n    def __len__(self):\n        return len(self.data_virtual_hard_disks)\n\n    def __getitem__(self, index):\n        return self.data_virtual_hard_disks[index]\n\n\nclass DataVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self):\n        self.host_caching = u''\n        self.disk_label = u''\n        self.disk_name = u''\n        self.lun = 0\n        self.logical_disk_size_in_gb = 0\n        self.media_link = u''\n\n\nclass OSVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self, source_image_name=None, media_link=None,\n                 host_caching=None, disk_label=None, disk_name=None):\n        self.source_image_name = source_image_name\n        self.media_link = media_link\n        self.host_caching = host_caching\n        self.disk_label = disk_label\n        self.disk_name = disk_name\n        self.os = u''  # undocumented, not used when adding a role\n\n\nclass AsynchronousOperationResult(WindowsAzureData):\n\n    def __init__(self, request_id=None):\n        self.request_id = request_id\n\n\nclass ServiceBusRegion(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.fullname = u''\n\n\nclass ServiceBusNamespace(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.region = u''\n        self.default_key = u''\n        self.status = u''\n        self.created_at = u''\n        self.acs_management_endpoint = u''\n        self.servicebus_endpoint = u''\n        self.connection_string = u''\n        self.subscription_id = u''\n        self.enabled = False\n\n\nclass WebSpaces(WindowsAzureData):\n\n    def __init__(self):\n        self.web_space = _list_of(WebSpace)\n\n    def __iter__(self):\n        return iter(self.web_space)\n\n    def __len__(self):\n        return len(self.web_space)\n\n    def __getitem__(self, index):\n        return self.web_space[index]\n    \n\nclass WebSpace(WindowsAzureData):\n    \n    def __init__(self):\n        self.availability_state = u''\n        self.geo_location = u''\n        self.geo_region = u''\n        self.name = u''\n        self.plan = u''\n        self.status = u''\n        self.subscription = u''\n\n\nclass Sites(WindowsAzureData):\n\n    def __init__(self):\n        self.site = _list_of(Site)\n\n    def __iter__(self):\n        return iter(self.site)\n\n    def __len__(self):\n        return len(self.site)\n\n    def __getitem__(self, index):\n        return self.site[index]\n    \n\nclass Site(WindowsAzureData):\n    \n    def __init__(self):\n        self.admin_enabled = False\n        self.availability_state = ''\n        self.compute_mode = ''\n        self.enabled = False\n        self.enabled_host_names = _scalar_list_of(str, 'a:string')\n        self.host_name_ssl_states = HostNameSslStates()\n        self.host_names = _scalar_list_of(str, 'a:string')\n        self.last_modified_time_utc = ''\n        self.name = ''\n        self.repository_site_name = ''\n        self.self_link = ''\n        self.server_farm = ''\n        self.site_mode = ''\n        self.state = ''\n        self.storage_recovery_default_state = ''\n        self.usage_state = ''\n        self.web_space = ''\n\n\nclass HostNameSslStates(WindowsAzureData):\n\n    def __init__(self):\n        self.host_name_ssl_state = _list_of(HostNameSslState)\n\n    def __iter__(self):\n  
      return iter(self.host_name_ssl_state)\n\n    def __len__(self):\n        return len(self.host_name_ssl_state)\n\n    def __getitem__(self, index):\n        return self.host_name_ssl_state[index]\n\n\nclass HostNameSslState(WindowsAzureData):\n    \n    def __init__(self):\n        self.name = u''\n        self.ssl_state = u''\n        \n\nclass PublishData(WindowsAzureData):\n    _xml_name = 'publishData'\n    \n    def __init__(self):\n        self.publish_profiles = _list_of(PublishProfile, 'publishProfile')\n\nclass PublishProfile(WindowsAzureData):\n    \n    def __init__(self):\n        self.profile_name = _xml_attribute('profileName')\n        self.publish_method = _xml_attribute('publishMethod')\n        self.publish_url = _xml_attribute('publishUrl')\n        self.msdeploysite = _xml_attribute('msdeploySite')\n        self.user_name = _xml_attribute('userName')\n        self.user_pwd = _xml_attribute('userPWD')\n        self.destination_app_url = _xml_attribute('destinationAppUrl')\n        self.sql_server_db_connection_string = _xml_attribute('SQLServerDBConnectionString')\n        self.my_sqldb_connection_string = _xml_attribute('mySQLDBConnectionString')\n        self.hosting_provider_forum_link = _xml_attribute('hostingProviderForumLink')\n        self.control_panel_link = _xml_attribute('controlPanelLink')\n    \nclass QueueDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.lock_duration = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.requires_session = False\n        self.default_message_time_to_live = u''\n        self.dead_lettering_on_message_expiration = False\n        self.duplicate_detection_history_time_window = u''\n        self.max_delivery_count = 0\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.message_count = 0\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.auto_delete_on_idle = u''\n        self.count_details = CountDetails()\n        self.entity_availability_status = u''\n    \nclass TopicDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.default_message_time_to_live = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.duplicate_detection_history_time_window = u''\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.filtering_messages_before_publishing = False\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.count_details = CountDetails()\n        self.subscription_count = 0\n\nclass CountDetails(WindowsAzureData):\n    \n    def __init__(self):\n        self.active_message_count = 0\n        self.dead_letter_message_count = 0\n        self.scheduled_message_count = 0\n        self.transfer_message_count = 0\n        self.transfer_dead_letter_message_count = 0\n\nclass NotificationHubDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.registration_ttl = u''\n        self.authorization_rules = AuthorizationRules()\n\nclass 
AuthorizationRules(WindowsAzureData):\n\n    def __init__(self):\n        self.authorization_rule = _list_of(AuthorizationRule)\n\n    def __iter__(self):\n        return iter(self.authorization_rule)\n\n    def __len__(self):\n        return len(self.authorization_rule)\n\n    def __getitem__(self, index):\n        return self.authorization_rule[index]\n\n\nclass AuthorizationRule(WindowsAzureData):\n\n    def __init__(self):\n        self.claim_type = u''\n        self.claim_value = u''\n        self.rights = _scalar_list_of(str, 'AccessRights')\n        self.created_time = u''\n        self.modified_time = u''\n        self.key_name = u''\n        self.primary_key = u''\n        self.secondary_key = u''\n\n\nclass RelayDescription(WindowsAzureData):\n\n    def __init__(self):\n        self.path = u''\n        self.listener_type = u''\n        self.listener_count = 0\n        self.created_at = u''\n        self.updated_at = u''\n\n\nclass MetricResponses(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_response = _list_of(MetricResponse)\n\n    def __iter__(self):\n        return iter(self.metric_response)\n\n    def __len__(self):\n        return len(self.metric_response)\n\n    def __getitem__(self, index):\n        return self.metric_response[index]\n\n\nclass MetricResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.data = Data()\n        self.message = u''\n\n\nclass Data(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.end_time = u''\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.start_time = u''\n        self.time_grain = u''\n        self.unit = u''\n        self.values = Values()\n\n\nclass Values(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_sample = _list_of(MetricSample)\n\n    def __iter__(self):\n        return iter(self.metric_sample)\n\n    def __len__(self):\n        return len(self.metric_sample)\n\n    def __getitem__(self, index):\n        return self.metric_sample[index]\n\n\nclass MetricSample(WindowsAzureData):\n\n    def __init__(self):\n        self.count = 0\n        self.time_created = u''\n        self.total = 0\n\n\nclass MetricDefinitions(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_definition = _list_of(MetricDefinition)\n\n    def __iter__(self):\n        return iter(self.metric_definition)\n\n    def __len__(self):\n        return len(self.metric_definition)\n\n    def __getitem__(self, index):\n        return self.metric_definition[index]\n\n\nclass MetricDefinition(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.metric_availabilities = MetricAvailabilities()\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.unit = u''\n\n\nclass MetricAvailabilities(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_availability = _list_of(MetricAvailability, 'MetricAvailabilily')\n\n    def __iter__(self):\n        return iter(self.metric_availability)\n\n    def __len__(self):\n        return len(self.metric_availability)\n\n    def __getitem__(self, index):\n        return self.metric_availability[index]\n\n\nclass MetricAvailability(WindowsAzureData):\n\n    def __init__(self):\n        self.retention = u''\n        self.time_grain = u''\n\n\n
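# Illustrative sketch (not part of the original module): walking a\n# deserialized MetricResponses collection; 'responses' is a placeholder.\n#\n#     for response in responses:\n#         for sample in response.data.values:\n#             print(response.data.name, sample.time_created, sample.total)\n\n\n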
class Servers(WindowsAzureData):\n\n    def __init__(self):\n        self.server = _list_of(Server)\n\n    def __iter__(self):\n        return iter(self.server)\n\n    def __len__(self):\n        return len(self.server)\n\n    def __getitem__(self, index):\n        return self.server[index]\n\n\nclass Server(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.administrator_login = u''\n        self.location = u''\n        self.fully_qualified_domain_name = u''\n        self.version = u''\n\n\nclass Database(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.type = u''\n        self.state = u''\n        self.self_link = u''\n        self.parent_link = u''\n        self.id = 0\n        self.edition = u''\n        self.collation_name = u''\n        self.creation_date = u''\n        self.is_federation_root = False\n        self.is_system_object = False\n        self.max_size_bytes = 0\n\n\ndef _update_management_header(request):\n    ''' Add additional headers for management. '''\n\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # Append additional headers based on the service.\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # If the request is not GET or HEAD, a content-type must be set.\n    if request.method not in ['GET', 'HEAD']:\n        for name, _ in request.headers:\n            if 'content-type' == name.lower():\n                break\n        else:\n            request.headers.append(\n                ('Content-Type',\n                 'application/atom+xml;type=entry;charset=utf-8'))\n\n    return request.headers\n\n\ndef _parse_response_for_async_op(response):\n    ''' Extracts the request id from the response headers. '''\n\n    if response is None:\n        return None\n\n    result = AsynchronousOperationResult()\n    if response.headers:\n        for name, value in response.headers:\n            if name.lower() == 'x-ms-request-id':\n                result.request_id = value\n\n    return result\n\n\ndef _management_error_handler(http_error):\n    ''' Simple error handler for management service. 
'''\n    return _general_error_handler(http_error)\n\n\ndef _lower(text):\n    return text.lower()\n\n\nclass _XmlSerializer(object):\n\n    @staticmethod\n    def create_storage_service_input_to_xml(service_name, description, label,\n                                            affinity_group, location,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateStorageServiceInput',\n            [('ServiceName', service_name),\n             ('Description', description),\n             ('Label', label, _encode_base64),\n             ('AffinityGroup', affinity_group),\n             ('Location', location),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def update_storage_service_input_to_xml(description, label,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpdateStorageServiceInput',\n            [('Description', description),\n             ('Label', label, _encode_base64),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def regenerate_keys_to_xml(key_type):\n        return _XmlSerializer.doc_from_data('RegenerateKeys',\n                                            [('KeyType', key_type)])\n\n    @staticmethod\n    def update_hosted_service_to_xml(label, description, extended_properties):\n        return _XmlSerializer.doc_from_data('UpdateHostedService',\n                                            [('Label', label, _encode_base64),\n                                             ('Description', description)],\n                                            extended_properties)\n\n    @staticmethod\n    def create_hosted_service_to_xml(service_name, label, description,\n                                     location, affinity_group,\n                                     extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateHostedService',\n            [('ServiceName', service_name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location),\n             ('AffinityGroup', affinity_group)],\n            extended_properties)\n\n    @staticmethod\n    def create_deployment_to_xml(name, package_url, label, configuration,\n                                 start_deployment, treat_warnings_as_error,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateDeployment',\n            [('Name', name),\n             ('PackageUrl', package_url),\n             ('Label', label, _encode_base64),\n             ('Configuration', configuration),\n             ('StartDeployment',\n             start_deployment, _lower),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def swap_deployment_to_xml(production, source_deployment):\n        return _XmlSerializer.doc_from_data(\n            'Swap',\n            [('Production', production),\n             ('SourceDeployment', source_deployment)])\n\n    @staticmethod\n    def update_deployment_status_to_xml(status):\n        return _XmlSerializer.doc_from_data(\n     
       'UpdateDeploymentStatus',\n            [('Status', status)])\n\n    @staticmethod\n    def change_deployment_to_xml(configuration, treat_warnings_as_error, mode,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'ChangeConfiguration',\n            [('Configuration', configuration),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower),\n             ('Mode', mode)],\n            extended_properties)\n\n    @staticmethod\n    def upgrade_deployment_to_xml(mode, package_url, configuration, label,\n                                  role_to_upgrade, force, extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpgradeDeployment',\n            [('Mode', mode),\n             ('PackageUrl', package_url),\n             ('Configuration', configuration),\n             ('Label', label, _encode_base64),\n             ('RoleToUpgrade', role_to_upgrade),\n             ('Force', force, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def rollback_upgrade_to_xml(mode, force):\n        return _XmlSerializer.doc_from_data(\n            'RollbackUpdateOrUpgrade',\n            [('Mode', mode),\n             ('Force', force, _lower)])\n\n    @staticmethod\n    def walk_upgrade_domain_to_xml(upgrade_domain):\n        return _XmlSerializer.doc_from_data(\n            'WalkUpgradeDomain',\n            [('UpgradeDomain', upgrade_domain)])\n\n    @staticmethod\n    def certificate_file_to_xml(data, certificate_format, password):\n        return _XmlSerializer.doc_from_data(\n            'CertificateFile',\n            [('Data', data),\n             ('CertificateFormat', certificate_format),\n             ('Password', password)])\n\n    @staticmethod\n    def create_affinity_group_to_xml(name, label, description, location):\n        return _XmlSerializer.doc_from_data(\n            'CreateAffinityGroup',\n            [('Name', name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location)])\n\n    @staticmethod\n    def update_affinity_group_to_xml(label, description):\n        return _XmlSerializer.doc_from_data(\n            'UpdateAffinityGroup',\n            [('Label', label, _encode_base64),\n             ('Description', description)])\n\n    @staticmethod\n    def subscription_certificate_to_xml(public_key, thumbprint, data):\n        return _XmlSerializer.doc_from_data(\n            'SubscriptionCertificate',\n            [('SubscriptionCertificatePublicKey', public_key),\n             ('SubscriptionCertificateThumbprint', thumbprint),\n             ('SubscriptionCertificateData', data)])\n\n    @staticmethod\n    def os_image_to_xml(label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'OSImage',\n            [('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun,\n                                      logical_disk_size_in_gb, media_link,\n                                      source_media_link):\n        return _XmlSerializer.doc_from_data(\n            'DataVirtualHardDisk',\n            [('HostCaching', host_caching),\n             ('DiskLabel', disk_label),\n             ('DiskName', disk_name),\n             ('Lun', lun),\n             ('LogicalDiskSizeInGB', logical_disk_size_in_gb),\n             
('MediaLink', media_link),\n             ('SourceMediaLink', source_media_link)])\n\n    @staticmethod\n    def disk_to_xml(has_operating_system, label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'Disk',\n            [('HasOperatingSystem', has_operating_system, _lower),\n             ('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def restart_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'RestartRoleOperation',\n            '<OperationType>RestartRoleOperation</OperationType>')\n\n    @staticmethod\n    def shutdown_role_operation_to_xml(post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRoleOperation'),\n             ('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRoleOperation', xml)\n\n    @staticmethod\n    def shutdown_roles_operation_to_xml(role_names, post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        xml += _XmlSerializer.data_to_xml(\n             [('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRolesOperation', xml)\n\n    @staticmethod\n    def start_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'StartRoleOperation',\n            '<OperationType>StartRoleOperation</OperationType>')\n\n    @staticmethod\n    def start_roles_operation_to_xml(role_names):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'StartRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        return _XmlSerializer.doc_from_xml('StartRolesOperation', xml)\n\n    @staticmethod\n    def windows_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('ComputerName', configuration.computer_name),\n             ('AdminPassword', configuration.admin_password),\n             ('ResetPasswordOnFirstLogon',\n              configuration.reset_password_on_first_logon,\n              _lower),\n             ('EnableAutomaticUpdates',\n              configuration.enable_automatic_updates,\n              _lower),\n             ('TimeZone', configuration.time_zone)])\n\n        if configuration.domain_join is not None:\n            xml += '<DomainJoin>'\n            xml += '<Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('Domain', configuration.domain_join.credentials.domain),\n                 ('Username', configuration.domain_join.credentials.username),\n                 ('Password', configuration.domain_join.credentials.password)])\n            xml += '</Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('JoinDomain', configuration.domain_join.join_domain),\n                 ('MachineObjectOU',\n                  configuration.domain_join.machine_object_ou)])\n            xml += '</DomainJoin>'\n        if configuration.stored_certificate_settings is not None:\n            xml += 
'<StoredCertificateSettings>'\n            for cert in configuration.stored_certificate_settings:\n                xml += '<CertificateSetting>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('StoreLocation', cert.store_location),\n                     ('StoreName', cert.store_name),\n                     ('Thumbprint', cert.thumbprint)])\n                xml += '</CertificateSetting>'\n            xml += '</StoredCertificateSettings>'\n        if configuration.win_rm is not None:\n            xml += '<WinRM><Listeners>'\n            for listener in configuration.win_rm.listeners:\n                xml += '<Listener>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Protocol', listener.protocol),\n                     ('CertificateThumbprint', listener.certificate_thumbprint)])\n                xml += '</Listener>'\n            xml += '</Listeners></WinRM>'\n        xml += _XmlSerializer.data_to_xml(\n            [('AdminUsername', configuration.admin_username)])\n        return xml\n\n    @staticmethod\n    def linux_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('HostName', configuration.host_name),\n             ('UserName', configuration.user_name),\n             ('UserPassword', configuration.user_password),\n             ('DisableSshPasswordAuthentication',\n              configuration.disable_ssh_password_authentication,\n              _lower)])\n\n        if configuration.ssh is not None:\n            xml += '<SSH>'\n            xml += '<PublicKeys>'\n            for key in configuration.ssh.public_keys:\n                xml += '<PublicKey>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</PublicKey>'\n            xml += '</PublicKeys>'\n            xml += '<KeyPairs>'\n            for key in configuration.ssh.key_pairs:\n                xml += '<KeyPair>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</KeyPair>'\n            xml += '</KeyPairs>'\n            xml += '</SSH>'\n        return xml\n\n    @staticmethod\n    def network_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type)])\n        xml += '<InputEndpoints>'\n        for endpoint in configuration.input_endpoints:\n            xml += '<InputEndpoint>'\n            xml += _XmlSerializer.data_to_xml(\n                [('LoadBalancedEndpointSetName',\n                  endpoint.load_balanced_endpoint_set_name),\n                 ('LocalPort', endpoint.local_port),\n                 ('Name', endpoint.name),\n                 ('Port', endpoint.port)])\n\n            if endpoint.load_balancer_probe.path or\\\n                endpoint.load_balancer_probe.port or\\\n                endpoint.load_balancer_probe.protocol:\n                xml += '<LoadBalancerProbe>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Path', endpoint.load_balancer_probe.path),\n                     ('Port', endpoint.load_balancer_probe.port),\n                     ('Protocol', endpoint.load_balancer_probe.protocol)])\n                xml += '</LoadBalancerProbe>'\n\n            xml += 
_XmlSerializer.data_to_xml(\n                [('Protocol', endpoint.protocol),\n                 ('EnableDirectServerReturn',\n                  endpoint.enable_direct_server_return,\n                  _lower)])\n\n            xml += '</InputEndpoint>'\n        xml += '</InputEndpoints>'\n        xml += '<SubnetNames>'\n        for name in configuration.subnet_names:\n            xml += _XmlSerializer.data_to_xml([('SubnetName', name)])\n        xml += '</SubnetNames>'\n        return xml\n\n    @staticmethod\n    def role_to_xml(availability_set_name, data_virtual_hard_disks,\n                    network_configuration_set, os_virtual_hard_disk, role_name,\n                    role_size, role_type, system_configuration_set):\n        xml = _XmlSerializer.data_to_xml([('RoleName', role_name),\n                                          ('RoleType', role_type)])\n\n        xml += '<ConfigurationSets>'\n\n        if system_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            if isinstance(system_configuration_set, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    system_configuration_set)\n            elif isinstance(system_configuration_set, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    system_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        if network_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            xml += _XmlSerializer.network_configuration_to_xml(\n                network_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        xml += '</ConfigurationSets>'\n\n        if availability_set_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('AvailabilitySetName', availability_set_name)])\n\n        if data_virtual_hard_disks is not None:\n            xml += '<DataVirtualHardDisks>'\n            for hd in data_virtual_hard_disks:\n                xml += '<DataVirtualHardDisk>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('HostCaching', hd.host_caching),\n                     ('DiskLabel', hd.disk_label),\n                     ('DiskName', hd.disk_name),\n                     ('Lun', hd.lun),\n                     ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),\n                     ('MediaLink', hd.media_link)])\n                xml += '</DataVirtualHardDisk>'\n            xml += '</DataVirtualHardDisks>'\n\n        if os_virtual_hard_disk is not None:\n            xml += '<OSVirtualHardDisk>'\n            xml += _XmlSerializer.data_to_xml(\n                [('HostCaching', os_virtual_hard_disk.host_caching),\n                 ('DiskLabel', os_virtual_hard_disk.disk_label),\n                 ('DiskName', os_virtual_hard_disk.disk_name),\n                 ('MediaLink', os_virtual_hard_disk.media_link),\n                 ('SourceImageName', os_virtual_hard_disk.source_image_name)])\n            xml += '</OSVirtualHardDisk>'\n\n        if role_size is not None:\n            xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])\n\n        return xml\n\n    @staticmethod\n    def add_role_to_xml(role_name, system_configuration_set,\n                        os_virtual_hard_disk, role_type,\n                        network_configuration_set, availability_set_name,\n                        data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            
availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def update_role_to_xml(role_name, os_virtual_hard_disk, role_type,\n                           network_configuration_set, availability_set_name,\n                           data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            None)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def capture_role_to_xml(post_capture_action, target_image_name,\n                            target_image_label, provisioning_configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'CaptureRoleOperation'),\n             ('PostCaptureAction', post_capture_action)])\n\n        if provisioning_configuration is not None:\n            xml += '<ProvisioningConfiguration>'\n            if isinstance(provisioning_configuration, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    provisioning_configuration)\n            elif isinstance(provisioning_configuration, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    provisioning_configuration)\n            xml += '</ProvisioningConfiguration>'\n\n        xml += _XmlSerializer.data_to_xml(\n            [('TargetImageLabel', target_image_label),\n             ('TargetImageName', target_image_name)])\n\n        return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)\n\n    @staticmethod\n    def virtual_machine_deployment_to_xml(deployment_name, deployment_slot,\n                                          label, role_name,\n                                          system_configuration_set,\n                                          os_virtual_hard_disk, role_type,\n                                          network_configuration_set,\n                                          availability_set_name,\n                                          data_virtual_hard_disks, role_size,\n                                          virtual_network_name):\n        xml = _XmlSerializer.data_to_xml([('Name', deployment_name),\n                                          ('DeploymentSlot', deployment_slot),\n                                          ('Label', label)])\n        xml += '<RoleList>'\n        xml += '<Role>'\n        xml += _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        xml += '</Role>'\n        xml += '</RoleList>'\n\n        if virtual_network_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('VirtualNetworkName', virtual_network_name)])\n\n        return _XmlSerializer.doc_from_xml('Deployment', xml)\n\n    @staticmethod\n    def create_website_to_xml(webspace_name, website_name, geo_region, plan,\n    
                          host_names, compute_mode, server_farm, site_mode):\n        xml = '<HostNames xmlns:a=\"http://schemas.microsoft.com/2003/10/Serialization/Arrays\">'\n        for host_name in host_names:\n            xml += '<a:string>{0}</a:string>'.format(host_name)\n        xml += '</HostNames>'\n        xml += _XmlSerializer.data_to_xml(\n            [('Name', website_name),\n             ('ComputeMode', compute_mode),\n             ('ServerFarm', server_farm),\n             ('SiteMode', site_mode)])\n        xml += '<WebSpaceToCreate>'\n        xml += _XmlSerializer.data_to_xml(\n            [('GeoRegion', geo_region),\n             ('Name', webspace_name),\n             ('Plan', plan)])\n        xml += '</WebSpaceToCreate>'\n        return _XmlSerializer.doc_from_xml('Site', xml)\n\n    @staticmethod\n    def data_to_xml(data):\n        '''Creates an xml fragment from the specified data.\n           data: Array of tuples, where first: xml element name\n                                        second: xml element text\n                                        third: conversion function\n        '''\n        xml = ''\n        for element in data:\n            name = element[0]\n            val = element[1]\n            if len(element) > 2:\n                converter = element[2]\n            else:\n                converter = None\n\n            if val is not None:\n                if converter is not None:\n                    text = _str(converter(_str(val)))\n                else:\n                    text = _str(val)\n\n                xml += ''.join(['<', name, '>', text, '</', name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_xml(document_element_name, inner_xml):\n        '''Wraps the specified xml in an xml root element with default azure\n        namespaces'''\n        xml = ''.join(['<', document_element_name,\n                      ' xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"',\n                      ' xmlns=\"http://schemas.microsoft.com/windowsazure\">'])\n        xml += inner_xml\n        xml += ''.join(['</', document_element_name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_data(document_element_name, data, extended_properties=None):\n        xml = _XmlSerializer.data_to_xml(data)\n        if extended_properties is not None:\n            xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(\n                extended_properties)\n        return _XmlSerializer.doc_from_xml(document_element_name, xml)\n\n    @staticmethod\n    def extended_properties_dict_to_xml_fragment(extended_properties):\n        xml = ''\n        if extended_properties is not None and len(extended_properties) > 0:\n            xml += '<ExtendedProperties>'\n            for key, val in extended_properties.items():\n                xml += ''.join(['<ExtendedProperty>',\n                                '<Name>',\n                                _str(key),\n                                '</Name>',\n                               '<Value>',\n                               _str(val),\n                               '</Value>',\n                               '</ExtendedProperty>'])\n            xml += '</ExtendedProperties>'\n        return xml\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\nclass _ServiceBusManagementXmlSerializer(object):\n\n    @staticmethod\n    def namespace_to_xml(region):\n        '''Converts a service bus namespace description to xml\n\n        The xml 
format:\n<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <content type=\"application/xml\">\n        <NamespaceDescription\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n            <Region>West US</Region>\n        </NamespaceDescription>\n    </content>\n</entry>\n        '''\n        body = '<NamespaceDescription xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n        body += ''.join(['<Region>', region, '</Region>'])\n        body += '</NamespaceDescription>'\n\n        return _create_entry(body)\n\n    @staticmethod\n    def xml_to_namespace(xmlstr):\n        '''Converts xml response to service bus namespace\n\n        The xml format for namespace:\n<entry>\n<id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id>\n<title type=\"text\">myunittests</title>\n<updated>2012-08-22T16:48:10Z</updated>\n<content type=\"application/xml\">\n    <NamespaceDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Name>myunittests</Name>\n    <Region>West US</Region>\n    <DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey>\n    <Status>Active</Status>\n    <CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt>\n    <AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint>\n    <ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint>\n    <ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString>\n    <SubscriptionId>00000000000000000000000000000000</SubscriptionId>\n    <Enabled>true</Enabled>\n    </NamespaceDescription>\n</content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        namespace = ServiceBusNamespace()\n\n        mappings = (\n            ('Name', 'name', None),\n            ('Region', 'region', None),\n            ('DefaultKey', 'default_key', None),\n            ('Status', 'status', None),\n            ('CreatedAt', 'created_at', None),\n            ('AcsManagementEndpoint', 'acs_management_endpoint', None),\n            ('ServiceBusEndpoint', 'servicebus_endpoint', None),\n            ('ConnectionString', 'connection_string', None),\n            ('SubscriptionId', 'subscription_id', None),\n            ('Enabled', 'enabled', _parse_bool),\n        )\n\n        for desc in _get_children_from_path(xmldoc,\n                                            'entry',\n                                            'content',\n                                            'NamespaceDescription'):\n            for xml_name, field_name, conversion_func in mappings:\n                node_value = _get_first_child_node_value(desc, xml_name)\n                if node_value is not None:\n                    if conversion_func is not None:\n                        node_value = conversion_func(node_value)\n                    setattr(namespace, field_name, node_value)\n\n        return namespace\n\n    @staticmethod\n    def xml_to_region(xmlstr):\n        '''Converts xml response to service bus region\n\n        The xml format for region:\n<entry>\n<id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>\n<title type=\"text\"></title>\n<updated>2013-04-10T18:25:29Z</updated>\n<content type=\"application/xml\">\n    
<RegionCodeDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Code>East Asia</Code>\n    <FullName>East Asia</FullName>\n    </RegionCodeDescription>\n</content>\n</entry>\n          '''\n        xmldoc = minidom.parseString(xmlstr)\n        region = ServiceBusRegion()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'RegionCodeDescription'):\n            node_value = _get_first_child_node_value(desc, 'Code')\n            if node_value is not None:\n                region.code = node_value\n            node_value = _get_first_child_node_value(desc, 'FullName')\n            if node_value is not None:\n                region.fullname = node_value\n\n        return region\n\n    @staticmethod\n    def xml_to_namespace_availability(xmlstr):\n        '''Converts xml response to service bus namespace availability\n\n        The xml format:\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id>\n    <title type=\"text\"></title>\n    <updated>2013-04-16T03:03:37Z</updated>\n    <content type=\"application/xml\">\n        <NamespaceAvailability\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n            xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n            <Result>false</Result>\n        </NamespaceAvailability>\n    </content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        availability = AvailabilityResponse()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'NamespaceAvailability'):\n            node_value = _get_first_child_node_value(desc, 'Result')\n            if node_value is not None:\n                availability.result = _parse_bool(node_value)\n\n        return availability\n\n\nfrom azure.servicemanagement.servicemanagementservice import (\n    ServiceManagementService)\nfrom azure.servicemanagement.servicebusmanagementservice import (\n    ServiceBusManagementService)\nfrom azure.servicemanagement.websitemanagementservice import (\n    WebsiteManagementService)\n"
  },
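  {
    "path": "DSC/azure/servicemanagement/examples/xml_serializer_sketch.py",
    "content": "# Hypothetical example file, not part of the vendored SDK: a minimal\n# sketch of the (name, value[, converter]) tuple format accepted by\n# _XmlSerializer.data_to_xml, and of doc_from_xml, which wraps the\n# resulting fragment in the default azure namespaces. The element names\n# below are made up for illustration.\nfrom azure.servicemanagement import _XmlSerializer\n\n# Tuples are (element name, value) with an optional converter as the\n# third item; entries whose value is None are skipped entirely.\nfragment = _XmlSerializer.data_to_xml(\n    [('Name', 'example'),\n     ('Port', 80, str),\n     ('Optional', None)])\n# fragment == '<Name>example</Name><Port>80</Port>'\n\ndocument = _XmlSerializer.doc_from_xml('ExampleDocument', fragment)\nprint(document)\n"
  },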
  {
    "path": "DSC/azure/servicemanagement/servicebusmanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _convert_response_to_feeds,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    _ServiceBusManagementXmlSerializer,\n    QueueDescription,\n    TopicDescription,\n    NotificationHubDescription,\n    RelayDescription,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\n\nclass ServiceBusManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceBusManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for service bus ----------------------------------------\n    def get_regions(self):\n        '''\n        Get list of available service bus regions.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Regions/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_region)\n\n    def list_namespaces(self):\n        '''\n        List the service bus namespaces defined on the account.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_namespace)\n\n    def get_namespace(self, name):\n        '''\n        Get details about a specific namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace(\n            response.body)\n\n    def create_namespace(self, name, region):\n        '''\n        Create a new service bus namespace.\n\n        name: Name of the service bus namespace to create.\n        region: Region to create the namespace in.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_put(\n            self._get_path('services/serviceBus/Namespaces', name),\n            _ServiceBusManagementXmlSerializer.namespace_to_xml(region))\n\n    def delete_namespace(self, name):\n        '''\n        Delete a service bus namespace.\n\n        name: Name of the service bus namespace to delete.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_delete(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n    def check_namespace_availability(self, name):\n        '''\n        
Checks to see if the specified service bus namespace is available, or\n        if it has already been taken.\n\n        name: Name of the service bus namespace to validate.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_path('services/serviceBus/CheckNamespaceAvailability',\n                           None) + '/?namespace=' + _str(name), None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(\n            response.body)\n\n    def list_queues(self, name):\n        '''\n        Enumerates the queues in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n            \n        response = self._perform_get(\n            self._get_list_queues_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, QueueDescription)    \n\n    def list_topics(self, name):\n        '''\n        Retrieves the topics in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_topics_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, TopicDescription)\n\n    def list_notification_hubs(self, name):\n        '''\n        Retrieves the notification hubs in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_notification_hubs_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, NotificationHubDescription)\n\n    def list_relays(self, name):\n        '''\n        Retrieves the relays in the service namespace.\n        \n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_list_relays_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, RelayDescription)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_queues_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Queues'\n\n    def _get_list_topics_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Topics'\n\n    def _get_list_notification_hubs_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/NotificationHubs'\n\n    def _get_list_relays_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Relays'\n"
  },
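  {
    "path": "DSC/azure/servicemanagement/examples/servicebus_usage_sketch.py",
    "content": "# Hypothetical example file, not part of the vendored SDK: a minimal\n# sketch of ServiceBusManagementService usage. The subscription id,\n# certificate path and namespace below are placeholders; when both\n# constructor arguments are omitted, the client falls back to the\n# environment variables named by AZURE_MANAGEMENT_SUBSCRIPTIONID and\n# AZURE_MANAGEMENT_CERTFILE.\nfrom azure.servicemanagement import ServiceBusManagementService\n\nsbms = ServiceBusManagementService(\n    '00000000-0000-0000-0000-000000000000',  # placeholder subscription id\n    'management_cert.pem')                   # placeholder certificate file\n\n# Regions are returned as ServiceBusRegion instances (code, fullname).\nfor region in sbms.get_regions():\n    print(region.code)\n\n# Namespaces are returned as ServiceBusNamespace instances.\nfor namespace in sbms.list_namespaces():\n    print(namespace.name)\n\n# Queue listings are converted feeds, one entry per queue.\nfor queue in sbms.list_queues('mynamespace'):  # placeholder namespace\n    print(queue)\n"
  },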
  {
    "path": "DSC/azure/servicemanagement/servicemanagementclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\n\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _get_request_body,\n    _parse_response,\n    _str,\n    _update_request_uri_query,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicemanagement import (\n    AZURE_MANAGEMENT_CERTFILE,\n    AZURE_MANAGEMENT_SUBSCRIPTIONID,\n    _management_error_handler,\n    _parse_response_for_async_op,\n    _update_management_header,\n    )\n\n\nclass _ServiceManagementClient(object):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        self.requestid = None\n        self.subscription_id = subscription_id\n        self.cert_file = cert_file\n        self.host = host\n\n        if not self.cert_file:\n            if AZURE_MANAGEMENT_CERTFILE in os.environ:\n                self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]\n\n        if not self.subscription_id:\n            if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:\n                self.subscription_id = os.environ[\n                    AZURE_MANAGEMENT_SUBSCRIPTIONID]\n\n        if not self.cert_file or not self.subscription_id:\n            raise WindowsAzureError(\n                'You need to provide subscription id and certificate file')\n\n        self._httpclient = _HTTPClient(\n            service_instance=self, cert_file=self.cert_file)\n        self._filter = self._httpclient.perform_request\n\n    def with_filter(self, filter):\n        '''Returns a new service which will process requests with the\n        specified filter.  Filtering operations can include logging, automatic\n        retrying, etc...  The filter is a lambda which receives the HTTPRequest\n        and another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.'''\n        res = type(self)(self.subscription_id, self.cert_file, self.host)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    #--Helper functions --------------------------------------------------\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _management_error_handler(ex)\n\n        return resp\n\n    def _perform_get(self, path, response_type):\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        return response\n\n    def _perform_put(self, path, body, async=False):\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_post(self, path, body, response_type=None, async=False):\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_delete(self, path, async=False):\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _get_path(self, resource, name):\n        path = '/' + self.subscription_id + '/' + resource\n        if name is not None:\n            path += '/' + _str(name)\n        return path\n"
  },
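  {
    "path": "DSC/azure/servicemanagement/examples/with_filter_sketch.py",
    "content": "# Hypothetical example file, not part of the vendored SDK: a minimal\n# sketch of the filter contract described by with_filter. A filter\n# receives the HTTPRequest plus the next filter in the chain; it may\n# pre-process the request, delegate, and post-process the response.\n# Credentials below are placeholders.\nfrom azure.servicemanagement import ServiceManagementService\n\ndef logging_filter(request, next_filter):\n    # Pre-processing: inspect the outgoing request.\n    print(request.method + ' ' + request.path)\n    response = next_filter(request)\n    # Post-processing: inspect the response status code.\n    print(response.status)\n    return response\n\nsms = ServiceManagementService(\n    '00000000-0000-0000-0000-000000000000',  # placeholder subscription id\n    'management_cert.pem')                   # placeholder certificate file\n\n# with_filter returns a new client; the original is left untouched.\nlogged = sms.with_filter(logging_filter)\nlocations = logged.list_locations()\n"
  },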
  {
    "path": "DSC/azure/servicemanagement/servicemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    AffinityGroups,\n    AffinityGroup,\n    AvailabilityResponse,\n    Certificate,\n    Certificates,\n    DataVirtualHardDisk,\n    Deployment,\n    Disk,\n    Disks,\n    Locations,\n    Operation,\n    HostedService,\n    HostedServices,\n    Images,\n    OperatingSystems,\n    OperatingSystemFamilies,\n    OSImage,\n    PersistentVMRole,\n    StorageService,\n    StorageServices,\n    Subscription,\n    SubscriptionCertificate,\n    SubscriptionCertificates,\n    VirtualNetworkSites,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass ServiceManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for storage accounts -----------------------------------\n    def list_storage_accounts(self):\n        '''\n        Lists the storage accounts available under the current subscription.\n        '''\n        return self._perform_get(self._get_storage_service_path(),\n                                 StorageServices)\n\n    def get_storage_account_properties(self, service_name):\n        '''\n        Returns system properties for the specified storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(self._get_storage_service_path(service_name),\n                                 StorageService)\n\n    def get_storage_account_keys(self, service_name):\n        '''\n        Returns the primary and secondary access keys for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path(service_name) + '/keys',\n            StorageService)\n\n    def regenerate_storage_account_keys(self, service_name, key_type):\n        '''\n        Regenerates the primary or secondary access key for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        key_type:\n            Specifies which key to regenerate. 
Valid values are:\n            Primary, Secondary\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('key_type', key_type)\n        return self._perform_post(\n            self._get_storage_service_path(\n                service_name) + '/keys?action=regenerate',\n            _XmlSerializer.regenerate_keys_to_xml(\n                key_type),\n            StorageService)\n\n    def create_storage_account(self, service_name, description, label,\n                               affinity_group=None, location=None,\n                               geo_replication_enabled=True,\n                               extended_properties=None):\n        '''\n        Creates a new storage account in Windows Azure.\n\n        service_name:\n            A name for the storage account that is unique within Windows Azure.\n            Storage account names must be between 3 and 24 characters in length\n            and use numbers and lower-case letters only.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        affinity_group:\n            The name of an existing affinity group in the specified\n            subscription. You can specify either a location or affinity_group,\n            but not both.\n        location:\n            The location where the storage account is created. You can specify\n            either a location or affinity_group, but not both.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('description', description)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(\n            self._get_storage_service_path(),\n            _XmlSerializer.create_storage_service_input_to_xml(\n                service_name,\n                description,\n                label,\n                affinity_group,\n                location,\n                geo_replication_enabled,\n                extended_properties),\n            async=True)\n\n    def update_storage_account(self, service_name, description=None,\n                               label=None, geo_replication_enabled=None,\n                               extended_properties=None):\n        '''\n        Updates the label, the description, and enables or disables the\n        geo-replication status for a storage account in Windows Azure.\n\n        service_name: Name of the storage service account.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(\n            self._get_storage_service_path(service_name),\n            _XmlSerializer.update_storage_service_input_to_xml(\n                description,\n                label,\n                geo_replication_enabled,\n                extended_properties))\n\n    def delete_storage_account(self, service_name):\n        '''\n        Deletes the specified storage account from Windows Azure.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(\n            self._get_storage_service_path(service_name))\n\n    def check_storage_account_name_availability(self, service_name):\n        '''\n        Checks to see if the specified storage account name is available, or\n        if it has already been taken.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path() +\n            '/operations/isavailable/' +\n            _str(service_name) + '',\n            AvailabilityResponse)\n\n    #--Operations for hosted services ------------------------------------\n    def list_hosted_services(self):\n        '''\n        Lists the hosted services available under the current subscription.\n        '''\n        return self._perform_get(self._get_hosted_service_path(),\n                                 HostedServices)\n\n    def get_hosted_service_properties(self, service_name, embed_detail=False):\n        '''\n        Retrieves system properties for the specified hosted service. These\n        properties include the service name and service type; the name of the\n        affinity group to which the service belongs, or its location if it is\n        not part of an affinity group; and optionally, information on the\n        service's deployments.\n\n        service_name: Name of the hosted service.\n        embed_detail:\n            When True, the management service returns properties for all\n            deployments of the service, as well as for the service itself.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('embed_detail', embed_detail)\n        return self._perform_get(\n            self._get_hosted_service_path(service_name) +\n            '?embed-detail=' +\n            _str(embed_detail).lower(),\n            HostedService)\n\n    def create_hosted_service(self, service_name, label, description=None,\n                              location=None, affinity_group=None,\n                              extended_properties=None):\n        '''\n        Creates a new hosted service in Windows Azure.\n\n        service_name:\n            A name for the hosted service that is unique within Windows Azure.\n            This name is the DNS prefix name and can be used to access the\n            hosted service.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. The name can be used to identify the storage account for\n            your tracking purposes.\n        description:\n            A description for the hosted service. 
The description can be up to\n            1024 characters in length.\n        location:\n            The location where the hosted service will be created. You can\n            specify either a location or affinity_group, but not both.\n        affinity_group:\n            The name of an existing affinity group associated with this\n            subscription. This name is a GUID and can be retrieved by examining\n            the name element of the response body returned by\n            list_affinity_groups. You can specify either a location or\n            affinity_group, but not both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(self._get_hosted_service_path(),\n                                  _XmlSerializer.create_hosted_service_to_xml(\n                                      service_name,\n                                      label,\n                                      description,\n                                      location,\n                                      affinity_group,\n                                      extended_properties))\n\n    def update_hosted_service(self, service_name, label=None, description=None,\n                              extended_properties=None):\n        '''\n        Updates the label and/or the description for a hosted service in\n        Windows Azure.\n\n        service_name: Name of the hosted service.\n        label:\n            A name for the hosted service. The name may be up to 100 characters\n            in length. You must specify a value for either Label or\n            Description, or for both. It is recommended that the label be\n            unique within the subscription. The name can be used to\n            identify the hosted service for your tracking purposes.\n        description:\n            A description for the hosted service. The description may be up to\n            1024 characters in length. You must specify a value for either\n            Label or Description, or for both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(self._get_hosted_service_path(service_name),\n                                 _XmlSerializer.update_hosted_service_to_xml(\n                                     label,\n                                     description,\n                                     extended_properties))\n\n    def delete_hosted_service(self, service_name):\n        '''\n        Deletes the specified hosted service from Windows Azure.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(self._get_hosted_service_path(service_name))\n\n    def get_deployment_by_slot(self, service_name, deployment_slot):\n        '''\n        Returns configuration information, status, and system properties for\n        a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        return self._perform_get(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            Deployment)\n\n    def get_deployment_by_name(self, service_name, deployment_name):\n        '''\n        Returns configuration information, status, and system properties for a\n        deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_get(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            Deployment)\n\n    def create_deployment(self, service_name, deployment_slot, name,\n                          package_url, label, configuration,\n                          start_deployment=False,\n                          treat_warnings_as_error=False,\n                          extended_properties=None):\n        '''\n        Uploads a new service package and creates a new deployment on staging\n        or production.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. 
The name can be used to identify the hosted service\n            for your tracking purposes.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        start_deployment:\n            Indicates whether to start the deployment immediately after it is\n            created. If false, the service model is still deployed to the\n            virtual machines but the code is not run immediately. Instead, the\n            service is Suspended until you call Update Deployment Status and\n            set the status to Running, at which time the service will be\n            started. A deployed service still incurs charges, even if it is\n            suspended.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the Created Deployment operation fails if there\n            are validation warnings on the service package.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('name', name)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('label', label)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            _XmlSerializer.create_deployment_to_xml(\n                name,\n                package_url,\n                label,\n                configuration,\n                start_deployment,\n                treat_warnings_as_error,\n                extended_properties),\n            async=True)\n\n    def delete_deployment(self, service_name, deployment_name):\n        '''\n        Deletes the specified deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_delete(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            async=True)\n\n    def swap_deployment(self, service_name, production, source_deployment):\n        '''\n        Initiates a virtual IP swap between the staging and production\n        deployment environments for a service. If the service is currently\n        running in the staging environment, it will be swapped to the\n        production environment. 
If it is running in the production\n        environment, it will be swapped to staging.\n\n        service_name: Name of the hosted service.\n        production: The name of the production deployment.\n        source_deployment: The name of the source deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('production', production)\n        _validate_not_none('source_deployment', source_deployment)\n        return self._perform_post(self._get_hosted_service_path(service_name),\n                                  _XmlSerializer.swap_deployment_to_xml(\n                                      production, source_deployment),\n                                  async=True)\n\n    def change_deployment_configuration(self, service_name, deployment_name,\n                                        configuration,\n                                        treat_warnings_as_error=False,\n                                        mode='Auto', extended_properties=None):\n        '''\n        Initiates a change to the deployment configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the Created Deployment operation fails if there\n            are validation warnings on the service package.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=config',\n            _XmlSerializer.change_deployment_to_xml(\n                configuration,\n                treat_warnings_as_error,\n                mode,\n                extended_properties),\n            async=True)\n\n    def update_deployment_status(self, service_name, deployment_name, status):\n        '''\n        Initiates a change in deployment status.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        status:\n            The change to initiate to the deployment status. 
Possible values\n            include: Running, Suspended\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('status', status)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=status',\n            _XmlSerializer.update_deployment_status_to_xml(\n                status),\n            async=True)\n\n    def upgrade_deployment(self, service_name, deployment_name, mode,\n                           package_url, configuration, label, force,\n                           role_to_upgrade=None, extended_properties=None):\n        '''\n        Initiates an upgrade.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. The name can be used to identify the hosted service\n            for your tracking purposes.\n        force:\n            Specifies whether the upgrade should proceed even when it will\n            cause local data to be lost from some role instances. True if the\n            upgrade should proceed; otherwise false if the upgrade should\n            fail.\n        role_to_upgrade: The name of the specific role to upgrade.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('configuration', configuration)\n        _validate_not_none('label', label)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=upgrade',\n            _XmlSerializer.upgrade_deployment_to_xml(\n                mode,\n                package_url,\n                configuration,\n                label,\n                role_to_upgrade,\n                force,\n                extended_properties),\n            async=True)\n\n    def walk_upgrade_domain(self, service_name, deployment_name,\n                            upgrade_domain):\n        '''\n        Specifies the next upgrade domain to be walked during manual in-place\n        upgrade or configuration change.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        upgrade_domain:\n            An integer value that identifies the upgrade domain to walk.\n            Upgrade domains are identified with a zero-based index: the first\n            upgrade domain has an ID of 0, the second has an ID of 1, and so on.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('upgrade_domain', upgrade_domain)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=walkupgradedomain',\n            _XmlSerializer.walk_upgrade_domain_to_xml(\n                upgrade_domain),\n            async=True)\n\n    def rollback_update_or_upgrade(self, service_name, deployment_name, mode,\n                                   force):\n        '''\n        Cancels an in progress configuration change (update) or upgrade and\n        returns the deployment to its state before the upgrade or\n        configuration change was started.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            Specifies whether the rollback should proceed automatically.\n                auto - The rollback proceeds without further user input.\n                manual - You must call the Walk Upgrade Domain operation to\n                         apply the rollback to each upgrade domain.\n        force:\n            Specifies whether the rollback should proceed even when it will\n            cause local data to be lost from some role instances. 
True if the\n            rollback should proceed; otherwise false if the rollback should\n            fail.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=rollback',\n            _XmlSerializer.rollback_upgrade_to_xml(\n                mode, force),\n            async=True)\n\n    def reboot_role_instance(self, service_name, deployment_name,\n                             role_instance_name):\n        '''\n        Requests a reboot of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reboot',\n            '',\n            async=True)\n\n    def reimage_role_instance(self, service_name, deployment_name,\n                              role_instance_name):\n        '''\n        Requests a reimage of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reimage',\n            '',\n            async=True)\n\n    def check_hosted_service_name_availability(self, service_name):\n        '''\n        Checks to see if the specified hosted service name is available, or if\n        it has already been taken.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id +\n            '/services/hostedservices/operations/isavailable/' +\n            _str(service_name) + '',\n            AvailabilityResponse)\n\n    #--Operations for service certificates -------------------------------\n    def list_service_certificates(self, service_name):\n        '''\n        Lists all of the service certificates associated with the specified\n        hosted service.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            Certificates)\n\n    def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):\n        '''\n        
Returns the public data for the specified X.509 certificate associated\n        with a hosted service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint) + '',\n            Certificate)\n\n    def add_service_certificate(self, service_name, data, certificate_format,\n                                password):\n        '''\n        Adds a certificate to a hosted service.\n\n        service_name: Name of the hosted service.\n        data: The base-64 encoded form of the pfx file.\n        certificate_format:\n            The service certificate format. The only supported value is pfx.\n        password: The certificate password.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('data', data)\n        _validate_not_none('certificate_format', certificate_format)\n        _validate_not_none('password', password)\n        return self._perform_post(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            _XmlSerializer.certificate_file_to_xml(\n                data, certificate_format, password),\n            async=True)\n\n    def delete_service_certificate(self, service_name, thumbalgorithm,\n                                   thumbprint):\n        '''\n        Deletes a service certificate from the certificate store of a hosted\n        service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint),\n            async=True)\n\n    #--Operations for management certificates ----------------------------\n    def list_management_certificates(self):\n        '''\n        The List Management Certificates operation lists and returns basic\n        information about all of the management certificates associated with\n        the specified subscription. Management certificates, which are also\n        known as subscription certificates, authenticate clients attempting to\n        connect to resources associated with your Windows Azure subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/certificates',\n                                 SubscriptionCertificates)\n\n    def get_management_certificate(self, thumbprint):\n        '''\n        The Get Management Certificate operation retrieves information about\n        the management certificate with the specified thumbprint. 
Management\n        certificates, which are also known as subscription certificates,\n        authenticate clients attempting to connect to resources associated\n        with your Windows Azure subscription.\n\n        thumbprint: The thumbprint value of the certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint),\n            SubscriptionCertificate)\n\n    def add_management_certificate(self, public_key, thumbprint, data):\n        '''\n        The Add Management Certificate operation adds a certificate to the\n        list of management certificates. Management certificates, which are\n        also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        public_key:\n            A base64 representation of the management certificate public key.\n        thumbprint:\n            The thumb print that uniquely identifies the management\n            certificate.\n        data: The certificate's raw data in base-64 encoded .cer format.\n        '''\n        _validate_not_none('public_key', public_key)\n        _validate_not_none('thumbprint', thumbprint)\n        _validate_not_none('data', data)\n        return self._perform_post(\n            '/' + self.subscription_id + '/certificates',\n            _XmlSerializer.subscription_certificate_to_xml(\n                public_key, thumbprint, data))\n\n    def delete_management_certificate(self, thumbprint):\n        '''\n        The Delete Management Certificate operation deletes a certificate from\n        the list of management certificates. Management certificates, which\n        are also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        thumbprint:\n            The thumb print that uniquely identifies the management\n            certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint))\n\n    #--Operations for affinity groups ------------------------------------\n    def list_affinity_groups(self):\n        '''\n        Lists the affinity groups associated with the specified subscription.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups',\n            AffinityGroups)\n\n    def get_affinity_group_properties(self, affinity_group_name):\n        '''\n        Returns the system properties associated with the specified affinity\n        group.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name) + '',\n            AffinityGroup)\n\n    def create_affinity_group(self, name, label, location, description=None):\n        '''\n        Creates a new affinity group for the specified subscription.\n\n        name: A name for the affinity group that is unique to the subscription.\n        label:\n            A name for the affinity group. 
The name can be up to 100 characters\n            in length.\n        location:\n            The data center location where the affinity group will be created.\n            To list available locations, use the list_location function.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('name', name)\n        _validate_not_none('label', label)\n        _validate_not_none('location', location)\n        return self._perform_post(\n            '/' + self.subscription_id + '/affinitygroups',\n            _XmlSerializer.create_affinity_group_to_xml(name,\n                                                        label,\n                                                        description,\n                                                        location))\n\n    def update_affinity_group(self, affinity_group_name, label,\n                              description=None):\n        '''\n        Updates the label and/or the description for an affinity group for the\n        specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        label:\n            A name for the affinity group. The name can be up to 100 characters\n            in length.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        _validate_not_none('label', label)\n        return self._perform_put(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name),\n            _XmlSerializer.update_affinity_group_to_xml(label, description))\n\n    def delete_affinity_group(self, affinity_group_name):\n        '''\n        Deletes an affinity group in the specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_delete('/' + self.subscription_id + \\\n                                    '/affinitygroups/' + \\\n                                    _str(affinity_group_name))\n\n    #--Operations for locations ------------------------------------------\n    def list_locations(self):\n        '''\n        Lists all of the data center locations that are valid for your\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/locations',\n                                 Locations)\n\n    #--Operations for tracking asynchronous requests ---------------------\n    def get_operation_status(self, request_id):\n        '''\n        Returns the status of the specified operation. 
After calling an\n        asynchronous operation, you can call Get Operation Status to determine\n        whether the operation has succeeded, failed, or is still in progress.\n\n        request_id: The request ID for the request you wish to track.\n        '''\n        _validate_not_none('request_id', request_id)\n        return self._perform_get(\n            '/' + self.subscription_id + '/operations/' + _str(request_id),\n            Operation)\n\n    #--Operations for retrieving operating system information ------------\n    def list_operating_systems(self):\n        '''\n        Lists the versions of the guest operating system that are currently\n        available in Windows Azure.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystems',\n            OperatingSystems)\n\n    def list_operating_system_families(self):\n        '''\n        Lists the guest operating system families available in Windows Azure,\n        and also lists the operating system versions available for each family.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystemfamilies',\n            OperatingSystemFamilies)\n\n    #--Operations for retrieving subscription history --------------------\n    def get_subscription(self):\n        '''\n        Returns account and resource allocation information on the specified\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '',\n                                 Subscription)\n\n    #--Operations for virtual machines -----------------------------------\n    def get_role(self, service_name, deployment_name, role_name):\n        '''\n        Retrieves the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_get(\n            self._get_role_path(service_name, deployment_name, role_name),\n            PersistentVMRole)\n\n    def create_virtual_machine_deployment(self, service_name, deployment_name,\n                                          deployment_slot, label, role_name,\n                                          system_config, os_virtual_hard_disk,\n                                          network_config=None,\n                                          availability_set_name=None,\n                                          data_virtual_hard_disks=None,\n                                          role_size=None,\n                                          role_type='PersistentVMRole',\n                                          virtual_network_name=None):\n        '''\n        Provisions a virtual machine based on the supplied configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        label:\n            Specifies an identifier for the deployment. The label can be up to\n            100 characters long. 
The label can be used for tracking purposes.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image.  Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you can not specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine\n            allocation in the Windows Azure environment. Virtual machines\n            specified in the same availability set are allocated to different\n            nodes to maximize availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
The only supported\n            value is PersistentVMRole.\n        virtual_network_name:\n            Specifies the name of an existing virtual network to which the\n            deployment will belong.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('label', label)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_deployment_path_using_name(service_name),\n            _XmlSerializer.virtual_machine_deployment_to_xml(\n                deployment_name,\n                deployment_slot,\n                label,\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size,\n                virtual_network_name),\n            async=True)\n\n    def add_role(self, service_name, deployment_name, role_name, system_config,\n                 os_virtual_hard_disk, network_config=None,\n                 availability_set_name=None, data_virtual_hard_disks=None,\n                 role_size=None, role_type='PersistentVMRole'):\n        '''\n        Adds a virtual machine to an existing deployment.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image.  Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you can not specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
The only supported\n            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_role_path(service_name, deployment_name),\n            _XmlSerializer.add_role_to_xml(\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def update_role(self, service_name, deployment_name, role_name,\n                    os_virtual_hard_disk=None, network_config=None,\n                    availability_set_name=None, data_virtual_hard_disks=None,\n                    role_size=None, role_type='PersistentVMRole'):\n        '''\n        Updates the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you can not specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. 
The only supported\n            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_put(\n            self._get_role_path(service_name, deployment_name, role_name),\n            _XmlSerializer.update_role_to_xml(\n                role_name,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def delete_role(self, service_name, deployment_name, role_name):\n        '''\n        Deletes the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_delete(\n            self._get_role_path(service_name, deployment_name, role_name),\n            async=True)\n\n    def capture_role(self, service_name, deployment_name, role_name,\n                     post_capture_action, target_image_name,\n                     target_image_label, provisioning_configuration=None):\n        '''\n        The Capture Role operation captures a virtual machine image to your\n        image gallery. From the captured image, you can create additional\n        customized virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        post_capture_action:\n            Specifies the action after capture operation completes. 
Possible\n            values are: Delete, Reprovision.\n        target_image_name:\n            Specifies the image name of the captured virtual machine.\n        target_image_label:\n            Specifies the friendly name of the captured virtual machine.\n        provisioning_configuration:\n            Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_capture_action', post_capture_action)\n        _validate_not_none('target_image_name', target_image_name)\n        _validate_not_none('target_image_label', target_image_label)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.capture_role_to_xml(\n                post_capture_action,\n                target_image_name,\n                target_image_label,\n                provisioning_configuration),\n            async=True)\n\n    def start_role(self, service_name, deployment_name, role_name):\n        '''\n        Starts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.start_role_operation_to_xml(),\n            async=True)\n\n    def start_roles(self, service_name, deployment_name, role_names):\n        '''\n        Starts the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.start_roles_operation_to_xml(role_names),\n            async=True)\n\n    def restart_role(self, service_name, deployment_name, role_name):\n        '''\n        Restarts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.restart_role_operation_to_xml(),\n            async=True)\n\n    def shutdown_role(self, service_name, deployment_name, role_name,\n                      post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The 
name of the deployment.\n        role_name: The name of the role.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. If a static Virtual Network IP\n                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.shutdown_role_operation_to_xml(post_shutdown_action),\n            async=True)\n\n    def shutdown_roles(self, service_name, deployment_name, role_names,\n                       post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. 
If a static Virtual Network IP\n                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.shutdown_roles_operation_to_xml(\n                role_names, post_shutdown_action),\n            async=True)\n\n    #--Operations for virtual machine images -----------------------------\n    def list_os_images(self):\n        '''\n        Retrieves a list of the OS images from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(),\n                                 Images)\n\n    def get_os_image(self, image_name):\n        '''\n        Retrieves an OS image from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(image_name),\n                                 OSImage)\n\n    def add_os_image(self, label, media_link, name, os):\n        '''\n        Adds an OS image that is currently stored in a storage account in your\n        subscription to the image repository.\n\n        label: Specifies the friendly name of the image.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more virtual machines.\n        os:\n            The operating system type of the OS image. Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_image_path(),\n                                  _XmlSerializer.os_image_to_xml(\n                                      label, media_link, name, os),\n                                  async=True)\n\n    def update_os_image(self, image_name, label, media_link, name, os):\n        '''\n        Updates an OS image that is in your image repository.\n\n        image_name: The name of the image to update.\n        label:\n            Specifies the friendly name of the image to be updated. You cannot\n            use this operation to update images provided by the Windows Azure\n            platform.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more VM Roles.\n        os:\n            The operating system type of the OS image. 
Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('image_name', image_name)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_image_path(image_name),\n                                 _XmlSerializer.os_image_to_xml(\n                                     label, media_link, name, os),\n                                 async=True)\n\n    def delete_os_image(self, image_name, delete_vhd=False):\n        '''\n        Deletes the specified OS image from your image repository.\n\n        image_name: The name of the image.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('image_name', image_name)\n        path = self._get_image_path(image_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def get_data_disk(self, service_name, deployment_name, role_name, lun):\n        '''\n        Retrieves the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_get(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            DataVirtualHardDisk)\n\n    def add_data_disk(self, service_name, deployment_name, role_name, lun,\n                      host_caching=None, media_link=None, disk_label=None,\n                      disk_name=None, logical_disk_size_in_gb=None,\n                      source_media_link=None):\n        '''\n        Adds a data disk to a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        disk_label:\n            Specifies the description of the data disk. 
When you attach a disk,\n            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        source_media_link:\n            Specifies the location of a blob in account storage which is\n            mounted as a data disk when the virtual machine is created.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_post(\n            self._get_data_disk_path(service_name, deployment_name, role_name),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                lun,\n                logical_disk_size_in_gb,\n                media_link,\n                source_media_link),\n            async=True)\n\n    def update_data_disk(self, service_name, deployment_name, role_name, lun,\n                         host_caching=None, media_link=None, updated_lun=None,\n                         disk_label=None, disk_name=None,\n                         logical_disk_size_in_gb=None):\n        '''\n        Updates the specified data disk attached to the specified virtual\n        machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through\n            15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by\n            the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        updated_lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        disk_label:\n            Specifies the description of the data disk. 
When you attach a disk,\n            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_put(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                updated_lun,\n                logical_disk_size_in_gb,\n                media_link,\n                None),\n            async=True)\n\n    def delete_data_disk(self, service_name, deployment_name, role_name, lun, delete_vhd=False):\n        '''\n        Removes the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        path = self._get_data_disk_path(service_name, deployment_name, role_name, lun)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def list_disks(self):\n        '''\n        Retrieves a list of the disks in your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(),\n                                 Disks)\n\n    def get_disk(self, disk_name):\n        '''\n        Retrieves a disk from your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(disk_name),\n                                 Disk)\n\n    def add_disk(self, has_operating_system, label, media_link, name, os):\n        '''\n        Adds a disk to the user image repository. The disk can be an OS disk\n        or a data disk.\n\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. 
The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. Possible values are: Linux, Windows\n        '''\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_disk_path(),\n                                  _XmlSerializer.disk_to_xml(\n                                      has_operating_system,\n                                      label,\n                                      media_link,\n                                      name,\n                                      os))\n\n    def update_disk(self, disk_name, has_operating_system, label, media_link,\n                    name, os):\n        '''\n        Updates an existing disk in your image repository.\n\n        disk_name: The name of the disk to update.\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. 
Possible values are: Linux, Windows\n        '''\n        _validate_not_none('disk_name', disk_name)\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_disk_path(disk_name),\n                                 _XmlSerializer.disk_to_xml(\n                                     has_operating_system,\n                                     label,\n                                     media_link,\n                                     name,\n                                     os))\n\n    def delete_disk(self, disk_name, delete_vhd=False):\n        '''\n        Deletes the specified data or operating system disk from your image\n        repository.\n\n        disk_name: The name of the disk to delete.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('disk_name', disk_name)\n        path = self._get_disk_path(disk_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path)\n\n    #--Operations for virtual networks  ------------------------------\n    def list_virtual_network_sites(self):\n        '''\n        Retrieves a list of the virtual networks.\n        '''\n        return self._perform_get(self._get_virtual_network_site_path(), VirtualNetworkSites)\n\n    #--Helper functions --------------------------------------------------\n    def _get_virtual_network_site_path(self):\n        return self._get_path('services/networking/virtualnetwork', None)\n\n    def _get_storage_service_path(self, service_name=None):\n        return self._get_path('services/storageservices', service_name)\n\n    def _get_hosted_service_path(self, service_name=None):\n        return self._get_path('services/hostedservices', service_name)\n\n    def _get_deployment_path_using_slot(self, service_name, slot=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deploymentslots', slot)\n\n    def _get_deployment_path_using_name(self, service_name,\n                                        deployment_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments', deployment_name)\n\n    def _get_role_path(self, service_name, deployment_name, role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roles', role_name)\n\n    def _get_role_instance_operations_path(self, service_name, deployment_name,\n                                           role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roleinstances', role_name) + '/Operations'\n\n    def _get_roles_operations_path(self, service_name, deployment_name):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roles/Operations', None)\n\n    def _get_data_disk_path(self, service_name, deployment_name, role_name,\n                            lun=None):\n        return 
self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roles/' + _str(role_name) + '/DataDisks', lun)\n\n    def _get_disk_path(self, disk_name=None):\n        return self._get_path('services/disks', disk_name)\n\n    def _get_image_path(self, image_name=None):\n        return self._get_path('services/images', image_name)\n
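\n# Usage sketch (illustrative only; the subscription id, certificate path and\n# resource names below are placeholders): methods invoked with async=True\n# return a result whose request_id can be polled with get_operation_status\n# until the operation leaves the 'InProgress' state.\n#\n#     import time\n#     sms = ServiceManagementService('<subscription-id>', 'management.pem')\n#     result = sms.reboot_role_instance('mysvc', 'mydeploy', 'myrole_IN_0')\n#     operation = sms.get_operation_status(result.request_id)\n#     while operation.status == 'InProgress':\n#         time.sleep(5)\n#         operation = sms.get_operation_status(result.request_id)\n"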
  },
  {
    "path": "DSC/azure/servicemanagement/sqldatabasemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _parse_service_resources_response,\n    )\nfrom azure.servicemanagement import (\n    Servers,\n    Database,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass SqlDatabaseManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on SQL Database\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(SqlDatabaseManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for sql servers ----------------------------------------\n    def list_servers(self):\n        '''\n        List the SQL servers defined on the account.\n        '''\n        return self._perform_get(self._get_list_servers_path(),\n                                 Servers)\n\n    #--Operations for sql databases ----------------------------------------\n    def list_databases(self, name):\n        '''\n        List the SQL databases defined on the specified server name\n        '''\n        response = self._perform_get(self._get_list_databases_path(name),\n                                     None)\n        return _parse_service_resources_response(response, Database)\n\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_servers_path(self):\n        return self._get_path('services/sqlservers/servers', None)\n\n    def _get_list_databases_path(self, name):\n        # *contentview=generic is mandatory*\n        return self._get_path('services/sqlservers/servers/',\n                              name) + '/databases?contentview=generic' \n    \n"
  },
  {
    "path": "DSC/azure/servicemanagement/websitemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _str,\n    )\nfrom azure.servicemanagement import (\n    WebSpaces,\n    WebSpace,\n    Sites,\n    Site,\n    MetricResponses,\n    MetricDefinitions,\n    PublishData,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass WebsiteManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on WebSite\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(WebsiteManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for web sites ----------------------------------------\n    def list_webspaces(self):\n        '''\n        List the webspaces defined on the account.\n        '''\n        return self._perform_get(self._get_list_webspaces_path(),\n                                 WebSpaces)\n\n    def get_webspace(self, webspace_name):\n        '''\n        Get details of a specific webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_webspace_details_path(webspace_name),\n                                 WebSpace)\n\n    def list_sites(self, webspace_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_sites_path(webspace_name),\n                                 Sites)\n\n    def get_site(self, webspace_name, website_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_sites_details_path(webspace_name,\n                                                              website_name),\n                                 Site)\n\n    def create_site(self, webspace_name, website_name, geo_region, host_names,\n                    plan='VirtualDedicatedPlan', compute_mode='Shared',\n                    server_farm=None, site_mode=None):\n        '''\n        Create a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        geo_region:\n            The geographical region of the webspace that will be created.\n        host_names:\n            An array of fully qualified domain names for website. 
Only one\n            hostname can be specified in the azurewebsites.net domain.\n            The hostname should match the name of the website. Custom domains\n            can only be specified for Shared or Standard websites.\n        plan:\n            This value must be 'VirtualDedicatedPlan'.\n        compute_mode:\n            This value should be 'Shared' for the Free or Paid Shared\n            offerings, or 'Dedicated' for the Standard offering. The default\n            value is 'Shared'. If you set it to 'Dedicated', you must specify\n            a value for the server_farm parameter.\n        server_farm:\n            The name of the Server Farm associated with this website. This is\n            a required value for Standard mode.\n        site_mode:\n            Can be None, 'Limited' or 'Basic'. This value is 'Limited' for the\n            Free offering, and 'Basic' for the Paid Shared offering. Standard\n            mode does not use the site_mode parameter; it uses the compute_mode\n            parameter.\n        '''\n        xml = _XmlSerializer.create_website_to_xml(\n            webspace_name, website_name, geo_region, plan, host_names,\n            compute_mode, server_farm, site_mode)\n        return self._perform_post(\n            self._get_sites_path(webspace_name),\n            xml,\n            Site)\n\n    def delete_site(self, webspace_name, website_name,\n                    delete_empty_server_farm=False, delete_metrics=False):\n        '''\n        Delete a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        delete_empty_server_farm:\n            If the site being deleted is the last web site in a server farm,\n            you can delete the server farm by setting this to True.\n        delete_metrics:\n            To also delete the metrics for the site that you are deleting, you\n            can set this to True.\n        '''\n        path = self._get_sites_details_path(webspace_name, website_name)\n        query = ''\n        if delete_empty_server_farm:\n            query += '&deleteEmptyServerFarm=true'\n        if delete_metrics:\n            query += '&deleteMetrics=true'\n        if query:\n            path = path + '?' + query.lstrip('&')\n        return self._perform_delete(path)\n\n    def restart_site(self, webspace_name, website_name):\n        '''\n        Restart a web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_post(\n            self._get_restart_path(webspace_name, website_name),\n            '')\n\n    def get_historical_usage_metrics(self, webspace_name, website_name,\n                                     metrics=None, start_time=None, end_time=None, time_grain=None):\n        '''\n        Get historical usage metrics.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        metrics: Optional. List of metric names. Otherwise, all metrics are returned.\n        start_time: Optional. An ISO8601 date. Otherwise, the current hour is used.\n        end_time: Optional. An ISO8601 date. Otherwise, the current time is used.\n        time_grain: Optional. A rollup name, such as P1D. 
Otherwise, the default rollup for the metrics is used.\n        More information and metric names at:\n        http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx\n        '''\n        metrics = ('names='+','.join(metrics)) if metrics else ''\n        start_time = ('StartTime='+start_time) if start_time else ''\n        end_time = ('EndTime='+end_time) if end_time else ''\n        time_grain = ('TimeGrain='+time_grain) if time_grain else ''\n        parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v))\n        parameters = '?'+parameters if parameters else ''\n        return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters,\n                                 MetricResponses)\n\n    def get_metric_definitions(self, webspace_name, website_name):\n        '''\n        Get metric definitions of the metrics available for this web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name),\n                                 MetricDefinitions)\n\n    def get_publish_profile_xml(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as a string.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 None).body.decode(\"utf-8\")\n\n    def get_publish_profile(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as an object.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 PublishData)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_webspaces_path(self):\n        return self._get_path('services/webspaces', None)\n\n    def _get_webspace_details_path(self, webspace_name):\n        return self._get_path('services/webspaces/', webspace_name)\n\n    def _get_sites_path(self, webspace_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites'\n\n    def _get_sites_details_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name)\n\n    def _get_restart_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/restart/'\n\n    def _get_historical_usage_metrics_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metrics/'\n\n    def _get_metric_definitions_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metricdefinitions/'\n\n    def _get_publishxml_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + 
_str(website_name) + '/publishxml/' \n"
  },
  {
    "path": "DSC/azure/storage/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport types\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (WindowsAzureData,\n                   WindowsAzureError,\n                   METADATA_NS,\n                   xml_escape,\n                   _create_entry,\n                   _decode_base64_to_text,\n                   _decode_base64_to_bytes,\n                   _encode_base64,\n                   _fill_data_minidom,\n                   _fill_instance_element,\n                   _get_child_nodes,\n                   _get_child_nodesNS,\n                   _get_children_from_path,\n                   _get_entry_properties,\n                   _general_error_handler,\n                   _list_of,\n                   _parse_response_for_dict,\n                   _sign_string,\n                   _unicode_type,\n                   _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,\n                   )\n\n# x-ms-version for storage service.\nX_MS_VERSION = '2012-02-12'\n\n\nclass EnumResultsBase(object):\n\n    ''' base class for EnumResults. '''\n\n    def __init__(self):\n        self.prefix = u''\n        self.marker = u''\n        self.max_results = 0\n        self.next_marker = u''\n\n\nclass ContainerEnumResults(EnumResultsBase):\n\n    ''' Blob Container list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.containers = _list_of(Container)\n\n    def __iter__(self):\n        return iter(self.containers)\n\n    def __len__(self):\n        return len(self.containers)\n\n    def __getitem__(self, index):\n        return self.containers[index]\n\n\nclass Container(WindowsAzureData):\n\n    ''' Blob container class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.properties = Properties()\n        self.metadata = {}\n\n\nclass Properties(WindowsAzureData):\n\n    ''' Blob container's properties class. '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n\n\nclass RetentionPolicy(WindowsAzureData):\n\n    ''' RetentionPolicy in service properties. '''\n\n    def __init__(self):\n        self.enabled = False\n        self.__dict__['days'] = None\n\n    def get_days(self):\n        # convert days to int value\n        return int(self.__dict__['days'])\n\n    def set_days(self, value):\n        ''' set default days if days is set to empty. '''\n        self.__dict__['days'] = value\n\n    days = property(fget=get_days, fset=set_days)\n\n\nclass Logging(WindowsAzureData):\n\n    ''' Logging class in service properties. 
'''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.delete = False\n        self.read = False\n        self.write = False\n        self.retention_policy = RetentionPolicy()\n\n\nclass Metrics(WindowsAzureData):\n\n    ''' Metrics class in service properties. '''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.enabled = False\n        self.include_apis = None\n        self.retention_policy = RetentionPolicy()\n\n\nclass StorageServiceProperties(WindowsAzureData):\n\n    ''' Storage Service Properties class. '''\n\n    def __init__(self):\n        self.logging = Logging()\n        self.metrics = Metrics()\n\n\nclass AccessPolicy(WindowsAzureData):\n\n    ''' Access Policy class in service properties. '''\n\n    def __init__(self, start=u'', expiry=u'', permission=u''):\n        self.start = start\n        self.expiry = expiry\n        self.permission = permission\n\n\nclass SignedIdentifier(WindowsAzureData):\n\n    ''' Signed Identifier class for service properties. '''\n\n    def __init__(self):\n        self.id = u''\n        self.access_policy = AccessPolicy()\n\n\nclass SignedIdentifiers(WindowsAzureData):\n\n    ''' SignedIdentifier list. '''\n\n    def __init__(self):\n        self.signed_identifiers = _list_of(SignedIdentifier)\n\n    def __iter__(self):\n        return iter(self.signed_identifiers)\n\n    def __len__(self):\n        return len(self.signed_identifiers)\n\n    def __getitem__(self, index):\n        return self.signed_identifiers[index]\n\n\nclass BlobEnumResults(EnumResultsBase):\n\n    ''' Blob list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.blobs = _list_of(Blob)\n        self.prefixes = _list_of(BlobPrefix)\n        self.delimiter = ''\n\n    def __iter__(self):\n        return iter(self.blobs)\n\n    def __len__(self):\n        return len(self.blobs)\n\n    def __getitem__(self, index):\n        return self.blobs[index]\n\n\nclass BlobResult(bytes):\n\n    def __new__(cls, blob, properties):\n        return bytes.__new__(cls, blob if blob else b'')\n\n    def __init__(self, blob, properties):\n        self.properties = properties\n\n\nclass Blob(WindowsAzureData):\n\n    ''' Blob class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.snapshot = u''\n        self.url = u''\n        self.properties = BlobProperties()\n        self.metadata = {}\n\n\nclass BlobProperties(WindowsAzureData):\n\n    ''' Blob Properties. '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n        self.content_length = 0\n        self.content_type = u''\n        self.content_encoding = u''\n        self.content_language = u''\n        self.content_md5 = u''\n        self.xms_blob_sequence_number = 0\n        self.blob_type = u''\n        self.lease_status = u''\n        self.lease_state = u''\n        self.lease_duration = u''\n        self.copy_id = u''\n        self.copy_source = u''\n        self.copy_status = u''\n        self.copy_progress = u''\n        self.copy_completion_time = u''\n        self.copy_status_description = u''\n\n\nclass BlobPrefix(WindowsAzureData):\n\n    ''' BlobPrefix in Blob. 
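Returned in BlobEnumResults.prefixes when\n    list_blobs is called with a delimiter. A minimal sketch (assumes a\n    BlobService instance named blob_service and an existing container):\n\n        resp = blob_service.list_blobs('mycontainer', delimiter='/')\n        for prefix in resp.prefixes:\n            print(prefix.name)\n    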
'''\n\n    def __init__(self):\n        self.name = ''\n\n\nclass BlobBlock(WindowsAzureData):\n\n    ''' BlobBlock class '''\n\n    def __init__(self, id=None, size=None):\n        self.id = id\n        self.size = size\n\n\nclass BlobBlockList(WindowsAzureData):\n\n    ''' BlobBlockList class '''\n\n    def __init__(self):\n        self.committed_blocks = []\n        self.uncommitted_blocks = []\n\n\nclass PageRange(WindowsAzureData):\n\n    ''' Page Range for page blob. '''\n\n    def __init__(self):\n        self.start = 0\n        self.end = 0\n\n\nclass PageList(object):\n\n    ''' Page list for page blob. '''\n\n    def __init__(self):\n        self.page_ranges = _list_of(PageRange)\n\n    def __iter__(self):\n        return iter(self.page_ranges)\n\n    def __len__(self):\n        return len(self.page_ranges)\n\n    def __getitem__(self, index):\n        return self.page_ranges[index]\n\n\nclass QueueEnumResults(EnumResultsBase):\n\n    ''' Queue list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.queues = _list_of(Queue)\n\n    def __iter__(self):\n        return iter(self.queues)\n\n    def __len__(self):\n        return len(self.queues)\n\n    def __getitem__(self, index):\n        return self.queues[index]\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.metadata = {}\n\n\nclass QueueMessagesList(WindowsAzureData):\n\n    ''' Queue message list. '''\n\n    def __init__(self):\n        self.queue_messages = _list_of(QueueMessage)\n\n    def __iter__(self):\n        return iter(self.queue_messages)\n\n    def __len__(self):\n        return len(self.queue_messages)\n\n    def __getitem__(self, index):\n        return self.queue_messages[index]\n\n\nclass QueueMessage(WindowsAzureData):\n\n    ''' Queue message class. '''\n\n    def __init__(self):\n        self.message_id = u''\n        self.insertion_time = u''\n        self.expiration_time = u''\n        self.pop_receipt = u''\n        self.time_next_visible = u''\n        self.dequeue_count = u''\n        self.message_text = u''\n\n\nclass Entity(WindowsAzureData):\n\n    ''' Entity class. The attributes of entity will be created dynamically. '''\n    pass\n\n\nclass EntityProperty(WindowsAzureData):\n\n    ''' Entity property. Contains a type and a value. '''\n\n    def __init__(self, type=None, value=None):\n        self.type = type\n        self.value = value\n\n\nclass Table(WindowsAzureData):\n\n    ''' Only for IntelliSense and to tell the user the return type. '''\n    pass\n\n\n
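# A hedged sketch of how these entity classes feed the conversion helpers\n# below (names illustrative; 23 fits in 32 bits, so _to_entity_int maps it\n# to Edm.Int32):\n#\n#     entity = Entity()\n#     entity.PartitionKey = 'mypartitionkey'\n#     entity.RowKey = 'myrowkey1'\n#     entity.Age = 23\n#     entity.BinaryData = EntityProperty('Edm.Binary', b'data')\n#     xmlstr = _convert_entity_to_xml(entity)\n\n\n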
def _parse_blob_enum_results_list(response):\n    respbody = response.body\n    return_obj = BlobEnumResults()\n    doc = minidom.parseString(respbody)\n\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'):\n            return_obj.blobs.append(_fill_instance_element(child, Blob))\n\n        for child in _get_children_from_path(enum_results,\n                                             'Blobs',\n                                             'BlobPrefix'):\n            return_obj.prefixes.append(\n                _fill_instance_element(child, BlobPrefix))\n\n        for name, value in vars(return_obj).items():\n            if name == 'blobs' or name == 'prefixes':\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    return return_obj\n\n\ndef _update_storage_header(request):\n    ''' add additional headers for storage request. '''\n    if request.body:\n        assert isinstance(request.body, bytes)\n\n    # if it is PUT, POST, MERGE or DELETE, we need to add a content-length\n    # header.\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # append additional headers based on the service\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # append x-ms-meta name, values to header\n    for name, value in request.headers:\n        if 'x-ms-meta-name-values' in name and value:\n            for meta_name, meta_value in value.items():\n                request.headers.append(('x-ms-meta-' + meta_name, meta_value))\n            request.headers.remove((name, value))\n            break\n    return request\n\n\ndef _update_storage_blob_header(request, account_name, account_key):\n    ''' add additional headers for storage blob request. '''\n\n    request = _update_storage_header(request)\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(\n        ('Content-Type', 'application/octet-stream Charset=UTF-8'))\n    request.headers.append(('Authorization',\n                            _sign_storage_blob_request(request,\n                                                       account_name,\n                                                       account_key)))\n\n    return request.headers\n\n\ndef _update_storage_queue_header(request, account_name, account_key):\n    ''' add additional headers for storage queue request. '''\n    return _update_storage_blob_header(request, account_name, account_key)\n\n\ndef _update_storage_table_header(request):\n    ''' add additional headers for storage table request. 
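Besides the common storage\n    headers, the code below appends:\n\n        ('Content-Type', 'application/atom+xml')    # only if not already set\n        ('DataServiceVersion', '2.0;NetFx')\n        ('MaxDataServiceVersion', '2.0;NetFx')\n        ('x-ms-date', <current UTC time>)\n        ('Date', <current UTC time>)\n    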
'''\n\n    request = _update_storage_header(request)\n    for name, _ in request.headers:\n        if name.lower() == 'content-type':\n            break\n    else:\n        request.headers.append(('Content-Type', 'application/atom+xml'))\n    request.headers.append(('DataServiceVersion', '2.0;NetFx'))\n    request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(('Date', current_time))\n    return request.headers\n\n\ndef _sign_storage_blob_request(request, account_name, account_key):\n    '''\n    Returns the signed string for blob request which is used to set\n    Authorization header. This is also used to sign queue request.\n    '''\n\n    uri_path = request.path.split('?')[0]\n\n    # method to sign\n    string_to_sign = request.method + '\\n'\n\n    # get headers to sign\n    headers_to_sign = [\n        'content-encoding', 'content-language', 'content-length',\n        'content-md5', 'content-type', 'date', 'if-modified-since',\n        'if-match', 'if-none-match', 'if-unmodified-since', 'range']\n\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get x-ms header to sign\n    x_ms_headers = []\n    for name, value in request.headers:\n        if 'x-ms' in name:\n            x_ms_headers.append((name.lower(), value))\n    x_ms_headers.sort()\n    for name, value in x_ms_headers:\n        if value:\n            string_to_sign += ''.join([name, ':', value, '\\n'])\n\n    # get account_name and uri path to sign\n    string_to_sign += '/' + account_name + uri_path\n\n    # get query string to sign if it is not table service\n    query_to_sign = request.query\n    query_to_sign.sort()\n\n    current_name = ''\n    for name, value in query_to_sign:\n        if value:\n            if current_name != name:\n                string_to_sign += '\\n' + name + ':' + value\n            else:\n                string_to_sign += '\\n' + ',' + value\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _sign_storage_table_request(request, account_name, account_key):\n    uri_path = request.path.split('?')[0]\n\n    string_to_sign = request.method + '\\n'\n    headers_to_sign = ['content-md5', 'content-type', 'date']\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get account_name and uri path to sign\n    string_to_sign += ''.join(['/', account_name, uri_path])\n\n    for name, value in request.query:\n        if name == 'comp' and uri_path == '/':\n            string_to_sign += '?comp=' + value\n            break\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _to_python_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\ndef _to_entity_int(data):\n    int_max = (2 << 30) - 1\n    if data > (int_max) or data < (int_max + 1) * 
(-1):\n        return 'Edm.Int64', str(data)\n    else:\n        return 'Edm.Int32', str(data)\n\n\ndef _to_entity_bool(value):\n    if value:\n        return 'Edm.Boolean', 'true'\n    return 'Edm.Boolean', 'false'\n\n\ndef _to_entity_datetime(value):\n    return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')\n\n\ndef _to_entity_float(value):\n    return 'Edm.Double', str(value)\n\n\ndef _to_entity_property(value):\n    if value.type == 'Edm.Binary':\n        return value.type, _encode_base64(value.value)\n\n    return value.type, str(value.value)\n\n\ndef _to_entity_none(value):\n    return None, None\n\n\ndef _to_entity_str(value):\n    return 'Edm.String', value\n\n\n# Tables of conversions to and from entity types.  We support specific\n# datatypes, and beyond that the user can use an EntityProperty to get\n# custom data type support.\n\ndef _from_entity_binary(value):\n    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))\n\n\ndef _from_entity_int(value):\n    return int(value)\n\n\ndef _from_entity_datetime(value):\n    format = '%Y-%m-%dT%H:%M:%S'\n    if '.' in value:\n        format = format + '.%f'\n    if value.endswith('Z'):\n        format = format + 'Z'\n    return datetime.strptime(value, format)\n\n_ENTITY_TO_PYTHON_CONVERSIONS = {\n    'Edm.Binary': _from_entity_binary,\n    'Edm.Int32': _from_entity_int,\n    'Edm.Int64': _from_entity_int,\n    'Edm.Double': float,\n    'Edm.Boolean': _to_python_bool,\n    'Edm.DateTime': _from_entity_datetime,\n}\n\n# Conversion from Python type to a function which returns a tuple of the\n# type string and content string.\n_PYTHON_TO_ENTITY_CONVERSIONS = {\n    int: _to_entity_int,\n    bool: _to_entity_bool,\n    datetime: _to_entity_datetime,\n    float: _to_entity_float,\n    EntityProperty: _to_entity_property,\n    str: _to_entity_str,\n}\n\nif sys.version_info < (3,):\n    _PYTHON_TO_ENTITY_CONVERSIONS.update({\n        long: _to_entity_int,\n        types.NoneType: _to_entity_none,\n        unicode: _to_entity_str,\n    })\n\n\ndef _convert_entity_to_xml(source):\n    ''' Converts an entity object to xml to send.\n\n    The entity format is:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n\n    # construct the entity body included in <m:properties> and </m:properties>\n    entity_body = '<m:properties xml:space=\"preserve\">{properties}</m:properties>'\n\n    if 
isinstance(source, WindowsAzureData):\n        source = vars(source)\n\n    properties_str = ''\n\n    # Set the property type for types we know when the value carries no type\n    # info. If the value has type info, use value.type.\n    for name, value in source.items():\n        mtype = ''\n        conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))\n        if conv is None and sys.version_info >= (3,) and value is None:\n            conv = _to_entity_none\n        if conv is None:\n            raise WindowsAzureError(\n                _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(\n                    type(value).__name__))\n\n        mtype, value = conv(value)\n\n        # form the property node\n        properties_str += ''.join(['<d:', name])\n        if value is None:\n            properties_str += ' m:null=\"true\" />'\n        else:\n            if mtype:\n                properties_str += ''.join([' m:type=\"', mtype, '\"'])\n            properties_str += ''.join(['>',\n                                      xml_escape(value), '</d:', name, '>'])\n\n    if sys.version_info < (3,):\n        if isinstance(properties_str, unicode):\n            properties_str = properties_str.encode('utf-8')\n\n    # generate the entity_body\n    entity_body = entity_body.format(properties=properties_str)\n    xmlstr = _create_entry(entity_body)\n    return xmlstr\n\n\ndef _convert_table_to_xml(table_name):\n    '''\n    Create xml to send for a given table name. The xml format for a table is\n    the same as for an entity, except that a table has only the single\n    property 'TableName', so we just call _convert_entity_to_xml.\n\n    table_name: the name of the table\n    '''\n    return _convert_entity_to_xml({'TableName': table_name})\n\n\ndef _convert_block_list_to_xml(block_id_list):\n    '''\n    Convert a block list to xml to send.\n\n    block_id_list:\n        a str list containing the block ids that are used in put_block_list.\n    Each block id is emitted as a <Latest> element, so the most recently\n    uploaded block with that id is committed.\n    '''\n    if block_id_list is None:\n        return ''\n    xml = '<?xml version=\"1.0\" encoding=\"utf-8\"?><BlockList>'\n    for value in block_id_list:\n        xml += '<Latest>{0}</Latest>'.format(_encode_base64(value))\n\n    return xml + '</BlockList>'\n\n\ndef _create_blob_result(response):\n    blob_properties = _parse_response_for_dict(response)\n    return BlobResult(response.body, blob_properties)\n\n\ndef _convert_response_to_block_list(response):\n    '''\n    Converts xml response to block list class.\n    '''\n    blob_block_list = BlobBlockList()\n\n    xmldoc = minidom.parseString(response.body)\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'CommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.committed_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'UncommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 
'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.uncommitted_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    return blob_block_list\n\n\ndef _remove_prefix(name):\n    colon = name.find(':')\n    if colon != -1:\n        return name[colon + 1:]\n    return name\n\n\ndef _convert_response_to_entity(response):\n    if response is None:\n        return response\n    return _convert_xml_to_entity(response.body)\n\n\ndef _convert_xml_to_entity(xmlstr):\n    ''' Convert xml response to entity.\n\n    The format of entity:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n\n    xml_properties = None\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        for content in _get_child_nodes(entry, 'content'):\n            # TODO: Namespace\n            xml_properties = _get_child_nodesNS(\n                content, METADATA_NS, 'properties')\n\n    if not xml_properties:\n        return None\n\n    entity = Entity()\n    # extract each property node and get the type from attribute and node value\n    for xml_property in xml_properties[0].childNodes:\n        name = _remove_prefix(xml_property.nodeName)\n        # exclude the Timestamp since it is auto added by azure when\n        # inserting entity. 
We don't want this to mix with real properties.\n        if name in ['Timestamp']:\n            continue\n\n        if xml_property.firstChild:\n            value = xml_property.firstChild.nodeValue\n        else:\n            value = ''\n\n        isnull = xml_property.getAttributeNS(METADATA_NS, 'null')\n        mtype = xml_property.getAttributeNS(METADATA_NS, 'type')\n\n        # if not isnull and no type info, then it is a string and we just\n        # need the str type to hold the property.\n        if not isnull and not mtype:\n            _set_entity_attr(entity, name, value)\n        elif isnull == 'true':\n            if mtype:\n                property = EntityProperty(mtype, None)\n            else:\n                property = EntityProperty('Edm.String', None)\n            # also set null-valued properties on the entity so they are not\n            # silently dropped\n            _set_entity_attr(entity, name, property)\n        else:  # need an object to hold the property\n            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)\n            if conv is not None:\n                property = conv(value)\n            else:\n                property = EntityProperty(mtype, value)\n            _set_entity_attr(entity, name, property)\n\n    # Extract the etag from the feed entry properties and set it on the\n    # entity.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        if name in ['etag']:\n            _set_entity_attr(entity, name, value)\n\n    return entity\n\n\ndef _set_entity_attr(entity, name, value):\n    try:\n        setattr(entity, name, value)\n    except UnicodeEncodeError:\n        # Python 2 doesn't support unicode attribute names, so we'll\n        # add them and access them directly through the dictionary\n        entity.__dict__[name] = value\n\n\ndef _convert_xml_to_table(xmlstr):\n    ''' Converts the xml response to a table class.\n    Simply calls _convert_xml_to_entity, extracts the table name, and adds\n    the updated and author info.\n    '''\n    table = Table()\n    entity = _convert_xml_to_entity(xmlstr)\n    setattr(table, 'name', entity.TableName)\n    for name, value in _get_entry_properties(xmlstr, False).items():\n        setattr(table, name, value)\n    return table\n\n\ndef _storage_error_handler(http_error):\n    ''' Simple error handler for storage service. '''\n    return _general_error_handler(http_error)\n\n# make these available just from storage.\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.queueservice import QueueService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.cloudstorageaccount import CloudStorageAccount\nfrom azure.storage.sharedaccesssignature import (\n    SharedAccessSignature,\n    SharedAccessPolicy,\n    Permission,\n    WebResource,\n    )\n"
  },
  {
    "path": "DSC/azure/storage/blobservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    BLOB_SERVICE_HOST_BASE,\n    DEV_BLOB_HOST,\n    _ERROR_VALUE_NEGATIVE,\n    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _parse_simple_list,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_type_bytes,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.storage import (\n    Container,\n    ContainerEnumResults,\n    PageList,\n    PageRange,\n    SignedIdentifiers,\n    StorageServiceProperties,\n    _convert_block_list_to_xml,\n    _convert_response_to_block_list,\n    _create_blob_result,\n    _parse_blob_enum_results_list,\n    _update_storage_blob_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\nfrom os import path\nimport sys\nif sys.version_info >= (3,):\n    from io import BytesIO\nelse:\n    from cStringIO import StringIO as BytesIO\n\n# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT\n_PAGE_SIZE = 512\n\nclass BlobService(_StorageClient):\n\n    '''\n    This is the main class managing Blob resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to https.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024\n        self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024\n        super(BlobService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def make_blob_url(self, container_name, blob_name, account_name=None,\n                      protocol=None, host_base=None):\n        '''\n        Creates the url to access a blob.\n\n        container_name: Name of container.\n        blob_name: Name of blob.\n        account_name:\n            Name of the storage account. If not specified, uses the account\n            specified when BlobService was initialized.\n        protocol:\n            Protocol to use: 'http' or 'https'. 
If not specified, uses the\n            protocol specified when BlobService was initialized.\n        host_base:\n            Live host base url.  If not specified, uses the host base specified\n            when BlobService was initialized.\n        '''\n        if not account_name:\n            account_name = self.account_name\n        if not protocol:\n            protocol = self.protocol\n        if not host_base:\n            host_base = self.host_base\n\n        return '{0}://{1}{2}/{3}/{4}'.format(protocol,\n                                             account_name,\n                                             host_base,\n                                             container_name,\n                                             blob_name)\n\n    def list_containers(self, prefix=None, marker=None, maxresults=None,\n                        include=None):\n        '''\n        The List Containers operation returns a list of the containers under\n        the specified account.\n\n        prefix:\n            Optional. Filters the results to return only containers whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list to\n            be returned with the next list operation.\n        maxresults:\n            Optional. Specifies the maximum number of containers to return.\n        include:\n            Optional. Include this parameter to specify that the container's\n            metadata be returned as part of the response body. set this\n            parameter to string 'metadata' to get container's metadata.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(response,\n                                        ContainerEnumResults,\n                                        \"Containers\",\n                                        Container)\n\n    def create_container(self, container_name, x_ms_meta_name_values=None,\n                         x_ms_blob_public_access=None, fail_on_exist=False):\n        '''\n        Creates a new container under the specified account. If the container\n        with the same name already exists, the operation fails.\n\n        container_name: Name of container to create.\n        x_ms_meta_name_values:\n            Optional. A dict with name_value pairs to associate with the\n            container as metadata. Example:{'Category':'test'}\n        x_ms_blob_public_access:\n            Optional. 
Possible values include: container, blob\n        fail_on_exist:\n            specify whether to throw an exception when the container exists.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_container_properties(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata and system properties for the\n        specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_properties only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def get_container_metadata(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata for the specified container. 
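A minimal sketch (assumes this\n        service instance is named blob_service and container 'mycontainer'\n        exists; the 'category' key is illustrative):\n\n            metadata = blob_service.get_container_metadata('mycontainer')\n            category = metadata.get('x-ms-meta-category')\n\n        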
The metadata will be in the returned\n        dictionary['x-ms-meta-(name)'].\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_container_metadata(self, container_name,\n                               x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets one or more user-defined name-value pairs for the specified\n        container.\n\n        container_name: Name of existing container.\n        x_ms_meta_name_values:\n            A dict containing name, value for metadata.\n            Example: {'category':'test'}\n        x_ms_lease_id:\n            If specified, set_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_container_acl(self, container_name, x_ms_lease_id=None):\n        '''\n        Gets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, SignedIdentifiers)\n\n
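    # A hedged sketch of defining a stored access policy (assumes this service\n    # instance is named blob_service and container 'mycontainer' exists; the\n    # dates and permission are illustrative):\n    #\n    #     from azure.storage import AccessPolicy, SignedIdentifier\n    #     identifiers = SignedIdentifiers()\n    #     si = SignedIdentifier()\n    #     si.id = 'readpolicy'\n    #     si.access_policy = AccessPolicy(u'2014-01-01', u'2014-12-31', u'r')\n    #     identifiers.signed_identifiers.append(si)\n    #     blob_service.set_container_acl('mycontainer', identifiers)\n\n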
    def set_container_acl(self, container_name, signed_identifiers=None,\n                          x_ms_blob_public_access=None, x_ms_lease_id=None):\n        '''\n        Sets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        signed_identifiers: SignedIdentifiers instance\n        x_ms_blob_public_access:\n            Optional. Possible values include: container, blob\n        x_ms_lease_id:\n            If specified, set_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.body = _get_request_body(\n            _convert_class_to_xml(signed_identifiers))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_container(self, container_name, fail_not_exist=False,\n                         x_ms_lease_id=None):\n        '''\n        Marks the specified container for deletion.\n\n        container_name: Name of container to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the container doesn't\n            exist.\n        x_ms_lease_id: Required if the container has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def lease_container(self, container_name, x_ms_lease_action,\n                        x_ms_lease_id=None, x_ms_lease_duration=60,\n                        x_ms_lease_break_period=None,\n                        x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a lock on a container for delete operations.\n        The lock duration can be 15 to 60 seconds, or can be infinite.\n\n        container_name: Name of existing container.\n        x_ms_lease_action:\n            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the container has an active lease.\n        x_ms_lease_duration:\n            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n            between 15 and 60 seconds. 
A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration of\n            seconds that the lease should continue before it is broken, between\n            0 and 60 seconds. This break period is only used if it is shorter\n            than the time remaining on the lease. If longer, the time remaining\n            on the lease is used. A new lease will not be available before the\n            break period has expired, but the lease may be held for longer than\n            the break period. If this header does not appear with a break\n            operation, a fixed-duration lease breaks after the remaining lease\n            period elapses, and an infinite lease breaks immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=lease'\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n            ('x-ms-lease-duration',\n             _str_or_none(\n                 x_ms_lease_duration if x_ms_lease_action == 'acquire'\\\n                     else None)),\n            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n\n    def list_blobs(self, container_name, prefix=None, marker=None,\n                   maxresults=None, include=None, delimiter=None):\n        '''\n        Returns the list of blobs under the specified container.\n\n        container_name: Name of existing container.\n        prefix:\n            Optional. Filters the results to return only blobs whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list\n            to be returned with the next list operation. The operation returns\n            a marker value within the response body if the list returned was\n            not complete. The marker value may then be used in a subsequent\n            call to request the next set of list items. The marker value is\n            opaque to the client.\n        maxresults:\n            Optional. Specifies the maximum number of blobs to return,\n            including all BlobPrefix elements. If the request does not specify\n            maxresults or specifies a value greater than 5,000, the server will\n            return up to 5,000 items. 
Setting maxresults to a value less than\n            or equal to zero results in error response code 400 (Bad Request).\n        include:\n            Optional. Specifies one or more datasets to include in the\n            response. To specify more than one of these options on the URI,\n            you must separate each option with a comma. Valid values are:\n                snapshots:\n                    Specifies that snapshots should be included in the\n                    enumeration. Snapshots are listed from oldest to newest in\n                    the response.\n                metadata:\n                    Specifies that blob metadata be returned in the response.\n                uncommittedblobs:\n                    Specifies that blobs for which blocks have been uploaded,\n                    but which have not been committed using Put Block List\n                    (REST API), be included in the response.\n                copy:\n                    Version 2012-02-12 and newer. Specifies that metadata\n                    related to any current or previous Copy Blob operation\n                    should be included in the response.\n        delimiter:\n            Optional. When the request includes this parameter, the operation\n            returns a BlobPrefix element in the response body that acts as a\n            placeholder for all blobs whose names begin with the same\n            substring up to the appearance of the delimiter character. The\n            delimiter may be a single character or a string.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('delimiter', _str_or_none(delimiter)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_blob_enum_results_list(response)\n\n    def set_blob_service_properties(self, storage_service_properties,\n                                    timeout=None):\n        '''\n        Sets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics. You can also use this operation to\n        set the default request version for all incoming requests that do not\n        have a version specified.\n\n        storage_service_properties: a StorageServiceProperties object.\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_blob_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def get_blob_properties(self, container_name, blob_name,\n                            x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata, standard HTTP properties, and\n        system properties for the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'HEAD'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def set_blob_properties(self, container_name, blob_name,\n                            x_ms_blob_cache_control=None,\n                            x_ms_blob_content_type=None,\n                            x_ms_blob_content_md5=None,\n                            x_ms_blob_content_encoding=None,\n                            x_ms_blob_content_language=None,\n                            x_ms_lease_id=None):\n        '''\n        Sets system properties on the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_blob_cache_control:\n            Optional. Modifies the cache control string for the blob.\n        x_ms_blob_content_type: Optional. 
Sets the blob's content type.\n        x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.\n        x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.\n        x_ms_blob_content_language: Optional. Sets the blob's content language.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=properties'\n        request.headers = [\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_blob(self, container_name, blob_name, blob, x_ms_blob_type,\n                 content_encoding=None, content_language=None,\n                 content_md5=None, cache_control=None,\n                 x_ms_blob_content_type=None, x_ms_blob_content_encoding=None,\n                 x_ms_blob_content_language=None, x_ms_blob_content_md5=None,\n                 x_ms_blob_cache_control=None, x_ms_meta_name_values=None,\n                 x_ms_lease_id=None, x_ms_blob_content_length=None,\n                 x_ms_blob_sequence_number=None):\n        '''\n        Creates a new block blob or page blob, or updates the content of an\n        existing block blob.\n\n        See put_block_blob_from_* and put_page_blob_from_* for high level\n        functions that handle the creation and upload of large blobs with\n        automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob:\n            For BlockBlob:\n                Content of blob as bytes (size < 64MB). For larger size, you\n                must call put_block and put_block_list to set content of blob.\n            For PageBlob:\n                Use None and call put_page to set content of blob.\n        x_ms_blob_type: Required. Could be BlockBlob or PageBlob.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. 
            When this header is specified, the storage service compares the\n            hash that has arrived with the one that was sent. If the two\n            hashes do not match, the operation will fail with error code 400\n            (Bad Request).\n
        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n
        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_content_length:\n
            Required for page blobs. This header specifies the maximum size\n            for the page blob, up to 1 TB. The page blob size must be aligned\n            to a 512-byte boundary.\n
        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n
            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        '''\n        _validate_not_none('container_name', container_name)\n
        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_blob_type', x_ms_blob_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n
        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n
            ('x-ms-blob-type', _str_or_none(x_ms_blob_type)),\n            ('Content-Encoding', _str_or_none(content_encoding)),\n            ('Content-Language', _str_or_none(content_language)),\n
            ('Content-MD5', _str_or_none(content_md5)),\n            ('Cache-Control', _str_or_none(cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n
            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n
            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n
            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-blob-content-length',\n
             _str_or_none(x_ms_blob_content_length)),\n            ('x-ms-blob-sequence-number',\n             _str_or_none(x_ms_blob_sequence_number))\n        ]\n
        request.body = _get_request_body_bytes_only('blob', blob)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n
        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n
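\n    # Usage sketch (illustrative only; `bs`, the container and blob names,\n    # and the payloads are hypothetical placeholders). A small block blob is\n
    # created in a single call, while a page blob is created empty and then\n    # filled with put_page:\n    #\n
    #   bs.put_blob('mycontainer', 'myblockblob', b'hello', 'BlockBlob')\n    #   bs.put_blob('mycontainer', 'mypageblob', None, 'PageBlob',\n    #               x_ms_blob_content_length=512)\n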
\n    def put_block_blob_from_path(self, container_name, blob_name, file_path,\n                                 content_encoding=None, content_language=None,\n
                                 content_md5=None, cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n
                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n
                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n
        Creates a new block blob from a file path, or updates the content of an\n        existing block blob, with automatic chunking and progress notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n
        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has\n
            arrived with the one that was sent. If the two hashes do not\n            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n
            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n
        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n
        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n\n
        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n
                                          stream,\n                                          count,\n                                          content_encoding,\n                                          content_language,\n
                                          content_md5,\n                                          cache_control,\n                                          x_ms_blob_content_type,\n
                                          x_ms_blob_content_encoding,\n                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n
                                          x_ms_blob_cache_control,\n                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n
                                          progress_callback)\n
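\n    # Usage sketch (illustrative only; `bs`, the names and the path are\n    # hypothetical placeholders). Files larger than _BLOB_MAX_DATA_SIZE are\n
    # uploaded in chunks via put_block/put_block_list automatically:\n    #\n    #   def show_progress(current, total):\n    #       print(current, total)\n    #\n
    #   bs.put_block_blob_from_path('mycontainer', 'backup.tar',\n    #                               '/tmp/backup.tar',\n    #                               progress_callback=show_progress)\n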
\n    def put_block_blob_from_file(self, container_name, blob_name, stream,\n                                 count=None, content_encoding=None,\n
                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n
                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n
                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n
        '''\n        Creates a new block blob from a file/stream, or updates the content of\n        an existing block blob, with automatic chunking and progress\n        notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        stream: Opened file/stream to upload as the blob content.\n        count:\n
            Number of bytes to read from the stream. This is optional, but\n            should be supplied for optimal performance.\n        content_encoding:\n
            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has\n
            arrived with the one that was sent. If the two hashes do not\n            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n
            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n
        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n
            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n
            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        _validate_not_none('stream', stream)\n\n        if count and count < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, count)\n\n
            data = stream.read(count)\n            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n
                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n
                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n
                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n
            if progress_callback:\n                progress_callback(count, count)\n        else:\n            if progress_callback:\n                progress_callback(0, count)\n\n
            self.put_blob(container_name,\n                          blob_name,\n                          None,\n                          'BlockBlob',\n                          content_encoding,\n
                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n
                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n
                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n
            remain_bytes = count\n            block_ids = []\n            block_index = 0\n            index = 0\n            while True:\n
                request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\\\n                    if remain_bytes is None else min(\n                        remain_bytes,\n
                        self._BLOB_MAX_CHUNK_DATA_SIZE)\n                data = stream.read(request_count)\n                if data:\n                    length = len(data)\n
                    index += length\n
                    remain_bytes = remain_bytes - \\\n                        length if remain_bytes else None\n                    block_id = '{0:08d}'.format(block_index)\n
                    self.put_block(container_name, blob_name,\n                                   data, block_id, x_ms_lease_id=x_ms_lease_id)\n                    block_ids.append(block_id)\n
                    block_index += 1\n                    if progress_callback:\n                        progress_callback(index, count)\n                else:\n                    break\n\n
            self.put_block_list(container_name, blob_name, block_ids,\n                                content_md5, x_ms_blob_cache_control,\n                                x_ms_blob_content_type,\n
                                x_ms_blob_content_encoding,\n                                x_ms_blob_content_language,\n                                x_ms_blob_content_md5,\n
                                x_ms_meta_name_values,\n                                x_ms_lease_id)\n
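\n    # The same chunked upload can be driven by hand with put_block and\n    # put_block_list (illustrative sketch only; `bs`, the names and the\n
    # 4 MB chunk size are hypothetical placeholders):\n    #\n    #   block_ids = []\n    #   with open('/tmp/large.bin', 'rb') as stream:\n    #       block_index = 0\n
    #       while True:\n    #           data = stream.read(4 * 1024 * 1024)\n    #           if not data:\n    #               break\n
    #           block_id = '{0:08d}'.format(block_index)\n    #           bs.put_block('mycontainer', 'large.bin', data, block_id)\n    #           block_ids.append(block_id)\n
    #           block_index += 1\n    #   bs.put_block_list('mycontainer', 'large.bin', block_ids)\n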
\n    def put_block_blob_from_bytes(self, container_name, blob_name, blob,\n                                  index=0, count=None, content_encoding=None,\n
                                  content_language=None, content_md5=None,\n                                  cache_control=None,\n                                  x_ms_blob_content_type=None,\n
                                  x_ms_blob_content_encoding=None,\n                                  x_ms_blob_content_language=None,\n                                  x_ms_blob_content_md5=None,\n
                                  x_ms_blob_cache_control=None,\n                                  x_ms_meta_name_values=None,\n                                  x_ms_lease_id=None, progress_callback=None):\n
        '''\n        Creates a new block blob from an array of bytes, or updates the content\n        of an existing block blob, with automatic chunking and progress\n        notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of bytes.\n
        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or a negative value to\n            upload all bytes starting from index.\n
        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has\n
            arrived with the one that was sent. If the two hashes do not\n            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n
            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n
        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n
            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n
            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        _validate_not_none('blob', blob)\n        _validate_not_none('index', index)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n
            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n        if count is None or count < 0:\n            count = len(blob) - index\n\n
        if count < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, count)\n\n            data = blob[index: index + count]\n
            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n                          content_encoding,\n
                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n
                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n
                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            if progress_callback:\n
                progress_callback(count, count)\n        else:\n            stream = BytesIO(blob)\n            stream.seek(index)\n\n
            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n                                          stream,\n                                          count,\n
                                          content_encoding,\n                                          content_language,\n                                          content_md5,\n
                                          cache_control,\n                                          x_ms_blob_content_type,\n                                          x_ms_blob_content_encoding,\n
                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n                                          x_ms_blob_cache_control,\n
                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n                                          progress_callback)\n\n
    def put_block_blob_from_text(self, container_name, blob_name, text,\n                                 text_encoding='utf-8',\n
                                 content_encoding=None, content_language=None,\n                                 content_md5=None, cache_control=None,\n
                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n
                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n
                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from str/unicode, or updates the content of an\n
        existing block blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n
        text: Text to upload to the blob.\n        text_encoding: Encoding to use to convert the text to bytes.\n        content_encoding:\n
            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has\n
            arrived with the one that was sent. If the two hashes do not\n            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n
            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n
        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n
        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text', text)\n\n
        if not isinstance(text, bytes):\n            _validate_not_none('text_encoding', text_encoding)\n            text = text.encode(text_encoding)\n\n
        self.put_block_blob_from_bytes(container_name,\n                                       blob_name,\n                                       text,\n                                       0,\n
                                       len(text),\n                                       content_encoding,\n                                       content_language,\n
                                       content_md5,\n                                       cache_control,\n                                       x_ms_blob_content_type,\n
                                       x_ms_blob_content_encoding,\n                                       x_ms_blob_content_language,\n                                       x_ms_blob_content_md5,\n
                                       x_ms_blob_cache_control,\n                                       x_ms_meta_name_values,\n                                       x_ms_lease_id,\n
                                       progress_callback)\n\n    def put_page_blob_from_path(self, container_name, blob_name, file_path,\n
                                content_encoding=None, content_language=None,\n                                content_md5=None, cache_control=None,\n
                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n                                x_ms_blob_content_language=None,\n
                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n                                x_ms_meta_name_values=None,\n
                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n                                progress_callback=None):\n        '''\n
        Creates a new page blob from a file path, or updates the content of an\n        existing page blob, with automatic chunking and progress notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n
        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport.\n
            When this header is specified, the storage service compares the\n            hash that has arrived with the one that was sent. If the two\n            hashes do not match, the operation will fail with error code 400\n            (Bad Request).\n
        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n
        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n
            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n
            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        progress_callback:\n
            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n
            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        _validate_not_none('file_path', file_path)\n\n        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n
            self.put_page_blob_from_file(container_name,\n                                         blob_name,\n                                         stream,\n                                         count,\n
                                         content_encoding,\n                                         content_language,\n                                         content_md5,\n
                                         cache_control,\n                                         x_ms_blob_content_type,\n                                         x_ms_blob_content_encoding,\n
                                         x_ms_blob_content_language,\n                                         x_ms_blob_content_md5,\n                                         x_ms_blob_cache_control,\n
                                         x_ms_meta_name_values,\n                                         x_ms_lease_id,\n                                         x_ms_blob_sequence_number,\n
                                         progress_callback)\n
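\n    # Usage sketch (illustrative only; `bs`, the names and the path are\n    # hypothetical placeholders). The size of the uploaded file must fall on\n
    # a 512-byte page boundary:\n    #\n    #   bs.put_page_blob_from_path('mycontainer', 'disk.vhd',\n    #                              '/tmp/disk.vhd')\n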
\n    def put_page_blob_from_file(self, container_name, blob_name, stream, count,\n                                content_encoding=None, content_language=None,\n
                                content_md5=None, cache_control=None,\n                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n
                                x_ms_blob_content_language=None,\n                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n
                                x_ms_meta_name_values=None,\n                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n
                                progress_callback=None):\n        '''\n        Creates a new page blob from a file/stream, or updates the content of an\n
        existing page blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n
        stream: Opened file/stream to upload as the blob content.\n        count:\n            Number of bytes to read from the stream. This is required; a page\n
            blob cannot be created if the count is unknown.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n
            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n
            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n
        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n
            is specified, the storage service compares the hash that has\n            arrived with the one that was sent. If the two hashes do not\n
            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n
            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n
        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n
        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n
            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1.\n
            The default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n
        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n
        _validate_not_none('count', count)\n\n        if count < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))\n\n
        if count % _PAGE_SIZE != 0:\n            raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))\n\n        if progress_callback:\n            progress_callback(0, count)\n\n
        self.put_blob(container_name,\n                      blob_name,\n                      b'',\n                      'PageBlob',\n                      content_encoding,\n
                      content_language,\n                      content_md5,\n                      cache_control,\n                      x_ms_blob_content_type,\n
                      x_ms_blob_content_encoding,\n                      x_ms_blob_content_language,\n                      x_ms_blob_content_md5,\n                      x_ms_blob_cache_control,\n
                      x_ms_meta_name_values,\n                      x_ms_lease_id,\n                      count,\n                      x_ms_blob_sequence_number)\n\n
        remain_bytes = count\n        page_start = 0\n        while True:\n            request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE)\n
            data = stream.read(request_count)\n            if data:\n                length = len(data)\n                remain_bytes = remain_bytes - length\n
                page_end = page_start + length - 1\n                self.put_page(container_name,\n                              blob_name,\n                              data,\n
                              'bytes={0}-{1}'.format(page_start, page_end),\n                              'update',\n                              x_ms_lease_id=x_ms_lease_id)\n
                page_start = page_start + length\n\n                if progress_callback:\n                    progress_callback(page_start, count)\n            else:\n                break\n
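\n    # Note: the count passed above must satisfy count % _PAGE_SIZE == 0\n    # (the service uses 512-byte pages). A caller with an unaligned payload\n
    # has to pad it up to the next page boundary first; illustrative\n    # arithmetic sketch only, `size` is a hypothetical placeholder:\n    #\n
    #   pad = (_PAGE_SIZE - size % _PAGE_SIZE) % _PAGE_SIZE\n    #   aligned_size = size + pad\n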
\n    def put_page_blob_from_bytes(self, container_name, blob_name, blob,\n                                 index=0, count=None, content_encoding=None,\n
                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n
                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n
                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None,\n
                                 x_ms_blob_sequence_number=None,\n                                 progress_callback=None):\n        '''\n
        Creates a new page blob from an array of bytes, or updates the content\n        of an existing page blob, with automatic chunking and progress\n        notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of bytes.\n
        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or a negative value to\n            upload all bytes starting from index.\n
        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n
            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n
            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n
            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has\n
            arrived with the one that was sent. If the two hashes do not\n            match, the operation will fail with error code 400 (Bad Request).\n        cache_control:\n
            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n
        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n
            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n
            value of the sequence number must be between 0 and 2^63 - 1.\n
            The default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n
        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('blob', blob)\n
        _validate_not_none('index', index)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n
        if count is None or count < 0:\n            count = len(blob) - index\n\n        stream = BytesIO(blob)\n        stream.seek(index)\n\n
        self.put_page_blob_from_file(container_name,\n                                     blob_name,\n                                     stream,\n                                     count,\n
                                     content_encoding,\n                                     content_language,\n                                     content_md5,\n
                                     cache_control,\n                                     x_ms_blob_content_type,\n                                     x_ms_blob_content_encoding,\n
                                     x_ms_blob_content_language,\n                                     x_ms_blob_content_md5,\n                                     x_ms_blob_cache_control,\n
                                     x_ms_meta_name_values,\n                                     x_ms_lease_id,\n                                     x_ms_blob_sequence_number,\n
                                     progress_callback)\n\n    def get_blob(self, container_name, blob_name, snapshot=None,\n                 x_ms_range=None, x_ms_lease_id=None,\n
                 x_ms_range_get_content_md5=None):\n        '''\n        Reads or downloads a blob from the system, including its metadata and\n        properties.\n\n
        See get_blob_to_* for high level functions that handle the download\n        of large blobs with automatic chunking and progress notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n
            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_range:\n
            Optional. Return only the bytes of the blob in the specified range.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_range_get_content_md5:\n
            Optional. When this header is set to true and specified together\n            with the Range header, the service returns the MD5 hash for the\n
            range, as long as the range is less than or equal to 4 MB in size.\n        '''\n        _validate_not_none('container_name', container_name)\n
        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n
        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-range-get-content-md5',\n             _str_or_none(x_ms_range_get_content_md5))\n        ]\n
        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n
        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request, None)\n\n
        return _create_blob_result(response)\n\n    def get_blob_to_path(self, container_name, blob_name, file_path,\n                         open_mode='wb', snapshot=None, x_ms_lease_id=None,\n
                         progress_callback=None):\n        '''\n        Downloads a blob to a file path, with automatic chunking and progress\n        notifications.\n\n
        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        file_path: Path of file to write to.\n        open_mode: Mode to use when opening the file.\n
        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n
        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n        _validate_not_none('open_mode', open_mode)\n\n
        with open(file_path, open_mode) as stream:\n            self.get_blob_to_file(container_name,\n                                  blob_name,\n                                  stream,\n
                                  snapshot,\n                                  x_ms_lease_id,\n                                  progress_callback)\n\n
    def get_blob_to_file(self, container_name, blob_name, stream,\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n
        Downloads a blob to a file/stream, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n
        blob_name: Name of existing blob.\n        stream: Opened file/stream to write to.\n        snapshot:\n
            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n
        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n\n        props = self.get_blob_properties(container_name, blob_name)\n
        blob_size = int(props['content-length'])\n\n        if blob_size < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, blob_size)\n\n
            data = self.get_blob(container_name,\n                                 blob_name,\n                                 snapshot,\n                                 x_ms_lease_id=x_ms_lease_id)\n\n
            stream.write(data)\n\n            if progress_callback:\n                progress_callback(blob_size, blob_size)\n        else:\n            if progress_callback:\n
                progress_callback(0, blob_size)\n\n            index = 0\n            while index < blob_size:\n                chunk_range = 'bytes={0}-{1}'.format(\n
                    index,\n                    index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1)\n                # Pass the snapshot and lease id through so every chunk is\n
                # read from the same blob version as the initial request.\n                data = self.get_blob(\n                    container_name, blob_name, snapshot,\n
                    x_ms_range=chunk_range, x_ms_lease_id=x_ms_lease_id)\n                length = len(data)\n                index += length\n                if length > 0:\n
                    stream.write(data)\n                    if progress_callback:\n                        progress_callback(index, blob_size)\n
                    if length < self._BLOB_MAX_CHUNK_DATA_SIZE:\n                        break\n                else:\n                    break\n
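\n    # Usage sketch (illustrative only; `bs`, the names and the path are\n    # hypothetical placeholders). Large blobs are fetched in bounded\n
    # x_ms_range chunks, so memory use stays flat:\n    #\n    #   with open('/tmp/restore.bin', 'wb') as stream:\n    #       bs.get_blob_to_file('mycontainer', 'backup.bin', stream)\n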
\n    def get_blob_to_bytes(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None, progress_callback=None):\n        '''\n
        Downloads a blob as an array of bytes, with automatic chunking and\n        progress notifications.\n\n        container_name: Name of existing container.\n
        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n
            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n
            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n
            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n\n
        stream = BytesIO()\n        self.get_blob_to_file(container_name,\n                              blob_name,\n                              stream,\n                              snapshot,\n
                              x_ms_lease_id,\n                              progress_callback)\n\n        return stream.getvalue()\n\n
    def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8',\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n
        '''\n        Downloads a blob as unicode text, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n
        blob_name: Name of existing blob.\n        text_encoding: Encoding to use when decoding the blob data.\n        snapshot:\n
            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n
            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n
        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text_encoding', text_encoding)\n\n        result = self.get_blob_to_bytes(container_name,\n
                                        blob_name,\n                                        snapshot,\n                                        x_ms_lease_id,\n
                                        progress_callback)\n\n        return result.decode(text_encoding)\n
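\n    # Usage sketch (illustrative only; `bs` and the names are hypothetical\n    # placeholders):\n    #\n
    #   raw_bytes = bs.get_blob_to_bytes('mycontainer', 'settings.bin')\n    #   config_text = bs.get_blob_to_text('mycontainer', 'settings.json')\n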
\n    def get_blob_metadata(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None):\n        '''\n
        Returns all user-defined metadata for the specified blob or snapshot.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n
        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n
            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n
        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n
        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n
        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_blob_metadata(self, container_name, blob_name,\n
                          x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets user-defined metadata for the specified blob as one or more\n        name-value pairs.\n\n
        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_meta_name_values: Dict containing name and value pairs.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n
            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n
            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n
        self._perform_request(request)\n\n    def lease_blob(self, container_name, blob_name, x_ms_lease_action,\n                   x_ms_lease_id=None, x_ms_lease_duration=60,\n
                   x_ms_lease_break_period=None, x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a lock on a blob for write operations.\n\n
        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_action:\n
            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_lease_duration:\n
            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n
            between 15 and 60 seconds. A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n
            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration of\n
            seconds that the lease should continue before it is broken, between\n            0 and 60 seconds.\n
            This break period is only used if it is shorter than the time\n            remaining on the lease. If longer, the time remaining on the lease\n
            is used. A new lease will not be available before the break period\n            has expired, but the lease may be held for longer than the break\n
            period. If this header does not appear with a break operation, a\n            fixed-duration lease breaks after the remaining lease period\n
            elapses, and an infinite lease breaks immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n
            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n
        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=lease'\n        request.headers = [\n
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n
            ('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\\\n                if x_ms_lease_action == 'acquire' else None)),\n
            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n
        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n
            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n
            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n
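\n    # Lease protocol sketch (illustrative only; `bs` and the names are\n    # hypothetical placeholders): acquire returns the lease id that must\n
    # accompany writes while the lease is held, and release frees it:\n    #\n    #   lease = bs.lease_blob('mycontainer', 'myblob', 'acquire')\n
    #   lease_id = lease['x-ms-lease-id']\n    #   bs.put_blob('mycontainer', 'myblob', b'v2', 'BlockBlob',\n    #               x_ms_lease_id=lease_id)\n
    #   bs.lease_blob('mycontainer', 'myblob', 'release', lease_id)\n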
\n    def snapshot_blob(self, container_name, blob_name,\n                      x_ms_meta_name_values=None, if_modified_since=None,\n
                      if_unmodified_since=None, if_match=None,\n                      if_none_match=None, x_ms_lease_id=None):\n        '''\n        Creates a read-only snapshot of a blob.\n\n
        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_meta_name_values: Optional. Dict containing name and value pairs.\n
        if_modified_since: Optional. DateTime string.\n        if_unmodified_since: Optional. DateTime string.\n        if_match:\n
            Optional. Snapshot the blob only if its ETag value matches the\n            value specified.\n        if_none_match: Optional. An ETag value.\n
        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n
        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n
            _str(container_name) + '/' + _str(blob_name) + '?comp=snapshot'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n
            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n
            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n
        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n
            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n
            response,\n            filter=['x-ms-snapshot', 'etag', 'last-modified'])\n
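\n    # Usage sketch (illustrative only; `bs` and the names are hypothetical\n    # placeholders). The returned dict carries the snapshot's DateTime\n
    # marker, which can be passed as the snapshot argument of get_blob and\n    # the get_blob_to_* helpers:\n    #\n
    #   result = bs.snapshot_blob('mycontainer', 'myblob')\n    #   snapshot_time = result['x-ms-snapshot']\n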
Copy the blob only if its ETag value matches the\n            value specified.\n        if_none_match: Optional. An ETag value.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_source_lease_id:\n            Optional. Specify this to perform the Copy Blob operation only if\n            the lease ID given matches the active lease ID of the source blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_source', x_ms_copy_source)\n\n        if x_ms_copy_source.startswith('/'):\n            # Backwards compatibility for earlier versions of the SDK where\n            # the copy source can be in the following formats:\n            # - Blob in named container:\n            #     /accountName/containerName/blobName\n            # - Snapshot in named container:\n            #     /accountName/containerName/blobName?snapshot=<DateTime>\n            # - Blob in root container:\n            #     /accountName/blobName\n            # - Snapshot in root container:\n            #     /accountName/blobName?snapshot=<DateTime>\n            account, _, source =\\\n                x_ms_copy_source.partition('/')[2].partition('/')\n            x_ms_copy_source = self.protocol + '://' + \\\n                account + self.host_base + '/' + source\n\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-copy-source', _str_or_none(x_ms_copy_source)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-source-if-modified-since',\n             _str_or_none(x_ms_source_if_modified_since)),\n            ('x-ms-source-if-unmodified-since',\n             _str_or_none(x_ms_source_if_unmodified_since)),\n            ('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),\n            ('x-ms-source-if-none-match',\n             _str_or_none(x_ms_source_if_none_match)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,\n                        x_ms_lease_id=None):\n        '''\n         Aborts a pending copy_blob operation, and leaves a destination blob\n         with zero length and full metadata.\n\n         container_name: Name of destination container.\n         blob_name: Name of destination blob.\n         x_ms_copy_id:\n            Copy identifier provided in the x-ms-copy-id of the original\n            copy_blob operation.\n         x_ms_lease_id:\n            Required if the destination blob has an active infinite lease.\n        '''\n        _validate_not_none('container_name', container_name)\n     
   _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_id', x_ms_copy_id)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + \\\n            _str(blob_name) + '?comp=copy&copyid=' + \\\n            _str(x_ms_copy_id)\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-copy-action', 'abort'),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_blob(self, container_name, blob_name, snapshot=None,\n                    x_ms_lease_id=None):\n        '''\n        Marks the specified blob or snapshot for deletion. The blob is later\n        deleted during garbage collection.\n\n        To mark a specific snapshot for deletion provide the date/time of the\n        snapshot via the snapshot parameter.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to delete.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block(self, container_name, blob_name, block, blockid,\n                  content_md5=None, x_ms_lease_id=None):\n        '''\n        Creates a new block to be committed as part of a blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block: Content of the block.\n        blockid:\n            Required. A value that identifies the block. The string must be\n            less than or equal to 64 bytes in size.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the blob during transport. 
When this\n            header is specified, the storage service compares the hash that has\n            arrived with the one that was sent.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block', block)\n        _validate_not_none('blockid', blockid)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=block'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('blockid', _encode_base64(_str_or_none(blockid)))]\n        request.body = _get_request_body_bytes_only('block', block)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block_list(self, container_name, blob_name, block_list,\n                       content_md5=None, x_ms_blob_cache_control=None,\n                       x_ms_blob_content_type=None,\n                       x_ms_blob_content_encoding=None,\n                       x_ms_blob_content_language=None,\n                       x_ms_blob_content_md5=None, x_ms_meta_name_values=None,\n                       x_ms_lease_id=None):\n        '''\n        Writes a blob by specifying the list of block IDs that make up the\n        blob. In order to be written as part of a blob, a block must have been\n        successfully written to the server in a prior Put Block (REST API)\n        operation.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block_list: A str list containing the block ids.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service compares the hash that has arrived\n            with the one that was sent.\n        x_ms_blob_cache_control:\n            Optional. Sets the blob's cache control. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_type:\n            Optional. Sets the blob's content type. If specified, this property\n            is stored with the blob and returned with a read request.\n        x_ms_blob_content_encoding:\n            Optional. Sets the blob's content encoding. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_language:\n            Optional. Sets the blob's content language. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_md5:\n            Optional. An MD5 hash of the blob content. Note that this hash is\n            not validated, as the hashes for the individual blocks were\n            validated when each was uploaded.\n        x_ms_meta_name_values: Optional. 
Dict containing name and value pairs.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block_list', block_list)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.body = _get_request_body(\n            _convert_block_list_to_xml(block_list))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_block_list(self, container_name, blob_name, snapshot=None,\n                       blocklisttype=None, x_ms_lease_id=None):\n        '''\n        Retrieves the list of blocks that have been uploaded as part of a\n        block blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. Datetime to determine the time to retrieve the blocks.\n        blocklisttype:\n            Specifies whether to return the list of committed blocks, the list\n            of uncommitted blocks, or both lists together. 
Valid values are:\n            committed, uncommitted, or all.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [\n            ('snapshot', _str_or_none(snapshot)),\n            ('blocklisttype', _str_or_none(blocklisttype))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _convert_response_to_block_list(response)\n\n    def put_page(self, container_name, blob_name, page, x_ms_range,\n                 x_ms_page_write, timeout=None, content_md5=None,\n                 x_ms_lease_id=None, x_ms_if_sequence_number_lte=None,\n                 x_ms_if_sequence_number_lt=None,\n                 x_ms_if_sequence_number_eq=None,\n                 if_modified_since=None, if_unmodified_since=None,\n                 if_match=None, if_none_match=None):\n        '''\n        Writes a range of pages to a page blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        page: Content of the page.\n        x_ms_range:\n            Required. Specifies the range of bytes to be written as a page.\n            Both the start and end of the range must be specified. Must be in\n            format: bytes=startByte-endByte. Given that pages must be aligned\n            with 512-byte boundaries, the start offset must be a multiple of\n            512 and the end offset must be one less than a multiple of 512.\n            Examples of valid byte ranges are 0-511, 512-1023, etc.\n        x_ms_page_write:\n            Required. You may specify one of the following options:\n                update (lower case):\n                    Writes the bytes specified by the request body into the\n                    specified range. The Range and Content-Length headers must\n                    match to perform the update.\n                clear (lower case):\n                    Clears the specified range and releases the space used in\n                    storage for that range. To clear a range, set the\n                    Content-Length header to zero, and the Range header to a\n                    value that indicates the range to clear, up to maximum\n                    blob size.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        content_md5:\n            Optional. An MD5 hash of the page content. This hash is used to\n            verify the integrity of the page during transport. When this header\n            is specified, the storage service compares the hash of the content\n            that has arrived with the header value that was sent. If the two\n            hashes do not match, the operation will fail with error code 400\n            (Bad Request).\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_if_sequence_number_lte:\n            Optional. 
If the blob's sequence number is less than or equal to\n            the specified value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_lt:\n            Optional. If the blob's sequence number is less than the specified\n            value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_eq:\n            Optional. If the blob's sequence number is equal to the specified\n            value, the request proceeds; otherwise it fails.\n        if_modified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has been modified since the\n            specified date/time. If the blob has not been modified, the Blob\n            service fails.\n        if_unmodified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has not been modified since the\n            specified date/time. If the blob has been modified, the Blob\n            service fails.\n        if_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value matches the\n            value specified. If the values do not match, the Blob service fails.\n        if_none_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value does not\n            match the value specified. If the values are identical, the Blob\n            service fails.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('page', page)\n        _validate_not_none('x_ms_range', x_ms_range)\n        _validate_not_none('x_ms_page_write', x_ms_page_write)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=page'\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-page-write', _str_or_none(x_ms_page_write)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-if-sequence-number-le',\n             _str_or_none(x_ms_if_sequence_number_lte)),\n            ('x-ms-if-sequence-number-lt',\n             _str_or_none(x_ms_if_sequence_number_lt)),\n            ('x-ms-if-sequence-number-eq',\n             _str_or_none(x_ms_if_sequence_number_eq)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match))\n        ]\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body_bytes_only('page', page)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_page_ranges(self, container_name, blob_name, snapshot=None,\n                        range=None, x_ms_range=None, x_ms_lease_id=None):\n        '''\n        Retrieves the page ranges for a 
blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve information\n            from.\n        range:\n            Optional. Specifies the range of bytes over which to list ranges,\n            inclusively. If omitted, then all ranges for the blob are returned.\n        x_ms_range:\n            Optional. Specifies the range of bytes over which to list ranges,\n            inclusively, via the x-ms-range header. If both range and\n            x_ms_range are specified, the service uses x_ms_range.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=pagelist'\n        request.headers = [\n            ('Range', _str_or_none(range)),\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_simple_list(response, PageList, PageRange, \"page_ranges\")\n"
  },
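The block-blob write path above (put_block followed by put_block_list) is easiest to see end to end. A minimal usage sketch, under the assumption that the account credentials, container, and blob names below are placeholders and the container already exists:

```python
# Sketch: upload a blob in blocks, then commit the block list.
# 'myaccount', 'mykey', 'mycontainer', and 'myblob' are placeholders.
from azure.storage.blobservice import BlobService

blob_service = BlobService('myaccount', 'mykey')

chunks = [b'hello ', b'block ', b'blob']
block_ids = []
for index, chunk in enumerate(chunks):
    # Block ids must be <= 64 bytes; put_block base64-encodes the id for
    # the query string, so fixed-width plain strings work well.
    block_id = '{0:08d}'.format(index)
    blob_service.put_block('mycontainer', 'myblob', chunk, block_id)
    block_ids.append(block_id)

# Uncommitted blocks only become the readable blob after this commit.
blob_service.put_block_list('mycontainer', 'myblob', block_ids)

# Inspect what was committed.
committed = blob_service.get_block_list(
    'mycontainer', 'myblob', blocklisttype='committed')
```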
  {
    "path": "DSC/azure/storage/cloudstorageaccount.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.queueservice import QueueService\n\n\nclass CloudStorageAccount(object):\n\n    \"\"\"\n    Provides a factory for creating the blob, queue, and table services\n    with a common account name and account key.  Users can either use the\n    factory or can construct the appropriate service directly.\n    \"\"\"\n\n    def __init__(self, account_name=None, account_key=None):\n        self.account_name = account_name\n        self.account_key = account_key\n\n    def create_blob_service(self):\n        return BlobService(self.account_name, self.account_key)\n\n    def create_table_service(self):\n        return TableService(self.account_name, self.account_key)\n\n    def create_queue_service(self):\n        return QueueService(self.account_name, self.account_key)\n"
  },
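As a usage note, the factory is interchangeable with constructing each service directly; a short sketch with placeholder credentials:

```python
# Sketch: one credential pair, three services. Credentials are placeholders.
from azure.storage.cloudstorageaccount import CloudStorageAccount

account = CloudStorageAccount('myaccount', 'mykey')
blob_service = account.create_blob_service()    # same as BlobService('myaccount', 'mykey')
table_service = account.create_table_service()
queue_service = account.create_queue_service()
```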
  {
    "path": "DSC/azure/storage/queueservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureConflictError,\n    WindowsAzureError,\n    DEV_QUEUE_HOST,\n    QUEUE_SERVICE_HOST_BASE,\n    xml_escape,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    _ERROR_CONFLICT,\n    )\nfrom azure.http import (\n    HTTPRequest,\n    HTTP_RESPONSE_NO_CONTENT,\n    )\nfrom azure.storage import (\n    Queue,\n    QueueEnumResults,\n    QueueMessagesList,\n    StorageServiceProperties,\n    _update_storage_queue_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass QueueService(_StorageClient):\n\n    '''\n    This is the main class managing queue resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        super(QueueService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def get_queue_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Queue Service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def list_queues(self, prefix=None, marker=None, maxresults=None,\n                    include=None):\n        '''\n        Lists all of the queues in a given storage account.\n\n        prefix:\n            Filters the results to return only queues with names that begin\n            with the specified prefix.\n        marker:\n            A string value that identifies the portion of the list to be\n            returned with the next list operation. The operation returns a\n            NextMarker element within the response body if the list returned\n            was not complete. This value may then be used as a query parameter\n            in a subsequent call to request the next portion of the list of\n            queues. The marker value is opaque to the client.\n        maxresults:\n            Specifies the maximum number of queues to return. If maxresults is\n            not specified, the server will return up to 5,000 items.\n        include:\n            Optional. Include this parameter to specify that the queue's\n            metadata be returned as part of the response body.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(\n            response, QueueEnumResults, \"Queues\", Queue)\n\n    def create_queue(self, queue_name, x_ms_meta_name_values=None,\n                     fail_on_exist=False):\n        '''\n        Creates a queue under the given account.\n\n        queue_name: Name of the queue.\n        x_ms_meta_name_values:\n            Optional. 
A dict containing name-value pairs to associate with the\n            queue as metadata.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                response = self._perform_request(request)\n                if response.status == HTTP_RESPONSE_NO_CONTENT:\n                    return False\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            response = self._perform_request(request)\n            if response.status == HTTP_RESPONSE_NO_CONTENT:\n                raise WindowsAzureConflictError(\n                    _ERROR_CONFLICT.format(response.message))\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Permanently deletes the specified queue.\n\n        queue_name: Name of the queue.\n        fail_not_exist:\n            Specify whether to throw an exception when the queue doesn't exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue_metadata(self, queue_name):\n        '''\n        Retrieves user-defined metadata and queue properties on the specified\n        queue. Metadata is associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(\n            response,\n            prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])\n\n    def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):\n        '''\n        Sets user-defined metadata on the specified queue. 
Metadata is\n        associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        x_ms_meta_name_values:\n            Optional. A dict containing name-value pairs to associate with the\n            queue as metadata.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_message(self, queue_name, message_text, visibilitytimeout=None,\n                    messagettl=None):\n        '''\n        Adds a new message to the back of the message queue. A visibility\n        timeout can also be specified to make the message invisible until the\n        visibility timeout expires. A message must be in a format that can be\n        included in an XML request with UTF-8 encoding. The encoded message can\n        be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size\n        for previous versions.\n\n        queue_name: Name of the queue.\n        message_text: Message content.\n        visibilitytimeout:\n            Optional. If not specified, the default value is 0. Specifies the\n            new visibility timeout value, in seconds, relative to server time.\n            The new value must be larger than or equal to 0, and cannot be\n            larger than 7 days. The visibility timeout of a message cannot be\n            set to a value later than the expiry time. visibilitytimeout\n            should be set to a value smaller than the time-to-live value.\n        messagettl:\n            Optional. Specifies the time-to-live interval for the message, in\n            seconds. The maximum time-to-live allowed is 7 days. If this\n            parameter is omitted, the default time-to-live is 7 days.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_text', message_text)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('visibilitytimeout', _str_or_none(visibilitytimeout)),\n            ('messagettl', _str_or_none(messagettl))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_messages(self, queue_name, numofmessages=None,\n                     visibilitytimeout=None):\n        '''\n        Retrieves one or more messages from the front of the queue.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. 
A nonzero integer value that specifies the number of\n            messages to retrieve from the queue, up to a maximum of 32. If\n            fewer are visible, the visible messages are returned. By default,\n            a single message is retrieved from the queue with this operation.\n        visibilitytimeout:\n            Specifies the new visibility timeout value, in seconds, relative\n            to server time. The new value must be larger than or equal to 1\n            second, and cannot be larger than 7 days, or larger than 2 hours\n            on REST protocol versions prior to version 2011-08-18. The\n            visibility timeout of a message can be set to a value later than\n            the expiry time.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('numofmessages', _str_or_none(numofmessages)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def peek_messages(self, queue_name, numofmessages=None):\n        '''\n        Retrieves one or more messages from the front of the queue, but does\n        not alter the visibility of the message.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. A nonzero integer value that specifies the number of\n            messages to peek from the queue, up to a maximum of 32. By default,\n            a single message is peeked from the queue with this operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages?peekonly=true'\n        request.query = [('numofmessages', _str_or_none(numofmessages))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def delete_message(self, queue_name, message_id, popreceipt):\n        '''\n        Deletes the specified message.\n\n        queue_name: Name of the queue.\n        message_id: Message to delete.\n        popreceipt:\n            Required. 
A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('popreceipt', popreceipt)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [('popreceipt', _str_or_none(popreceipt))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def clear_messages(self, queue_name):\n        '''\n        Deletes all messages from the specified queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def update_message(self, queue_name, message_id, message_text, popreceipt,\n                       visibilitytimeout):\n        '''\n        Updates the visibility timeout of a message. You can also use this\n        operation to update the contents of a message.\n\n        queue_name: Name of the queue.\n        message_id: Message to update.\n        message_text: Content of message.\n        popreceipt:\n            Required. A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        visibilitytimeout:\n            Required. Specifies the new visibility timeout value, in seconds,\n            relative to server time. The new value must be larger than or equal\n            to 0, and cannot be larger than 7 days. The visibility timeout of a\n            message cannot be set to a value later than the expiry time. 
A\n            message can be updated until it has been deleted or has expired.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('message_text', message_text)\n        _validate_not_none('popreceipt', popreceipt)\n        _validate_not_none('visibilitytimeout', visibilitytimeout)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [\n            ('popreceipt', _str_or_none(popreceipt)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])\n\n    def set_queue_service_properties(self, storage_service_properties,\n                                     timeout=None):\n        '''\n        Sets the properties of a storage account's Queue service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n"
  },
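The message lifecycle implied by put_message, get_messages, and delete_message is worth a sketch: a retrieved message stays invisible for the visibility timeout and reappears unless it is deleted with its pop receipt. Credentials and the queue name below are placeholders, and the iteration assumes the legacy SDK's QueueMessagesList/QueueMessage models (a queue_messages list whose items expose message_id and pop_receipt):

```python
# Sketch: enqueue, dequeue, and acknowledge a message. Placeholders throughout.
from azure.storage.queueservice import QueueService

queue_service = QueueService('myaccount', 'mykey')
queue_service.create_queue('taskqueue')
queue_service.put_message('taskqueue', 'process order 42')

# The retrieved message is hidden for 30 seconds; delete it within that
# window (using message_id + pop_receipt) or it becomes visible again.
messages = queue_service.get_messages(
    'taskqueue', numofmessages=1, visibilitytimeout=30)
for message in messages.queue_messages:
    queue_service.delete_message(
        'taskqueue', message.message_id, message.pop_receipt)
```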
  {
    "path": "DSC/azure/storage/sharedaccesssignature.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import _sign_string, url_quote\nfrom azure.storage import X_MS_VERSION\n\n#-------------------------------------------------------------------------\n# Constants for the share access signature\nSIGNED_START = 'st'\nSIGNED_EXPIRY = 'se'\nSIGNED_RESOURCE = 'sr'\nSIGNED_PERMISSION = 'sp'\nSIGNED_IDENTIFIER = 'si'\nSIGNED_SIGNATURE = 'sig'\nSIGNED_VERSION = 'sv'\nRESOURCE_BLOB = 'b'\nRESOURCE_CONTAINER = 'c'\nSIGNED_RESOURCE_TYPE = 'resource'\nSHARED_ACCESS_PERMISSION = 'permission'\n\n#--------------------------------------------------------------------------\n\n\nclass WebResource(object):\n\n    '''\n    Class that stands for the resource to get the share access signature\n\n    path: the resource path.\n    properties: dict of name and values. Contains 2 item: resource type and\n            permission\n    request_url: the url of the webresource include all the queries.\n    '''\n\n    def __init__(self, path=None, request_url=None, properties=None):\n        self.path = path\n        self.properties = properties or {}\n        self.request_url = request_url\n\n\nclass Permission(object):\n\n    '''\n    Permission class. Contains the path and query_string for the path.\n\n    path: the resource path\n    query_string: dict of name, values. Contains SIGNED_START, SIGNED_EXPIRY\n            SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER,\n            SIGNED_SIGNATURE name values.\n    '''\n\n    def __init__(self, path=None, query_string=None):\n        self.path = path\n        self.query_string = query_string\n\n\nclass SharedAccessPolicy(object):\n\n    ''' SharedAccessPolicy class. 
'''\n\n    def __init__(self, access_policy, signed_identifier=None):\n        self.id = signed_identifier\n        self.access_policy = access_policy\n\n\nclass SharedAccessSignature(object):\n\n    '''\n    The main class used to do the signing and generating the signature.\n\n    account_name:\n        the storage account name used to generate the shared access signature\n    account_key: the access key used to generate the shared access signature\n    permission_set: the permission cache used to sign the request url.\n    '''\n\n    def __init__(self, account_name, account_key, permission_set=None):\n        self.account_name = account_name\n        self.account_key = account_key\n        self.permission_set = permission_set\n\n    def generate_signed_query_string(self, path, resource_type,\n                                     shared_access_policy,\n                                     version=X_MS_VERSION):\n        '''\n        Generates the query string for path, resource type and shared access\n        policy.\n\n        path: the resource\n        resource_type: could be blob or container\n        shared_access_policy: shared access policy\n        version:\n            x-ms-version for storage service, or None to get a signed query\n            string compatible with pre 2012-02-12 clients, where the version\n            is not included in the query string.\n        '''\n\n        query_string = {}\n        if shared_access_policy.access_policy.start:\n            query_string[\n                SIGNED_START] = shared_access_policy.access_policy.start\n\n        if version:\n            query_string[SIGNED_VERSION] = version\n        query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry\n        query_string[SIGNED_RESOURCE] = resource_type\n        query_string[\n            SIGNED_PERMISSION] = shared_access_policy.access_policy.permission\n\n        if shared_access_policy.id:\n            query_string[SIGNED_IDENTIFIER] = shared_access_policy.id\n\n        query_string[SIGNED_SIGNATURE] = self._generate_signature(\n            path, shared_access_policy, version)\n        return query_string\n\n    def sign_request(self, web_resource):\n        ''' Signs the request to generate a request_url with shared access\n        signature info for web_resource.'''\n\n        if self.permission_set:\n            for shared_access_signature in self.permission_set:\n                if self._permission_matches_request(\n                        shared_access_signature, web_resource,\n                        web_resource.properties[\n                            SIGNED_RESOURCE_TYPE],\n                        web_resource.properties[SHARED_ACCESS_PERMISSION]):\n                    if web_resource.request_url.find('?') == -1:\n                        web_resource.request_url += '?'\n                    else:\n                        web_resource.request_url += '&'\n\n                    web_resource.request_url += self._convert_query_string(\n                        shared_access_signature.query_string)\n                    break\n        return web_resource\n\n    def _convert_query_string(self, query_string):\n        ''' Converts query string to str. 
The order of the name/value\n        pairs is significant and must be preserved.'''\n\n        convert_str = ''\n        if SIGNED_START in query_string:\n            convert_str += SIGNED_START + '=' + \\\n                url_quote(query_string[SIGNED_START]) + '&'\n        convert_str += SIGNED_EXPIRY + '=' + \\\n            url_quote(query_string[SIGNED_EXPIRY]) + '&'\n        convert_str += SIGNED_PERMISSION + '=' + \\\n            query_string[SIGNED_PERMISSION] + '&'\n        convert_str += SIGNED_RESOURCE + '=' + \\\n            query_string[SIGNED_RESOURCE] + '&'\n\n        if SIGNED_IDENTIFIER in query_string:\n            convert_str += SIGNED_IDENTIFIER + '=' + \\\n                query_string[SIGNED_IDENTIFIER] + '&'\n        if SIGNED_VERSION in query_string:\n            convert_str += SIGNED_VERSION + '=' + \\\n                query_string[SIGNED_VERSION] + '&'\n        convert_str += SIGNED_SIGNATURE + '=' + \\\n            url_quote(query_string[SIGNED_SIGNATURE]) + '&'\n        return convert_str\n\n    def _generate_signature(self, path, shared_access_policy, version):\n        ''' Generates signature for a given path and shared access policy. '''\n\n        def get_value_to_append(value, no_new_line=False):\n            return_value = ''\n            if value:\n                return_value = value\n            if not no_new_line:\n                return_value += '\\n'\n            return return_value\n\n        if path[0] != '/':\n            path = '/' + path\n\n        canonicalized_resource = '/' + self.account_name + path\n\n        # Form the string to sign from shared_access_policy and canonicalized\n        # resource. The order of values is important.\n        string_to_sign = \\\n            (get_value_to_append(shared_access_policy.access_policy.permission) +\n             get_value_to_append(shared_access_policy.access_policy.start) +\n             get_value_to_append(shared_access_policy.access_policy.expiry) +\n             get_value_to_append(canonicalized_resource))\n\n        if version:\n            string_to_sign += get_value_to_append(shared_access_policy.id)\n            string_to_sign += get_value_to_append(version, True)\n        else:\n            string_to_sign += get_value_to_append(shared_access_policy.id, True)\n\n        return self._sign(string_to_sign)\n\n    def _permission_matches_request(self, shared_access_signature,\n                                    web_resource, resource_type,\n                                    required_permission):\n        ''' Checks whether the requested permission matches the given\n        shared_access_signature, web_resource and resource type. '''\n\n        required_resource_type = resource_type\n        if required_resource_type == RESOURCE_BLOB:\n            required_resource_type += RESOURCE_CONTAINER\n\n        for name, value in shared_access_signature.query_string.items():\n            if name == SIGNED_RESOURCE and \\\n                required_resource_type.find(value) == -1:\n                return False\n            elif name == SIGNED_PERMISSION and \\\n                required_permission.find(value) == -1:\n                return False\n\n        return web_resource.path.find(shared_access_signature.path) != -1\n\n    def _sign(self, string_to_sign):\n        ''' Signs the string with HMAC-SHA256 and returns the result as a\n        base64-encoded string. '''\n\n        return _sign_string(self.account_key, string_to_sign)\n"
  },
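To make the signing flow concrete: generate_signed_query_string builds the st/se/sr/sp/sig map, and _convert_query_string flattens it in the order the service expects. A sketch under these assumptions: the AccessPolicy model (with start, expiry, and permission attributes) is importable from azure.storage, all names and dates are placeholders, and _convert_query_string, though private, is used here for illustration:

```python
# Sketch: produce a read-only SAS query string for a blob.
# AccessPolicy (start/expiry/permission) is assumed to come from azure.storage.
from azure.storage import AccessPolicy
from azure.storage.sharedaccesssignature import (
    RESOURCE_BLOB,
    SharedAccessPolicy,
    SharedAccessSignature,
)

sas = SharedAccessSignature('myaccount', 'mykey')

policy = AccessPolicy()
policy.start = '2014-01-01T00:00:00Z'
policy.expiry = '2014-01-02T00:00:00Z'
policy.permission = 'r'  # read-only

query = sas.generate_signed_query_string(
    '/mycontainer/myblob', RESOURCE_BLOB, SharedAccessPolicy(policy))

# _convert_query_string emits the fields in the required order; trim the
# trailing '&' before appending to a resource URL.
token = sas._convert_query_string(query).rstrip('&')
url = 'https://myaccount.blob.core.windows.net/mycontainer/myblob?' + token
```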
  {
    "path": "DSC/azure/storage/storageclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\nimport sys\n\nfrom azure import (\n    WindowsAzureError,\n    DEV_ACCOUNT_NAME,\n    DEV_ACCOUNT_KEY,\n    _ERROR_STORAGE_MISSING_INFO,\n    )\nfrom azure.http import HTTPError\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import _storage_error_handler\n\n#--------------------------------------------------------------------------\n# constants for azure app setting environment variables\nAZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'\nAZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'\nEMULATED = 'EMULATED'\n\n#--------------------------------------------------------------------------\n\n\nclass _StorageClient(object):\n\n    '''\n    This is the base class for BlobManager, TableManager and QueueManager.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base='', dev_host=''):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self.account_name = account_name\n        self.account_key = account_key\n        self.requestid = None\n        self.protocol = protocol\n        self.host_base = host_base\n        self.dev_host = dev_host\n\n        # the app is not run in azure emulator or use default development\n        # storage account and key if app is run in emulator.\n        self.use_local_storage = False\n\n        # check whether it is run in emulator.\n        if EMULATED in os.environ:\n            self.is_emulated = os.environ[EMULATED].lower() != 'false'\n        else:\n            self.is_emulated = False\n\n        # get account_name and account key. 
If they are not set when\n        # constructing, get the account and key from environment variables if\n        # the app is not run in azure emulator or use default development\n        # storage account and key if app is run in emulator.\n        if not self.account_name or not self.account_key:\n            if self.is_emulated:\n                self.account_name = DEV_ACCOUNT_NAME\n                self.account_key = DEV_ACCOUNT_KEY\n                self.protocol = 'http'\n                self.use_local_storage = True\n            else:\n                self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)\n                self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)\n\n        if not self.account_name or not self.account_key:\n            raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)\n\n        self._httpclient = _HTTPClient(\n            service_instance=self,\n            account_key=self.account_key,\n            account_name=self.account_name,\n            protocol=self.protocol)\n        self._batchclient = None\n        self._filter = self._perform_request_worker\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = type(self)(self.account_name, self.account_key, self.protocol)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def _get_host(self):\n        if self.use_local_storage:\n            return self.dev_host\n        else:\n            return self.account_name + self.host_base\n\n    def _perform_request_worker(self, request):\n        return self._httpclient.perform_request(request)\n\n    def _perform_request(self, request, text_encoding='utf-8'):\n        '''\n        Sends the request and returns the response. Catches HTTPError and\n        hands it to the error handler.\n        '''\n        try:\n            if self._batchclient is not None:\n                return self._batchclient.insert_request_to_batch(request)\n            else:\n                resp = self._filter(request)\n\n            if sys.version_info >= (3,) and isinstance(resp, bytes) and \\\n                text_encoding:\n                resp = resp.decode(text_encoding)\n\n        except HTTPError as ex:\n            _storage_error_handler(ex)\n\n        return resp\n"
  },
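  {
    "path": "DSC/examples/storageclient_filter_sketch.py",
    "content": "# NOTE: illustrative usage sketch only -- this file is NOT part of the\n# original extension source. It assumes the bundled legacy 'azure' package\n# (DSC/azure) is importable and that the AZURE_STORAGE_ACCOUNT and\n# AZURE_STORAGE_ACCESS_KEY environment variables are set, as required by\n# _StorageClient.__init__ in storageclient.py.\nfrom azure.storage.tableservice import TableService\n\n\ndef logging_filter(request, next_filter):\n    # pre-process: inspect the outgoing HTTPRequest, then delegate to the\n    # next filter in the chain (see _StorageClient.with_filter)\n    print('>> %s %s' % (request.method, request.path))\n    response = next_filter(request)\n    # post-process: the raw response body comes back from the HTTP client\n    print('<< response received')\n    return response\n\n\nservice = TableService()  # account name/key are read from the environment\nservice = service.with_filter(logging_filter)\n# optional HTTP CONNECT tunnelling through a proxy (hypothetical address)\nservice.set_proxy('192.168.0.100', 6000)\n"
  },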
  {
    "path": "DSC/azure/storage/tableservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    TABLE_SERVICE_HOST_BASE,\n    DEV_TABLE_HOST,\n    _convert_class_to_xml,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.http.batchclient import _BatchClient\nfrom azure.storage import (\n    StorageServiceProperties,\n    _convert_entity_to_xml,\n    _convert_response_to_entity,\n    _convert_table_to_xml,\n    _convert_xml_to_entity,\n    _convert_xml_to_table,\n    _sign_storage_table_request,\n    _update_storage_table_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass TableService(_StorageClient):\n\n    '''\n    This is the main class managing Table resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=TABLE_SERVICE_HOST_BASE, dev_host=DEV_TABLE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. 
Defaults to localhost.\n        '''\n        super(TableService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def begin_batch(self):\n        if self._batchclient is None:\n            self._batchclient = _BatchClient(\n                service_instance=self,\n                account_key=self.account_key,\n                account_name=self.account_name)\n        return self._batchclient.begin_batch()\n\n    def commit_batch(self):\n        try:\n            ret = self._batchclient.commit_batch()\n        finally:\n            self._batchclient = None\n        return ret\n\n    def cancel_batch(self):\n        self._batchclient = None\n\n    def get_table_service_properties(self):\n        '''\n        Gets the properties of a storage account's Table service, including\n        Windows Azure Storage Analytics.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def set_table_service_properties(self, storage_service_properties):\n        '''\n        Sets the properties of a storage account's Table Service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def query_tables(self, table_name=None, top=None, next_table_name=None):\n        '''\n        Returns a list of tables under the specified account.\n\n        table_name: Optional.  The specific table to query.\n        top: Optional. Maximum number of tables to return.\n        next_table_name:\n            Optional. 
When top is used, the next table name is stored in\n            result.x_ms_continuation['NextTableName']\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        if table_name is not None:\n            uri_part_table_name = \"('\" + table_name + \"')\"\n        else:\n            uri_part_table_name = \"\"\n        request.path = '/Tables' + uri_part_table_name + ''\n        request.query = [\n            ('$top', _int_or_none(top)),\n            ('NextTableName', _str_or_none(next_table_name))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_table)\n\n    def create_table(self, table, fail_on_exist=False):\n        '''\n        Creates a new table in the storage account.\n\n        table:\n            Name of the table to create. Table name may contain only\n            alphanumeric characters and cannot begin with a numeric character.\n            It is case-insensitive and must be from 3 to 63 characters long.\n        fail_on_exist:\n            Specify whether to throw an exception if the table already exists.\n        '''\n        _validate_not_none('table', table)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/Tables'\n        request.body = _get_request_body(_convert_table_to_xml(table))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_table(self, table_name, fail_not_exist=False):\n        '''\n        Deletes the specified table.\n\n        table_name: Name of the table to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the table doesn't exist.\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/Tables(\\'' + _str(table_name) + '\\')'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_entity(self, table_name, partition_key, row_key, select=''):\n        '''\n        Get an entity in a table; includes the $select options.\n\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        select: Property names to select.\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        
_validate_not_none('row_key', row_key)\n        _validate_not_none('select', select)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + \\\n            '(PartitionKey=\\'' + _str(partition_key) + \\\n            '\\',RowKey=\\'' + \\\n            _str(row_key) + '\\')?$select=' + \\\n            _str(select) + ''\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n\n    def query_entities(self, table_name, filter=None, select=None, top=None,\n                       next_partition_key=None, next_row_key=None):\n        '''\n        Get entities in a table; includes the $filter and $select options.\n\n        table_name: Table to query.\n        filter:\n            Optional. Filter as described at\n            http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx\n        select: Optional. Property names to select from the entities.\n        top: Optional. Maximum number of entities to return.\n        next_partition_key:\n            Optional. When top is used, the next partition key is stored in\n            result.x_ms_continuation['NextPartitionKey']\n        next_row_key:\n            Optional. When top is used, the next row key is stored in\n            result.x_ms_continuation['NextRowKey']\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + '()'\n        request.query = [\n            ('$filter', _str_or_none(filter)),\n            ('$select', _str_or_none(select)),\n            ('$top', _int_or_none(top)),\n            ('NextPartitionKey', _str_or_none(next_partition_key)),\n            ('NextRowKey', _str_or_none(next_row_key))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_entity)\n\n    def insert_entity(self, table_name, entity,\n                      content_type='application/atom+xml'):\n        '''\n        Inserts a new entity into a table.\n\n        table_name: Table name.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. 
Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + ''\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n\n    def update_entity(self, table_name, partition_key, row_key, entity,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity in a table. The Update Entity operation\n        replaces the entire entity and can be used to remove properties.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to update. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the update should be\n            performed. To force an unconditional update, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def merge_entity(self, table_name, partition_key, row_key, entity,\n                     content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity by updating the entity's properties. This\n        operation does not replace the existing entity as the Update Entity\n        operation does.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to merge. Can be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the merge should be\n            performed. 
To force an unconditional merge, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def delete_entity(self, table_name, partition_key, row_key,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Deletes an existing entity in a table.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the delete should be\n            performed. To force an unconditional delete, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('content_type', content_type)\n        _validate_not_none('if_match', if_match)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        self._perform_request(request)\n\n    def insert_or_replace_entity(self, table_name, partition_key, row_key,\n                                 entity, content_type='application/atom+xml'):\n        '''\n        Replaces an existing entity or inserts a new entity if it does not\n        exist in the table. Because this operation can insert or update an\n        entity, it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. 
Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def insert_or_merge_entity(self, table_name, partition_key, row_key,\n                               entity, content_type='application/atom+xml'):\n        '''\n        Merges an existing entity or inserts a new entity if it does not exist\n        in the table. Because this operation can insert or update an entity,\n        it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def _perform_request_worker(self, request):\n        auth = _sign_storage_table_request(request,\n                                           self.account_name,\n                                           self.account_key)\n        request.headers.append(('Authorization', auth))\n        return self._httpclient.perform_request(request)\n"
  },
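  {
    "path": "DSC/examples/tableservice_usage_sketch.py",
    "content": "# NOTE: illustrative usage sketch only -- this file is NOT part of the\n# original extension source. It assumes the bundled legacy 'azure' package\n# is importable and that the storage account/key environment variables\n# described in storageclient.py are set; the table and entity values are\n# made up for the example.\nfrom azure.storage.tableservice import TableService\n\nservice = TableService()\n\n# returns True when created, False when it already existed (fail_on_exist=False)\nservice.create_table('tasks')\n\n# insert_entity accepts a plain dict; PartitionKey and RowKey identify the entity\nservice.insert_entity('tasks', {'PartitionKey': 'batch1',\n                                'RowKey': '001',\n                                'Status': 'queued'})\n\n# $filter/$top queries; when 'top' is used, continuation tokens are exposed\n# on the result as x_ms_continuation['NextPartitionKey'] / ['NextRowKey']\nentities = service.query_entities('tasks',\n                                  filter=\"PartitionKey eq 'batch1'\",\n                                  top=10)\n\nservice.delete_entity('tasks', 'batch1', '001')\n"
  },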
  {
    "path": "DSC/curlhttpclient.py",
    "content": "#!/usr/bin/env python2\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"Curl CLI wrapper.\"\"\"\n\nimport base64\nimport random\nimport subprocess\nimport time\nimport traceback\nimport os\nimport sys\n\nimport subprocessfactory\nfrom httpclient import *\n\njson = serializerfactory.get_serializer(sys.version_info)\n\nCURL_ALIAS = \"curl\"\nCURL_HTTP_CODE_SPECIAL_VAR = \"%{http_code}\"\nOPTION_LOCATION = \"--location\"\nOPTION_SILENT = \"--silent\"\nOPTION_CERT = \"--cert\"\nOPTION_KEY = \"--key\"\nOPTION_WRITE_OUT = \"--write-out\"\nOPTION_HEADER = \"--header\"\nOPTION_REQUEST = \"--request\"\nOPTION_INSECURE = \"--insecure\"\nOPTION_DATA = \"--data\"\nOPTION_PROXY = \"--proxy\"\n\nOPTION_CONNECT_TIMEOUT = \"--connect-timeout\"\nOPTION_MAX_TIME = \"--max-time\"\nOPTION_RETRY = \"--retry\"\nOPTION_RETRY_DELAY = \"--retry-delay\"\nOPTION_RETRY_MAX_TIME = \"--retry-max-time\"\n\n# maximum  time  in seconds that you allow the whole operation to take\nVALUE_MAX_TIME = \"30\"\n\n# this only limits the connection phase, it has no impact once it has connected\nVALUE_CONNECT_TIMEOUT = \"15\"\n\n# if  a  transient  error is returned when curl tries to perform a transfer, it will retry this number of times\n# before giving up\nVALUE_RETRY = \"3\"\n\n# make  curl  sleep  this amount of time before each retry when a transfer has failed with a transient\nVALUE_RETRY_DELAY = \"3\"\n\n# retries will be done as usual as long as the timer hasn't reached this given limit\nVALUE_RETRY_MAX_TIME = \"60\"\n\n# curl status delimiter\nSTATUS_CODE_DELIMITER = \"\\n\\nstatus_code:\"\n\n# curl success exit code\nEXIT_SUCCESS = 0\n\n\nclass CurlHttpClient(HttpClient):\n    \"\"\"Curl CLI wrapper. Inherits from HttpClient.\n\n    Targets :\n        [2.4.0 - 2.7.9[\n\n    Implements the following method common to all classes inheriting HttpClient.\n        get     (url, headers)\n        post    (url, headers, data)\n\n    Curl documentation :\n    CLI         : https://curl.haxx.se/docs/manpage.html\n    Error code  : https://curl.haxx.se/libcurl/c/libcurl-errors.html\n    \"\"\"\n\n    @staticmethod\n    def parse_raw_output(output):\n        \"\"\"Parses stdout from Curl to extract response_body and status_code.\n\n        Args:\n            output : string, raw stdout from curl subprocess.\n\n        The format of the raw output should be of the following format (example request to www.microsoft.com):\n            <html><head><title>Microsoft Corporation</title><meta http-equiv=\"X-UA-Compatible\" content=\"IE=EmulateIE7\">\n            </meta><meta http-equiv=\"Content-Type\" content=\"text/html; chaset=utf-8\"></meta><meta name=\"SearchTitle\"\n            content=\"Microsoft.com\" scheme=\"\"></meta><meta name=\"Description\" content=\"Get product information, support,\n            and news from Microsoft.\" scheme=\"\"></meta><meta name=\"Title\" content=\"Microsoft.com Home Page\" scheme=\"\">\n            </meta><meta name=\"Keywords\" content=\"Microsoft, product, support, help, training Office, Windows,\n            software, download, trial, preview, demo,  business, security, update, free, computer, PC, server, search,\n            download, install, news\" scheme=\"\"></meta><mta name=\"SearchDescription\" content=\"Microsoft.com Homepage\"\n            scheme=\"\"></meta></head><body><p>Your current User-Agent string appears to be from an automated process,\n            if his is incorrect, please click this link:<a 
href=\"http://www.microsoft.com/en/us/default.aspx?redir=\n            true\">United States English Microsoft Homepage</a></p></body></html>\n\n\n            status_code:200\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        start_index = output.index(STATUS_CODE_DELIMITER)\n        response_body = output[:start_index]\n        status_code = output[start_index:].strip(\"\\n\").split(\":\")[1]\n        return RequestResponse(status_code, response_body)\n\n    def get_base_cmd(self):\n        \"\"\"Creates the base cmd array to invoke the Curl CLI.\n\n        Adds the following arguments for all request:\n            --location : Retry the request if the requested page has moved to a different location\n            --silent   : Silent or quiet mode\n\n        Adds the following optional arguments\n            --cert     : Tells curl to use the specified client certificate file when getting a file with HTTPS\n            --key      : Private key file name\n\n        Returns:\n            An array containing all required arguments to invoke curl, example:\n            [\"curl\", \"--location\", \"--silent\", \"--cert\", \"my_cert_file.crt\", \"--key\", \"my_key_file.key\"]\n        \"\"\"\n        # basic options\n        cmd = [CURL_ALIAS, OPTION_LOCATION, OPTION_SILENT]\n\n        # retry and timeout options\n        cmd += [OPTION_CONNECT_TIMEOUT, VALUE_CONNECT_TIMEOUT, OPTION_MAX_TIME, VALUE_MAX_TIME, OPTION_RETRY,\n                VALUE_RETRY, OPTION_RETRY_DELAY, VALUE_RETRY_DELAY, OPTION_RETRY_MAX_TIME, VALUE_RETRY_MAX_TIME]\n\n        if self.cert_path is not None:\n            cmd.extend([OPTION_CERT, self.cert_path, OPTION_KEY, self.key_path])\n\n        if self.proxy_configuration is not None:\n            cmd.extend([OPTION_PROXY, self.proxy_configuration])\n        return cmd\n\n    def build_request_cmd(self, url, headers, method=None, data_file_path=None):\n        \"\"\"Formats the final cmd array to invoke Curl. 
The final cmd is created from the base command and additional\n        optional parameters.\n\n        Args:\n            url             : string    , the URL.\n            headers         : dictionary, contains the required headers.\n            method          : string    , specifies the http method to use.\n            data_file_path  : string    , data file path.\n\n        Adds the following arguments to the base cmd when required:\n            --write-out : Makes curl display information on stdout after a completed transfer (i.e. status_code).\n            --header    : Extra headers to include in the request when sending the request.\n            --request   : Specifies a custom request method to use for the request.\n            --insecure  : Explicitly allows curl to perform \"insecure\" SSL connections and transfers.\n\n        Returns:\n            An array containing the base cmd concatenated with any required extra argument, example:\n            [\"curl\", \"--location\", \"--silent\", \"--cert\", \"my_cert_file.crt\", \"--key\", \"my_key_file.key\", \"--insecure\",\n                \"https://www.microsoft.com\"]\n        \"\"\"\n        cmd = self.get_base_cmd()\n        cmd.append(OPTION_WRITE_OUT)\n        cmd.append(STATUS_CODE_DELIMITER + CURL_HTTP_CODE_SPECIAL_VAR + \"\\n\")\n\n        if headers is not None:\n            for key, value in headers.items():\n                cmd.append(OPTION_HEADER)\n                cmd.append(key + \": \" + value)\n\n        if method is not None:\n            cmd.append(OPTION_REQUEST)\n            cmd.append(method)\n            if data_file_path is not None:\n                cmd.append(OPTION_DATA)\n                cmd.append(\"@\" + data_file_path)\n\n        if self.insecure:\n            cmd.append(OPTION_INSECURE)\n\n        cmd.append('--verbose')\n        cmd.append(url)\n        return cmd\n\n    def issue_request(self, url, headers, method, data):\n        data_file_path = None\n        headers = self.merge_headers(self.default_headers, headers)\n\n        # if a body is included, write it to a temporary file (prevent body from leaking in ps/top)\n        if method != self.GET and data is not None:\n            serialized_data = self.json.dumps(data)\n\n            # write data to disk; the urlsafe alphabet is used so the encoded\n            # name cannot contain '/' characters that would break the path\n            data_file_name = base64.urlsafe_b64encode(str(time.time()) +\n                                                      str(random.randint(0, sys.maxsize)) +\n                                                      str(random.randint(0, sys.maxsize)) +\n                                                      str(random.randint(0, sys.maxsize)) +\n                                                      str(random.randint(0, sys.maxsize)))\n            data_file_path = os.path.join(\"/tmp\", data_file_name)\n            f = open(data_file_path, \"wb\")\n            f.write(serialized_data)\n            f.close()\n\n            # insert Content-Type header\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        # ** nesting of try statement is required since try/except/finally isn't supported prior to 2.5 **\n        try:\n            try:\n                cmd = self.build_request_cmd(url, headers, method=method, data_file_path=data_file_path)\n                env = os.environ.copy()\n                p = subprocessfactory.create_subprocess(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n                out, err = p.communicate()\n\n                if p.returncode != EXIT_SUCCESS:\n
                   raise Exception(\"Http request failed due to curl error. [returncode=\" + str(p.returncode) + \"]\" +\n                                    \"[stderr=\" + str(err) + \"]\")\n\n                return self.parse_raw_output(out)\n            except Exception as e:\n                raise Exception(\"Unknown exception while issuing request. [exception=\" + str(e) + \"]\" +\n                                \"[stacktrace=\" + str(traceback.format_exc()) + \"]\")\n        finally:\n            if data_file_path is not None:\n                os.remove(data_file_path)\n\n    def get(self, url, headers=None, data=None):\n        \"\"\"Issues a GET request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        return self.issue_request(url, headers, self.GET, data)\n\n    def post(self, url, headers=None, data=None):\n        \"\"\"Issues a POST request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        return self.issue_request(url, headers, self.POST, data)\n\n    def put(self, url, headers=None, data=None):\n        \"\"\"Issues a PUT request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        return self.issue_request(url, headers, self.PUT, data)\n\n    def delete(self, url, headers=None, data=None):\n        \"\"\"Issues a DELETE request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        return self.issue_request(url, headers, self.DELETE, data)"
  },
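  {
    "path": "DSC/examples/httpclient_usage_sketch.py",
    "content": "# NOTE: illustrative usage sketch only -- this file is NOT part of the\n# original extension source. It mirrors how dsc.py builds a client in\n# send_heart_beat_msg_to_agent_service: HttpClientFactory picks the concrete\n# HttpClient (CurlHttpClient targets Python [2.4.0 - 2.7.9[) based on the\n# interpreter version. The cert/key paths and the URL are example values.\nimport sys\n\nimport httpclientfactory\n\nfactory = httpclientfactory.HttpClientFactory('/etc/opt/omi/ssl/oaas.crt',\n                                              '/etc/opt/omi/ssl/oaas.key')\nclient = factory.create_http_client(sys.version_info)\n\n# get()/post() return a RequestResponse carrying the status code and the\n# body parsed out of curl's --write-out marker (see parse_raw_output)\nresponse = client.get('https://example.invalid/status',\n                      headers={'Accept': 'application/json'})\nprint(response.status_code)\n\n# POST bodies are JSON-serialized and, for the curl client, written to a\n# temporary file so the payload never appears on the process command line\nresponse = client.post('https://example.invalid/status',\n                       headers={'Accept': 'application/json'},\n                       data={'ping': True})\n"
  },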
  {
    "path": "DSC/dsc.py",
    "content": "#!/usr/bin/env python\n#\n# DSC extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport traceback\ntry:\n    from urllib.parse import urlparse, urlencode\n    from urllib.request import urlopen, Request\n    from urllib.error import HTTPError\nexcept ImportError:\n    from urlparse import urlparse\n    from urllib import urlencode\n    from urllib2 import urlopen, Request, HTTPError\nimport time\nimport platform\nimport json\nimport datetime\nimport serializerfactory\nimport httpclient\nimport urllib2httpclient\nimport urllib3httpclient\nimport httpclientfactory\n\nfrom azure.storage import BlobService\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\n# Define global variables\n\nExtensionName = 'Microsoft.OSTCExtensions.DSCForLinux'\nExtensionShortName = 'DSCForLinux'\nDownloadDirectory = 'download'\n\nomi_package_prefix = 'packages/omi-1.7.3-0.ssl_'\ndsc_package_prefix = 'packages/dsc-1.2.4-0.ssl_'\nomi_major_version = 1\nomi_minor_version = 7\nomi_build = 3\nomi_release = 0\ndsc_major_version = 1\ndsc_minor_version = 2\ndsc_build = 4\ndsc_release = 0\npackage_pattern = '(\\d+).(\\d+).(\\d+).(\\d+)'\nnodeid_path = '/etc/opt/omi/conf/dsc/agentid'\ndate_time_format = \"%Y-%m-%dT%H:%M:%SZ\"\nextension_handler_version = \"3.0.0.6\"\npython_command = 'python3' if sys.version_info >= (3,0) else 'python'\ndsc_script_path = '/opt/microsoft/dsc/Scripts/python3' if sys.version_info >= (3,0) else '/opt/microsoft/dsc/Scripts'\nspace_string = \" \"\n\n# Error codes\nUnsupportedDistro = 51 #excludes from SLA\nDPKGLockedErrorCode = 51 #excludes from SLA\n\n# DSC-specific Operation\nclass Operation:\n    Download = \"Download\"\n    ApplyMof = \"ApplyMof\"\n    ApplyMetaMof = \"ApplyMetaMof\"\n    InstallModule = \"InstallModule\"\n    RemoveModule = \"RemoveModule\"\n    Register = \"Register\"\n    Enable = \"Enable\"\n\n\nclass DistroCategory:\n    debian = 1\n    redhat = 2\n    suse = 3\n\n\nclass Mode:\n    push = \"push\"\n    pull = \"pull\"\n    install = \"install\"\n    remove = \"remove\"\n    register = \"register\"\n\n\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" % (ExtensionShortName))\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.try_parse_context()\n\n    global public_settings\n    public_settings = hutil.get_public_settings()\n    if not public_settings:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='MainInProgress', isSuccess=True,\n                                  message=\"Public settings are NOT provided.\")\n        public_settings = {}\n\n    global protected_settings\n    protected_settings = hutil.get_protected_settings()\n    if not protected_settings:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='MainInProgress', isSuccess=True,\n                          
        message=\"protected settings are NOT provided.\")\n        protected_settings = {}\n\n    global distro_category\n    vm_supported, vm_dist, vm_ver = check_supported_OS()\n    distro_category = get_distro_category(vm_dist.lower(), vm_ver.lower())\n    \n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n\n\ndef get_distro_category(distro_name,distro_version):\n    if distro_name.startswith('ubuntu') or (distro_name.startswith('debian')):\n        return DistroCategory.debian\n    elif distro_name.startswith('centos') or distro_name.startswith('redhat') or distro_name.startswith('oracle') or distro_name.startswith('red hat'):\n        return DistroCategory.redhat\n    elif distro_name.startswith('suse') or distro_name.startswith('sles'):\n        return DistroCategory.suse \n    waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True, message=\"Unsupported distro :\" + distro_name + \"; distro_version: \" + distro_version)\n    hutil.do_exit(UnsupportedDistro, 'Install', 'error', str(UnsupportedDistro), distro_name + 'is not supported.')\n    \ndef check_supported_OS():\n    \"\"\"\n    Checks if the VM this extension is running on is supported by DSC\n    Returns for platform.linux_distribution() vary widely in format, such as\n    '7.3.1611' returned for a VM with CentOS 7, so the first provided\n    digits must match.\n    All other distros not supported will get error code 51\n    \"\"\"\n    supported_dists = {'redhat' : ['7', '8'], # CentOS\n                       'centos' : ['7', '8'], # CentOS\n                       'red hat' : ['7', '8'], # Redhat\n                       'debian' : ['8', '9', '10'], # Debian\n                       'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu\n                       'oracle' : ['7'], # Oracle\n                       'suse' : ['12', '15'], #SLES\n                       'sles' : ['12', '15']\n    }\n    vm_dist, vm_ver, vm_supported = '', '', False\n    \n    try:\n        vm_dist, vm_ver, vm_id = platform.linux_distribution()\n    except AttributeError:\n        try:\n            vm_dist, vm_ver, vm_id = platform.dist()\n        except:\n            waagent.Log(\"Falling back to /etc/os-release distribution parsing\")\n\n    # Fallback if either of the above fail; on some (especially newer)\n    # distros, linux_distribution() and dist() are unreliable or deprecated\n    if not vm_dist and not vm_ver:\n        try:\n            with open('/etc/os-release', 'r') as fp:\n                for line in fp:\n                    if line.startswith('ID='):\n                        vm_dist = line.split('=')[1]\n                        vm_dist = vm_dist.split('-')[0]\n                        vm_dist = vm_dist.replace('\\\"', '').replace('\\n', '')\n                    elif line.startswith('VERSION_ID='):\n                        vm_ver = line.split('=')[1]\n                        vm_ver = vm_ver.replace('\\\"', '').replace('\\n', '')\n        except:\n            waagent.Log('Indeterminate operating system')\n            return vm_supported, 'Indeterminate operating system', ''\n\n    # Find this VM distribution in the supported list\n    for supported_dist in 
supported_dists.keys():\n        if vm_dist.lower().startswith(supported_dist):\n            # Check if this VM distribution version is supported\n            vm_ver_split = vm_ver.split('.')\n            for supported_ver in supported_dists[supported_dist]:\n                supported_ver_split = supported_ver.split('.')\n\n                # If vm_ver is at least as precise (at least as many digits) as\n                # supported_ver and matches all the supported_ver digits, then\n                # this VM is supported\n                vm_ver_match = True\n                for idx, supported_ver_num in enumerate(supported_ver_split):\n                    try:\n                        supported_ver_num = int(supported_ver_num)\n                        vm_ver_num = int(vm_ver_split[idx])\n                    except IndexError:\n                        vm_ver_match = False\n                        break\n                    # compare with != rather than 'is not': identity checks\n                    # are an unreliable way to compare int values\n                    if vm_ver_num != supported_ver_num:\n                        vm_ver_match = False\n                        break\n                if vm_ver_match:\n                    vm_supported = True\n                    break\n\n    if not vm_supported:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True, message=\"Unsupported OS: \" + vm_dist + \"; distro_version: \" + vm_ver)\n        hutil.do_exit(UnsupportedDistro, 'Install', 'error', str(UnsupportedDistro), vm_dist + \"; distro_version: \" + vm_ver + ' is not supported.')\n    \n    return vm_supported, vm_dist, vm_ver\n\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"Installing DSCForLinux extension\")\n        remove_old_dsc_packages()\n        install_dsc_packages()\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"Successfully installed DSCForLinux extension\")\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"failed to install DSC extension with error: {0} and stacktrace: {1}\".format(\n                                      str(e), traceback.format_exc()))\n        hutil.error(\n            \"Failed to install DSC extension with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '1', 'Install Failed.')\n\n\ndef enable():\n    hutil.do_parse_context('Enable')\n    hutil.exit_if_enabled()\n    try:\n        start_omiservice()\n        mode = get_config('Mode')\n        if mode == '':\n            mode = get_config('ExtensionAction')\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='EnableInProgress', isSuccess=True,\n                                  message=\"Enabling the DSC extension - mode/ExtensionAction: \" + mode)\n        if mode == '':\n            mode = Mode.push\n        else:\n            mode = mode.lower()\n            if not hasattr(Mode, mode):\n                waagent.AddExtensionEvent(name=ExtensionShortName,\n                                          op=Operation.Enable,\n                                          isSuccess=True,\n                                          message=\"(03001)Argument error, invalid 
ExtensionAction/mode.\")\n                hutil.do_exit(51, 'Enable', 'error', '51', 'Enable failed, unknown ExtensionAction/mode: ' + mode)\n        if mode == Mode.remove:\n            remove_module()\n        elif mode == Mode.register:\n            registration_key = get_config('RegistrationKey')\n            registation_url = get_config('RegistrationUrl')\n            # Optional\n            node_configuration_name = get_config('NodeConfigurationName')\n            refresh_freq = get_config('RefreshFrequencyMins')\n            configuration_mode_freq = get_config('ConfigurationModeFrequencyMins')\n            configuration_mode = get_config('ConfigurationMode')\n            exit_code, err_msg = register_automation(registration_key, registation_url, node_configuration_name,\n                                                     refresh_freq, configuration_mode_freq, configuration_mode.lower())\n            if exit_code != 0:\n                hutil.do_exit(exit_code, 'Enable', 'error', str(exit_code), err_msg)\n\n            extension_status_event = \"ExtensionRegistration\"\n            response = send_heart_beat_msg_to_agent_service(extension_status_event)\n            status_file_path, agent_id, vm_uuid = get_status_message_details()\n            update_statusfile(status_file_path, agent_id, vm_uuid, response)\n            sys.exit(0)\n        else:\n            file_path = download_file()\n            if mode == Mode.pull:\n                current_config = apply_dsc_meta_configuration(file_path)\n            elif mode == Mode.push:\n                current_config = apply_dsc_configuration(file_path)\n            else:\n                install_module(file_path)\n        if mode == Mode.push or mode == Mode.pull:\n            if check_dsc_configuration(current_config):\n                if mode == Mode.push:\n                    waagent.AddExtensionEvent(name=ExtensionShortName,\n                                              op=Operation.ApplyMof,\n                                              isSuccess=True,\n                                              message=\"(03104)Succeeded to apply MOF configuration through Push Mode\")\n                else:\n                    waagent.AddExtensionEvent(name=ExtensionShortName,\n                                              op=Operation.ApplyMetaMof,\n                                              isSuccess=True,\n                                              message=\"(03106)Succeeded to apply meta MOF configuration through Pull Mode\")\n                    extension_status_event = \"ExtensionRegistration\"\n                    response = send_heart_beat_msg_to_agent_service(extension_status_event)\n                    status_file_path, agent_id, vm_uuid = get_status_message_details()\n                    update_statusfile(status_file_path, agent_id, vm_uuid, response)\n                    sys.exit(0)\n            else:\n                if mode == Mode.push:\n                    waagent.AddExtensionEvent(name=ExtensionShortName,\n                                              op=Operation.ApplyMof,\n                                              isSuccess=False,\n                                              message=\"(03105)Failed to apply MOF configuration through Push Mode\")\n                else:\n                    waagent.AddExtensionEvent(name=ExtensionShortName,\n                                              op=Operation.ApplyMetaMof,\n                                              isSuccess=False,\n                                     
         message=\"(03107)Failed to apply meta MOF configuration through Pull Mode\")\n                hutil.do_exit(1, 'Enable', 'error', '1', 'Enable failed. ' + current_config)\n\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable Succeeded')\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='EnableInProgress', isSuccess=True,\n                                  message=\"Enable failed with the error: {0}, stacktrace: {1} \".format(str(e),\n                                                                                                       traceback.format_exc()))\n        hutil.error('Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '1', 'Enable failed: {0}'.format(e))\n\n\ndef send_heart_beat_msg_to_agent_service(status_event_type):\n    response = None\n    try:\n        retry_count = 0\n        canRetry = True\n        while retry_count <= 5 and canRetry:\n            waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                                      message=\"In send_heart_beat_msg_to_agent_service method\")\n            code, output, stderr = run_cmd( python_command + space_string +  dsc_script_path + \"/GetDscLocalConfigurationManager.py\")\n            if code == 0 and \"RefreshMode=Pull\" in output:\n                waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                                          message=\"sends heartbeat message in pullmode\")\n                m = re.search(\"ServerURL=([^\\n]+)\", output)\n                if not m:\n                    return\n                registration_url = m.group(1)\n                agent_id = get_nodeid(nodeid_path)\n                node_extended_properties_url = registration_url + \"/Nodes(AgentId='\" + agent_id + \"')/ExtendedProperties\"\n                waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                                          message=\"Url is \" + node_extended_properties_url)\n                headers = {'Content-Type': \"application/json; charset=utf-8\", 'Accept': \"application/json\",\n                           \"ProtocolVersion\": \"2.0\"}\n                data = construct_node_extension_properties(output, status_event_type)\n\n                http_client_factory = httpclientfactory.HttpClientFactory(\"/etc/opt/omi/ssl/oaas.crt\",\n                                                                          \"/etc/opt/omi/ssl/oaas.key\")\n                http_client = http_client_factory.create_http_client(sys.version_info)\n\n                response = http_client.post(node_extended_properties_url, headers=headers, data=data)\n                waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                                          message=\"response code is \" + str(response.status_code))\n                if response.status_code >= 500 and response.status_code < 600:\n                    canRetry = True\n                    time.sleep(10)\n                else:\n                    canRetry = False\n            retry_count += 1\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                                  message=\"Failed to send heartbeat message to DSC agent service: {0}, stacktrace: {1} 
\".format(\n                                      str(e), traceback.format_exc()))\n        hutil.error('Failed to send heartbeat message to DSC agent service: %s, stack trace: %s' % (\n            str(e), traceback.format_exc()))\n    return response\n\n\ndef get_lcm_config_setting(setting_name, lcmconfig):\n    valuegroup = re.search(setting_name + \"=([^\\n]+)\", lcmconfig)\n    if not valuegroup:\n        return \"\"\n    value = valuegroup.group(1)\n\n    return value\n\n\ndef construct_node_extension_properties(lcmconfig, status_event_type):\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                              message=\"Getting properties\")\n    OMSCLOUD_ID = get_omscloudid()\n    \n    vm_dist, vm_ver, vm_id = '', '', ''\n    \n    try:\n        vm_dist, vm_ver, vm_id = platform.linux_distribution()\n    except AttributeError:\n        try:\n            vm_dist, vm_ver, vm_id = platform.dist()\n        except AttributeError:\n            waagent.Log(\"Falling back to /etc/os-release distribution parsing\")\n\n    # Fallback if either of the above fail; on some (especially newer)\n    # distros, linux_distribution() and dist() are unreliable or deprecated\n    if not vm_dist and not vm_ver:\n        try:\n            with open('/etc/os-release', 'r') as fp:\n                for line in fp:\n                    if line.startswith('ID='):\n                        vm_dist = line.split('=')[1]\n                        vm_dist = vm_dist.split('-')[0]\n                        vm_dist = vm_dist.replace('\\\"', '').replace('\\n', '')\n                    elif line.startswith('VERSION_ID='):\n                        vm_ver = line.split('=')[1]\n                        vm_ver = vm_ver.replace('\\\"', '').replace('\\n', '')\n        except:\n            waagent.Log('Indeterminate operating system')\n            vm_dist, vm_ver, vm_id = \"Indeterminate operating system\", \"\",\"\"\n\n    if len(vm_ver.split('.')) == 1:\n        major_version = vm_ver.split('.')[0]\n        minor_version = 0\n    if len(vm_ver.split('.')) >= 2:\n        major_version = vm_ver.split('.')[0]\n        minor_version = vm_ver.split('.')[1]\n        \n    VMUUID = get_vmuuid()\n    node_config_names = get_lcm_config_setting('ConfigurationNames', lcmconfig)\n    configuration_mode = get_lcm_config_setting(\"ConfigurationMode\", lcmconfig)\n    configuration_mode_frequency = get_lcm_config_setting(\"ConfigurationModeFrequencyMins\", lcmconfig)\n    refresh_frequency_mins = get_lcm_config_setting(\"RefreshFrequencyMins\", lcmconfig)\n    reboot_node = get_lcm_config_setting(\"RebootNodeIfNeeded\", lcmconfig)\n    action_after_reboot = get_lcm_config_setting(\"ActionAfterReboot\", lcmconfig)\n    allow_module_overwrite = get_lcm_config_setting(\"AllowModuleOverwrite\", lcmconfig)\n\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='HeartBeatInProgress', isSuccess=True,\n                              message=\"Constructing properties data\")\n\n    properties_data = {\n        \"OMSCloudId\": OMSCLOUD_ID,\n        \"TimeStamp\": time.strftime(date_time_format, time.gmtime()),\n        \"VMResourceId\": \"\",\n        \"ExtensionStatusEvent\": status_event_type,\n        \"ExtensionInformation\": {\n            \"Name\": \"Microsoft.OSTCExtensions.DSCForLinux\",\n            \"Version\": extension_handler_version\n        },\n        \"OSProfile\": {\n            \"Name\": vm_dist,\n            \"Type\": \"Linux\",\n            \"MinorVersion\": 
minor_version,\n            \"MajorVersion\": major_version,\n            \"VMUUID\": VMUUID\n        },\n        \"RegistrationMetaData\": {\n            \"NodeConfigurationName\": node_config_names,\n            \"ConfigurationMode\": configuration_mode,\n            \"ConfigurationModeFrequencyMins\": configuration_mode_frequency,\n            \"RefreshFrequencyMins\": refresh_frequency_mins,\n            \"RebootNodeIfNeeded\": reboot_node,\n            \"ActionAfterReboot\": action_after_reboot,\n            \"AllowModuleOverwrite\": allow_module_overwrite\n        }\n    }\n    return properties_data\n\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    try:\n        extension_status_event = \"ExtensionUninstall\"\n        send_heart_beat_msg_to_agent_service(extension_status_event)\n        hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded')\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='UninstallInProgress', isSuccess=False,\n                                  message='Failed to uninstall the extension with error: %s, stack trace: %s' % (\n                                      str(e), traceback.format_exc()))\n        hutil.error(\n            'Failed to uninstall the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Uninstall', 'error', '1', 'Uninstall failed: {0}'.format(e))\n\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded')\n\n\ndef update():\n    hutil.do_parse_context('Update')\n    try:\n        extension_status_event = \"ExtensionUpgrade\"\n        send_heart_beat_msg_to_agent_service(extension_status_event)\n        hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded')\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='UpdateInProgress', isSuccess=False,\n                                  message='Failed to update the extension with error: %s, stack trace: %s' % (\n                                      str(e), traceback.format_exc()))\n        hutil.error('Failed to update the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Update', 'error', '1', 'Update failed: {0}'.format(e))\n\n\ndef run_cmd(cmd):\n    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)\n    # communicate() drains stdout/stderr before waiting on the process,\n    # avoiding the pipe-buffer deadlock that wait()-before-read can cause\n    stdout, stderr = proc.communicate()\n    exit_code = proc.returncode\n    stdout = stdout.decode(\"ISO-8859-1\") if isinstance(stdout, bytes) else stdout\n    stderr = stderr.decode(\"ISO-8859-1\") if isinstance(stderr, bytes) else stderr\n    return exit_code, stdout, stderr\n\ndef run_dpkg_cmd_with_retry(cmd):\n    \"\"\"\n    Attempts to run the cmd - if it fails, checks to see if dpkg is locked by another\n    process, if so, it will sleep for 5 seconds and then try running the command again.\n    If dpkg is still locked, then it will return the DPKGLockedErrorCode which won't\n    count against our SLA numbers.\n    \"\"\"\n    exit_code, output, stderr = run_cmd(cmd)\n    if exit_code != 0:\n        dpkg_locked = is_dpkg_locked(exit_code, stderr)\n        if dpkg_locked:\n            # Try one more time:\n            time.sleep(5)\n            exit_code, output, stderr = run_cmd(cmd)\n            dpkg_locked = is_dpkg_locked(exit_code, stderr)\n            if dpkg_locked:\n                exit_code = DPKGLockedErrorCode\n\n    return exit_code, 
output, stderr\n\ndef get_config(key):\n    if key in public_settings:\n        value = public_settings.get(key)\n        if value:\n            return str(value).strip()\n    if key in protected_settings:\n        value = protected_settings.get(key)\n        if value:\n            return str(value).strip()\n    return ''\n\n\ndef remove_old_dsc_packages():\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                              message=\"Deleting DSC and omi packages\")\n    if distro_category == DistroCategory.debian:\n        deb_remove_incomptible_dsc_package()\n        # remove the package installed by Linux DSC 1.0, in later versions the package name is changed to 'omi'\n        deb_remove_old_oms_package('omiserver', '1.0.8.2')\n    elif distro_category == DistroCategory.redhat or distro_category == DistroCategory.suse:\n        rpm_remove_incomptible_dsc_package()\n        # remove the package installed by Linux DSC 1.0, in later versions the package name is changed to 'omi'\n        rpm_remove_old_oms_package('omiserver', '1.0.8-2')\n\n\ndef deb_remove_incomptible_dsc_package():\n    version = deb_get_pkg_version('dsc')\n    if version is not None and is_incomptible_dsc_package(version):\n        deb_uninstall_package('dsc')\n\n\ndef is_incomptible_dsc_package(package_version):\n    version = re.match(package_pattern, package_version)\n    # uninstall DSC package if the version is 1.0.x because upgrading from 1.0 to 1.1 is broken\n    if version is not None and (int(version.group(1)) == 1 and int(version.group(2)) == 0):\n        return True\n    return False\n\n\ndef is_old_oms_server(package_name):\n    if package_name == 'omiserver':\n        return True\n    return False\n\n\ndef deb_remove_old_oms_package(package_name, version):\n    system_pkg_version = deb_get_pkg_version(package_name)\n    if system_pkg_version is not None and is_old_oms_server(package_name):\n        deb_uninstall_package(package_name)\n\n\ndef deb_get_pkg_version(package_name):\n    code, output, stderr = run_dpkg_cmd_with_retry('dpkg -s ' + package_name + ' | grep Version:')\n    if code == 0:\n        code, output, stderr = run_dpkg_cmd_with_retry(\"dpkg -s \" + package_name + \" | grep Version: | awk '{print $2}'\")\n        if code == 0:\n            return output\n\n\ndef rpm_remove_incomptible_dsc_package():\n    code, version, stderr = run_cmd('rpm -q --queryformat \"%{VERSION}.%{RELEASE}\" dsc')\n    if code == 0 and is_incomptible_dsc_package(version):\n        rpm_uninstall_package('dsc')\n\n\ndef rpm_remove_old_oms_package(package_name, version):\n    if rpm_check_old_oms_package(package_name, version):\n        rpm_uninstall_package(package_name)\n\n\ndef rpm_check_old_oms_package(package_name, version):\n    code, output, stderr = run_cmd('rpm -q ' + package_name)\n    if code == 0 and is_old_oms_server(package_name):\n        return True\n    return False\n\n\ndef install_dsc_packages():\n    openssl_version = get_openssl_version()\n    omi_package_path = omi_package_prefix + openssl_version\n    dsc_package_path = dsc_package_prefix + openssl_version\n    compiler_mitigated_omi_flag = get_compiler_mitigated_omi_flag()\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                              message=\"Installing omipackage version: \" + omi_package_path + \"; dsc package version: \" + dsc_package_path)\n    if distro_category == DistroCategory.debian:\n        
deb_install_pkg(omi_package_path + '.ulinux' + compiler_mitigated_omi_flag + '.x64.deb', 'omi', omi_major_version, omi_minor_version, omi_build,\n                        omi_release, ' --force-confold --force-confdef --refuse-downgrade ')\n        deb_install_pkg(dsc_package_path + '.x64.deb', 'dsc', dsc_major_version, dsc_minor_version, dsc_build,\n                        dsc_release, '')\n    elif distro_category == DistroCategory.redhat or distro_category == DistroCategory.suse:\n        rpm_install_pkg(omi_package_path + '.ulinux' + compiler_mitigated_omi_flag + '.x64.rpm', 'omi', omi_major_version, omi_minor_version, omi_build,\n                        omi_release)\n        rpm_install_pkg(dsc_package_path + '.x64.rpm', 'dsc', dsc_major_version, dsc_minor_version, dsc_build,\n                        dsc_release)\n\ndef get_compiler_mitigated_omi_flag():\n    vm_supported, vm_dist, vm_ver = check_supported_OS()\n\n    if is_compiler_mitigated_omi_supported(vm_dist.lower(), vm_ver.lower()):\n        return '.s'\n\n    return ''\n\ndef is_compiler_mitigated_omi_supported(dist_name, dist_version):\n    # Compiler-mitigated OMI is not supported in the following\n    # SLES 11\n    \n    # To be enhanced if there are future distros not supporting compiler-mitigated OMI package\n    if dist_name.startswith('sles') and dist_version.startswith('11'):\n        return False\n    \n    return True\n\ndef compare_pkg_version(system_package_version, major_version, minor_version, build, release):\n    version = re.match(package_pattern, system_package_version)\n    if version is not None and ((int(version.group(1)) > major_version) or (\n            int(version.group(1)) == major_version and int(version.group(2)) > minor_version) or (\n                                        int(version.group(1)) == major_version and int(\n                                    version.group(2)) == minor_version and int(version.group(3)) > build) or (\n                                        int(version.group(1)) == major_version and int(\n                                    version.group(2)) == minor_version and int(version.group(3)) == build and int(\n                                    version.group(4)) >= release)):\n        return 1\n    return 0\n\n\ndef rpm_check_pkg_exists(package_name, major_version, minor_version, build, release):\n    code, output, stderr = run_cmd('rpm -q --queryformat \"%{VERSION}.%{RELEASE}\" ' + package_name)\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                              message=\"package name: \" + package_name + \";  existing package version:\" + output)\n    hutil.log(\"package name: \" + package_name + \";  existing package version:\" + output)\n    if code == 0:\n        return compare_pkg_version(output, major_version, minor_version, build, release)\n\n\ndef rpm_install_pkg(package_path, package_name, major_version, minor_version, build, release):\n    if rpm_check_pkg_exists(package_name, major_version, minor_version, build, release) == 1:\n        # package is already installed\n        return\n    else:\n        code, output, stderr = run_cmd('rpm -Uvh ' + package_path)\n        if code == 0:\n            hutil.log(package_name + ' is installed successfully')\n        else:\n            waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                      message=\"Failed to install RPM package :\" + package_path)\n            raise Exception('Failed to 
install package {0}: stdout: {1}, stderr: {2}'.format(package_name, output, stderr))\n\n\ndef deb_install_pkg(package_path, package_name, major_version, minor_version, build, release, install_options):\n    version = deb_get_pkg_version(package_name)\n    if version is not None and compare_pkg_version(version, major_version, minor_version, build, release) == 1:\n        # package is already installed\n        hutil.log(package_name + ' version ' + version + ' is already installed')\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"dsc package with version: \" + version + \"is already installed.\")\n        return\n    else:\n        cmd = 'dpkg -i ' + install_options + ' ' + package_path\n        code, output, stderr = run_dpkg_cmd_with_retry(cmd)\n        if code == 0:\n            hutil.log(package_name + ' version ' + str(major_version) + '.' + str(minor_version) + '.' + str(\n                build) + '.' + str(release) + ' is installed successfully')\n        elif code == DPKGLockedErrorCode:\n            hutil.do_exit(DPKGLockedErrorCode, 'Install', 'error', str(DPKGLockedErrorCode), 'Install failed because the package manager on the VM is currently locked. Please try installing again.')\n        else:\n            waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=False,\n                                      message=\"Failed to install debian package :\" + package_path)\n            raise Exception('Failed to install package {0}: stdout: {1}, stderr: {2}'.format(package_name, output, stderr))\n\n\ndef install_package(package):\n    if distro_category == DistroCategory.debian:\n        apt_package_install(package)\n    elif distro_category == DistroCategory.redhat:\n        yum_package_install(package)\n    elif distro_category == DistroCategory.suse:\n        zypper_package_install(package)\n\n\ndef zypper_package_install(package):\n    hutil.log('zypper --non-interactive in ' + package)\n    code, output, stderr = run_cmd('zypper --non-interactive in ' + package)\n    if code == 0:\n        hutil.log('Package ' + package + ' is installed successfully')\n    else:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"Failed to install zypper package :\" + package)\n        raise Exception('Failed to install package {0}: stdout: {1}, stderr: {2}'.format(package, output, stderr))\n\n\ndef yum_package_install(package):\n    hutil.log('yum install -y ' + package)\n    code, output, stderr = run_cmd('yum install -y ' + package)\n    if code == 0:\n        hutil.log('Package ' + package + ' is installed successfully')\n    else:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"Failed to install yum package :\" + package)\n        raise Exception('Failed to install package {0}: stdout: {1}, stderr: {2}'.format(package, output, stderr))\n\n\ndef apt_package_install(package):\n    hutil.log('apt-get install -y --force-yes ' + package)\n    code, output, stderr = run_cmd('apt-get install -y --force-yes ' + package)\n    if code == 0:\n        hutil.log('Package ' + package + ' is installed successfully')\n    else:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"Failed to install apt 
package :\" + package)\n        raise Exception('Failed to install package {0}: stdout: {1}, stderr: {2}'.format(package, output, stderr))\n\n\ndef get_openssl_version():\n    cmd_result = waagent.RunGetOutput(\"openssl version\")\n    cmd_result = cmd_result.decode() if isinstance(cmd_result, bytes) else cmd_result\n    openssl_version = cmd_result[1].split()[1]\n    if re.match('^1.0.*', openssl_version):\n        return '100'\n    elif re.match('^1.1.*', openssl_version):\n        return '110'\n    else:\n        error_msg = 'This system does not have a supported version of OpenSSL installed. Supported version: 1.0.*, 1.1.*'\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"System doesn't have supported OpenSSL version:\" + openssl_version)\n        hutil.do_exit(51, 'Install', 'error', '51', openssl_version + 'is not supported.')\n\n\ndef start_omiservice():\n    run_cmd('/opt/omi/bin/service_control start')\n    code, output, stderr =run_cmd('service omid status')\n    if code == 0:\n        hutil.log('Service omid is started')\n    else:\n        raise Exception('Failed to start service omid, status: stdout: {0}, stderr: {1}'.format(output, stderr))\n\n\ndef download_file():\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"Downloading file\")\n    download_dir = prepare_download_dir(hutil.get_seq_no())\n    storage_account_name = get_config('StorageAccountName')\n    storage_account_key = get_config('StorageAccountKey')\n    file_uri = get_config('FileUri')\n\n    if not file_uri:\n        error_msg = 'Missing FileUri configuration'\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=Operation.Download,\n                                  isSuccess=False,\n                                  message=\"(03000)Argument error, invalid file location\")\n        hutil.do_exit(51, 'Enable', 'error', '51', '(03000)Argument error, invalid file location')\n\n    if storage_account_name and storage_account_key:\n        hutil.log('Downloading file from azure storage...')\n        path = download_azure_blob(storage_account_name, storage_account_key, file_uri, download_dir)\n        return path\n    else:\n        hutil.log('Downloading file from external link...')\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                                  message=\"Downloading file from external link...\")\n        path = download_external_file(file_uri, download_dir)\n        return path\n\n\ndef download_azure_blob(account_name, account_key, file_uri, download_dir):\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"Downloading from azure blob\")\n    try:\n        (blob_name, container_name) = parse_blob_uri(file_uri)\n        host_base = get_host_base_from_uri(file_uri)\n\n        blob_parent_path = os.path.join(download_dir, os.path.dirname(blob_name))\n        if not os.path.exists(blob_parent_path):\n            os.makedirs(blob_parent_path)\n\n        download_path = os.path.join(download_dir, blob_name)\n        blob_service = BlobService(account_name, account_key, host_base=host_base)\n    except Exception as e:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='DownloadInProgress', 
isSuccess=True,\n                                  message='Enable failed with the azure storage error : {0}, stack trace: {1}'.format(\n                                      str(e), traceback.format_exc()))\n        hutil.error('Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '1', 'Enable failed: {0}'.format(e))\n\n    max_retry = 3\n    for retry in range(1, max_retry + 1):\n        try:\n            blob_service.get_blob_to_path(container_name, blob_name, download_path)\n        except Exception:\n            hutil.error('Failed to download Azure blob, retry = ' + str(retry) + ', max_retry = ' + str(max_retry))\n            if retry != max_retry:\n                hutil.log('Sleep 10 seconds')\n                time.sleep(10)\n            else:\n                waagent.AddExtensionEvent(name=ExtensionShortName,\n                                          op=Operation.Download,\n                                          isSuccess=False,\n                                          message=\"(03303)Failed to download file from Azure Storage\")\n                raise Exception('Failed to download azure blob: ' + blob_name)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=Operation.Download,\n                              isSuccess=True,\n                              message=\"(03301)Succeeded to download file from Azure Storage\")\n    return download_path\n\n\ndef parse_blob_uri(blob_uri):\n    path = get_path_from_uri(blob_uri).strip('/')\n    first_sep = path.find('/')\n    if first_sep == -1:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=False,\n                                  message=\"Error occured while extracting container and blob name.\")\n        hutil.error(\"Failed to extract container and blob name from \" + blob_uri)\n    blob_name = path[first_sep + 1:]\n    container_name = path[:first_sep]\n    return (blob_name, container_name)\n\n\ndef get_path_from_uri(uri):\n    uri = urlparse(uri)\n    return uri.path\n\n\ndef get_host_base_from_uri(blob_uri):\n    uri = urlparse(blob_uri)\n    netloc = uri.netloc\n    if netloc is None:\n        return None\n    return netloc[netloc.find('.'):]\n\n\ndef download_external_file(file_uri, download_dir):\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"Downloading from external file\")\n    path = get_path_from_uri(file_uri)\n    file_name = path.split('/')[-1]\n    file_path = os.path.join(download_dir, file_name)\n    max_retry = 3\n    for retry in range(1, max_retry + 1):\n        try:\n            download_and_save_file(file_uri, file_path)\n            waagent.AddExtensionEvent(name=ExtensionShortName, op=Operation.Download, isSuccess=True,\n                                      message=\"(03302)Succeeded to download file from public URI\")\n            return file_path\n        except Exception as e:\n            hutil.error('Failed to download public file, retry = ' + str(retry) + ', max_retry = ' + str(max_retry))\n            if retry != max_retry:\n                hutil.log('Sleep 10 seconds')\n                time.sleep(10)\n            else:\n                waagent.AddExtensionEvent(name=ExtensionShortName,\n                                          op=Operation.Download,\n                                          isSuccess=False,\n                       
                   message='(03304)Failed to download file from public URI,  error : %s, stack trace: %s' % (\n                                              str(e), traceback.format_exc()))\n                raise Exception('Failed to download public file: ' + file_name)\n\n\ndef download_and_save_file(uri, file_path):\n    src = urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while (buf):\n        dest.write(buf)\n        buf = src.read(buf_size)\n\n\ndef prepare_download_dir(seq_no):\n    main_download_dir = os.path.join(os.getcwd(), DownloadDirectory)\n    if not os.path.exists(main_download_dir):\n        os.makedirs(main_download_dir)\n    cur_download_dir = os.path.join(main_download_dir, seq_no)\n    if not os.path.exists(cur_download_dir):\n        os.makedirs(cur_download_dir)\n    return cur_download_dir\n\n\ndef apply_dsc_configuration(config_file_path):\n    cmd = dsc_script_path + '/StartDscConfiguration.py -configurationmof ' + config_file_path\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='EnableInProgress', isSuccess=True,\n                              message='running the cmd: ' + cmd)\n    code, output, stderr = run_cmd(cmd)\n    if code == 0:\n        code, output, stderr = run_cmd(dsc_script_path + '/GetDscConfiguration.py')\n        return output\n    else:\n        error_msg = 'Failed to apply MOF configuration: stdout: {0}, stderr: {1}'.format(output, stderr)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=Operation.ApplyMof, isSuccess=True, message=error_msg)\n        hutil.error(error_msg)\n        raise Exception(error_msg)\n\n\ndef apply_dsc_meta_configuration(config_file_path):\n    cmd = dsc_script_path + '/SetDscLocalConfigurationManager.py -configurationmof ' + config_file_path\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='EnableInProgress', isSuccess=True,\n                              message='running the cmd: ' + cmd)\n    code, output, stderr = run_cmd(cmd)\n    if code == 0:\n        code, output, stderr = run_cmd(dsc_script_path + '/GetDscLocalConfigurationManager.py')\n        return output\n    else:\n        error_msg = 'Failed to apply Meta MOF configuration: stdout: {0}, stderr: {1}'.format(output, stderr)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=Operation.ApplyMetaMof,\n                                  isSuccess=False,\n                                  message=\"(03107)\" + error_msg)\n        raise Exception(error_msg)\n\n\ndef get_statusfile_path():\n    seq_no = hutil.get_seq_no()\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"sequence number is :\" + seq_no)\n    status_file = None\n\n    handlerEnvironment = None\n    handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')\n    try:\n        with open(handler_env_path, 'r') as handler_env_file:\n            handler_env_txt = handler_env_file.read()\n        handler_env = json.loads(handler_env_txt)\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n        handlerEnvironment = handler_env\n    except Exception as e:\n        hutil.error(e.message)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                                  message='exception in retrieving status_dir error : %s, stack trace: %s' % (\n               
                       str(e), traceback.format_exc()))\n\n    status_dir = handlerEnvironment['handlerEnvironment']['statusFolder']\n    status_file = status_dir + '/' + seq_no + '.status'\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"status file path: \" + status_file)\n    return status_file\n\n\ndef get_status_message_details():\n    agent_id = get_nodeid(nodeid_path)\n    vm_uuid = get_vmuuid()\n    status_file_path = None\n    if vm_uuid is not None and agent_id is not None:\n        status_file_path = get_statusfile_path()\n\n    return status_file_path, agent_id, vm_uuid\n\n\ndef update_statusfile(status_filepath, node_id, vmuuid, response):\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"updating the status file \" + '[statusfile={0}][vmuuid={1}][node_id={2}]'.format(\n                                  status_filepath, vmuuid, node_id))\n    if status_filepath is None:\n        error_msg = \"Unable to locate a status file\"\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=False, message=error_msg)\n        return None\n\n    status_data = None\n    if os.path.exists(status_filepath):\n        jsonData = open(status_filepath)\n        status_data = json.load(jsonData)\n        jsonData.close()\n\n    accountName = response.deserialized_data[\"AccountName\"]\n    rgName = response.deserialized_data[\"ResourceGroupName\"]\n    subId = response.deserialized_data[\"SubscriptionId\"]\n\n    metadatastatus = [{\"status\": \"success\", \"code\": \"0\", \"name\": \"metadata\", \"formattedMessage\": {\"lang\": \"en-US\",\n                                                                                                  \"message\": \"AgentID=\" + node_id + \";VMUUID=\" + vmuuid + \";AutomationAccountName=\" + accountName + \";ResourceGroupName=\" + rgName + \";Subscription=\" + subId}}]\n    with open(status_filepath, \"w\") as fp:\n        status_file_content = [{\"status\":\n                                    {\"status\": \"success\",\n                                     \"formattedMessage\": {\"lang\": \"en-US\", \"message\": \"Enable Succeeded\"},\n                                     \"operation\": \"Enable\", \"code\": \"0\", \"name\": \"Microsoft.OSTCExtensions.DSCForLinux\",\n                                     \"substatus\": metadatastatus\n                                     },\n                                \"version\": \"1.0\", \"timestampUTC\": time.strftime(date_time_format, time.gmtime())\n                                }]\n        json.dump(status_file_content, fp)\n    waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=True,\n                              message=\"successfully written nodeid and vmuuid\")\n    waagent.AddExtensionEvent(name=ExtensionName, op=\"Enable\", isSuccess=True,\n                              message=\"successfully executed enable functionality\")\n\n\ndef get_nodeid(file_path):\n    id = None\n    try:\n        if os.path.exists(file_path):\n            with open(file_path) as f:\n                id = f.readline().strip()\n    except Exception as e:\n        error_msg = 'get_nodeid() failed: Unable to open id file {0}'.format(file_path)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", 
isSuccess=False, message=error_msg)\n        return None\n    if not id:\n        error_msg = 'get_nodeid() failed: Empty content in id file {0}'.format(file_path)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op=\"EnableInProgress\", isSuccess=False, message=error_msg)\n        return None\n    return id\n\n\ndef get_vmuuid():\n    UUID = None\n    code, output, stderr = run_cmd(\"sudo dmidecode | grep UUID | sed -e 's/UUID: //'\")\n    if code == 0:\n        UUID = output.strip()\n    return UUID\n\n\ndef get_omscloudid():\n    OMSCLOUD_ID = None\n    code, output, stderr = run_cmd(\"sudo dmidecode | grep 'Tag: 77' | sed -e 's/Asset Tag: //'\")\n    if code == 0:\n        OMSCLOUD_ID = output.strip()\n    return OMSCLOUD_ID\n\n\ndef check_dsc_configuration(current_config):\n    outputlist = re.split(\"\\n\", current_config)\n    for line in outputlist:\n        if re.match(r'ReturnValue=0', line.strip()):\n            return True\n    return False\n\n\ndef install_module(file_path):\n    install_package('unzip')\n    cmd = dsc_script_path + '/InstallModule.py ' + file_path\n    code, output, stderr = run_cmd(cmd)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=\"InstallModuleInProgress\",\n                              isSuccess=True,\n                              message=\"Running the cmd: \" + cmd)\n    if not code == 0:\n        error_msg = 'Failed to install DSC Module ' + file_path + ' stdout: {0}, stderr: {1}'.format(output, stderr)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=Operation.InstallModule,\n                                  isSuccess=False,\n                                  message=\"(03100)\" + error_msg)\n        raise Exception(error_msg)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=Operation.InstallModule,\n                              isSuccess=True,\n                              message=\"(03101)Succeeded to install DSC Module\")\n\n\ndef remove_module():\n    module_name = get_config('ResourceName')\n    cmd = dsc_script_path + '/RemoveModule.py ' + module_name\n    code, output, stderr = run_cmd(cmd)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=\"RemoveModuleInProgress\",\n                              isSuccess=True,\n                              message=\"Running the cmd: \" + cmd)\n    if not code == 0:\n        error_msg = 'Failed to remove DSC Module ' + module_name + ' stdout: {0}, stderr: {1}'.format(output, stderr)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=Operation.RemoveModule,\n                                  isSuccess=False,\n                                  message=\"(03102)\" + error_msg)\n        raise Exception(error_msg)\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=Operation.RemoveModule,\n                              isSuccess=True,\n                              message=\"(03103)Succeeded to remove DSC Module\")\n\n\ndef uninstall_package(package_name):\n    waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                              message=\"uninstalling the package\" + package_name)\n    if distro_category == DistroCategory.debian:\n        deb_uninstall_package(package_name)\n    elif 
distro_category == DistroCategory.redhat or distro_category == DistroCategory.suse:\n        rpm_uninstall_package(package_name)\n\n\ndef deb_uninstall_package(package_name):\n    cmd = 'dpkg -P ' + package_name\n    code, output, stderr = run_dpkg_cmd_with_retry(cmd)\n    if code == 0:\n        hutil.log('Package ' + package_name + ' was removed successfully')\n    elif code == DPKGLockedErrorCode:\n        hutil.do_exit(DPKGLockedErrorCode, 'Install', 'error', str(DPKGLockedErrorCode), 'Operation failed because the package manager on the VM is currently locked. Please try again.')\n    else:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"failed to remove the package\" + package_name)\n        raise Exception('Failed to remove package ' + package_name)\n\n\ndef rpm_uninstall_package(package_name):\n    cmd = 'rpm -e ' + package_name\n    code, output, stderr = run_cmd(cmd)\n    if code == 0:\n        hutil.log('Package ' + package_name + ' was removed successfully')\n    else:\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='InstallInProgress', isSuccess=True,\n                                  message=\"failed to remove the package\" + package_name)\n        raise Exception('Failed to remove package ' + package_name)\n        \ndef is_dpkg_locked(exit_code, output):\n    \"\"\"\n    If dpkg is locked, the output will contain a message similar to 'dpkg\n    status database is locked by another process'\n    \"\"\"\n    if exit_code is not 0:\n        dpkg_locked_search = r'^.*dpkg.+lock.*$'\n        dpkg_locked_re = re.compile(dpkg_locked_search, re.M)\n        if dpkg_locked_re.search(output):\n            return True\n    return False\n\n\ndef register_automation(registration_key, registation_url, node_configuration_name, refresh_freq,\n                        configuration_mode_freq, configuration_mode):\n    if (registration_key == '' or registation_url == ''):\n        err_msg = \"Either the Registration Key or Registration URL is NOT provided\"\n        hutil.error(err_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='RegisterInProgress', isSuccess=True, message=err_msg)\n        return 51, err_msg\n    if configuration_mode != '' and not (\n            configuration_mode == 'applyandmonitor' or configuration_mode == 'applyandautocorrect' or configuration_mode == 'applyonly'):\n        err_msg = \"ConfigurationMode: \" + configuration_mode + \" is not valid.\"\n        hutil.error(err_msg + \"It should be one of the values : (ApplyAndMonitor | ApplyAndAutoCorrect | ApplyOnly)\")\n        waagent.AddExtensionEvent(name=ExtensionShortName, op='RegisterInProgress', isSuccess=True, message=err_msg)\n        return 51, err_msg\n    cmd = dsc_script_path + '/Register.py' + ' --RegistrationKey ' + registration_key \\\n          + ' --ServerURL ' + registation_url\n    optional_parameters = \"\"\n    if node_configuration_name != '':\n        optional_parameters += ' --ConfigurationName ' + node_configuration_name\n    if refresh_freq != '':\n        optional_parameters += ' --RefreshFrequencyMins ' + refresh_freq\n    if configuration_mode_freq != '':\n        optional_parameters += ' --ConfigurationModeFrequencyMins ' + configuration_mode_freq\n    if configuration_mode != '':\n        optional_parameters += ' --ConfigurationMode ' + configuration_mode\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              
op=\"RegisterInProgress\",\n                              isSuccess=True,\n                              message=\"Registration URL \" + registation_url + \"Optional parameters to Registration\" + optional_parameters)\n    code, output, stderr = run_cmd(cmd + optional_parameters)\n    if not code == 0:\n        error_msg = '(03109)Failed to register with Azure Automation DSC: stdout: {0}, stderr: {1}'.format(output, stderr)\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=ExtensionShortName,\n                                  op=Operation.Register,\n                                  isSuccess=False,\n                                  message=error_msg)\n        return 1, error_msg\n    waagent.AddExtensionEvent(name=ExtensionShortName,\n                              op=Operation.Register,\n                              isSuccess=True,\n                              message=\"(03108)Succeeded to register with Azure Automation DSC\")\n    return 0, ''\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "DSC/extension_shim.sh",
    "content": "#!/usr/bin/env bash\n\n# Keeping the default command\nCOMMAND=\"\"\nPYTHON=\"\"\n\n# We are writing logs to error stream in extension_shim.sh as the logs written to output stream are being overriden by HandlerUtil.py. This has been done as part of OMIGOD hotfix\n# Default variables for OMI Package Upgrade\nREQUIRED_OMI_VERSION=\"1.7.3.0\"\nINSTALLED_OMI_VERSION=\"\"\nUPGRADED_OMI_VERSION=\"\"\nOPENSSL_VERSION=\"\"\nOMI_PACKAGE_PREFIX='packages/omi-1.7.3-0.ssl_'\nOMI_PACKAGE_PATH=\"\"\nOMI_SERVICE_STATE=\"\"\n\nUSAGE=\"$(basename \"$0\") [-h] [-i|--install] [-u|--uninstall] [-d|--disable] [-e|--enable] [-p|--update]\n\nProgram to find the installed python on the box and invoke a Python extension script.\n\nwhere:\n    -h|--help       show this help text\n    -i|--install    install the extension\n    -u|--uninstall  uninstall the extension\n    -d|--disable    disable the extension\n    -e|--enable     enable the extension\n    -p|--update     update the extension\n    -c|--command    command to run\n\nexample:\n# Install usage\n$ bash extension_shim.sh -i\npython ./vmaccess.py -install\n\n# Custom executable python file\n$ bash extension_shim.sh -c \"\"hello.py\"\" -i\npython hello.py -install\n\n# Custom executable python file with arguments\n$ bash extension_shim.sh -c \"\"hello.py --install\"\"\npython hello.py --install\n\"\n\nfunction find_python(){\n    local python_exec_command=$1\n\n    # Check if there is python2 defined.\n    if command -v python2 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python2\"\n    else\n        # Python2 was not found. Searching for Python3 now.\n        if command -v python3 >/dev/null 2>&1 ; then\n            eval ${python_exec_command}=\"python3\"\n        fi\n    fi\n}\n\nfunction get_openssl_version(){\n    openssl=`openssl version | awk '{print $2}'`\n    if [[ ${openssl} =~ ^1.0.* ]]; then\n        OPENSSL_VERSION=\"100\"\n    else\n        if [[ ${openssl} =~ ^1.1.* ]]; then\n            OPENSSL_VERSION=\"110\"\n        else\n            if [[ ${openssl} =~ ^0.9.8* ]]; then\n                OPENSSL_VERSION=\"098\"\n            fi\n        fi\n    fi\n}\n\nfunction start_omiservice(){\n    echo \"Attempting to start OMI service\" >&2\n    RESULT=`/opt/omi/bin/service_control start >/dev/null 2>&1`\n    RESULT=`service omid status >/dev/null 2>&1`\n    if [ $? -eq 0 ]; then\n        echo \"OMI service succesfully started.\" >&2\n    else\n        echo \"OMI service could not be started.\" >&2\n    fi\n}\n\nfunction stop_omiservice(){\n    echo \"Attempting to stop OMI service\" >&2\n    RESULT=`/opt/omi/bin/service_control stop >/dev/null 2>&1`\n    RESULT=`service omid status >/dev/null 2>&1`\n    if [ $? 
-eq 3 ]; then\n        echo \"OMI service succesfully stopped.\" >&2\n    else\n        echo \"OMI service could not be stopped.\" >&2\n    fi\n}\n\nfunction compare_versions(){\n    if [[ $1 == $2 ]]\n    then\n        return 0\n    fi\n    local IFS=.\n    local i v1=($1) v2=($2)\n    for ((i=0; i<${#v1[@]}; i++)) \n    do\n        if ((${v1[i]} > ${v2[i]}))\n        then\n            return 1\n        fi\n        if ((${v1[i]} < ${v2[i]}))\n        then\n            return 2\n        fi\n    done\n    return 0\n}\n\nfunction get_compiler_mitigated_flag() {\n    OS_NAME=`grep '^NAME' /etc/os-release | tr -d 'NAME=' | tr -d '\"' | tr '[:upper:]' '[:lower:]'`\n    echo \"OS: ${OS_NAME}\" >&2\n    OS_VERSION=`grep '^VERSION_ID' /etc/os-release | tr -d 'VERSION_ID=' | tr -d '\"' | tr '[:upper:]' '[:lower:]'`\n    echo \"OS VERSION: ${OS_VERSION}\" >&2\n\n    # Compiler-mitigated OMI is not supported in the following\n    # SLES 11\n    \n    # To be enhanced if there are future distros not supporting compiler-mitigated OMI package\n\t\n\tFLAG=\"\"\n\tif [[ $OS_NAME == sles* && $OS_VERSION == 11* ]]\n\tthen\n\t\tFLAG=\"\"\n\telse\n\t\tFLAG=\".s\"\n\tfi\n\techo $FLAG\n}\n\nfunction ensure_required_omi_version_exists(){\n    # Populate SSL Version\n    get_openssl_version\n\n    echo \"Checking if OMI is installed. Required OMI version: ${REQUIRED_OMI_VERSION};\" >&2\n\n    COMPILER_MITIGATED_VERSION_FLAG=$( get_compiler_mitigated_flag )\n    echo \"OMI compiler-mitigated flag: (${COMPILER_MITIGATED_VERSION_FLAG})\" >&2\n\n    # Check if RPM exists\n    if command -v rpm >/dev/null 2>&1 ; then\n        echo \"Package Manager Type: RPM\" >&2\n        INSTALLED_OMI_VERSION=`rpm -q --queryformat \"%{VERSION}.%{RELEASE}\" omi 2>&1` \n        if [ -z \"$INSTALLED_OMI_VERSION\" -o \"$INSTALLED_OMI_VERSION\" = \"package omi is not installed\" ]; then\n            echo \"OMI is not installed on the machine.\" >&2\n        else\n            RESULT=`service omid status >/dev/null 2>&1`\n            OMI_SERVICE_STATE=$?\n            echo \"OMI is already installed. Installed OMI version: ${INSTALLED_OMI_VERSION}; OMI Service State: ${OMI_SERVICE_STATE};\" >&2 # Add current running status\n            compare_versions ${INSTALLED_OMI_VERSION} ${REQUIRED_OMI_VERSION}\n            if [ $? -eq 2 ]; then\n                OMI_PACKAGE_PATH=\"${OMI_PACKAGE_PREFIX}${OPENSSL_VERSION}.ulinux${COMPILER_MITIGATED_VERSION_FLAG}.x64.rpm\"\n                echo \"Installed OMI version is lower than the Required OMI version. Trying to upgrade.\" >&2\n                if [ -f ${OMI_PACKAGE_PATH} ]; then\n                    echo \"The OMI package exists at ${OMI_PACKAGE_PATH}. Using this to upgrade.\" >&2\n                    stop_omiservice\n                    RESULT=`rpm -Uvh ${OMI_PACKAGE_PATH} >/dev/null 2>&1`\n                    if [ $? -eq 0 ]; then\n                        UPGRADED_OMI_VERSION=`rpm -q --queryformat \"%{VERSION}.%{RELEASE}\" omi 2>&1`\n                        echo \"Succesfully upgraded the OMI. Installed: ${INSTALLED_OMI_VERSION}; Required: ${REQUIRED_OMI_VERSION}; Upgraded: ${UPGRADED_OMI_VERSION};\" >&2\n                    else\n                        echo \"Failed to upgrade the OMI. 
Installed: ${INSTALLED_OMI_VERSION}; Required: ${REQUIRED_OMI_VERSION};\" >&2\n                    fi\n                    # Start OMI only if previous state was running\n                    if [ $OMI_SERVICE_STATE -eq 0 ]; then\n                        start_omiservice\n                    fi\n                else          \n                    echo \"The OMI package does not exists at ${OMI_PACKAGE_PATH}. Skipping upgrade.\" >&2\n                fi\n            else\n                echo \"Installed OMI version is equal to or greater than the Required OMI version. No action needed.\" >&2\n            fi\n        fi\n        INSTALLED_OMI_VERSION=`rpm -q --queryformat \"%{VERSION}.%{RELEASE}\" omi 2>&1`\n        RESULT=`service omid status >/dev/null 2>&1`\n        OMI_SERVICE_STATE=$?\n        echo \"OMI upgrade is complete. Installed OMI version: ${INSTALLED_OMI_VERSION}; OMI Service State: ${OMI_SERVICE_STATE};\" >&2\n    else \n        # Check if DPKG exists\n        if command -v dpkg >/dev/null 2>&1 ; then\n            echo \"Package Manager Type: DPKG\" >&2\n            INSTALLED_OMI_VERSION=`dpkg -s omi 2>&1 | grep Version: | awk '{print $2}'`\n            if [ -z \"$INSTALLED_OMI_VERSION\" -o \"$INSTALLED_OMI_VERSION\" = \"package omi is not installed\" ]; then\n                echo \"OMI is not installed on the machine.\" >&2\n            else\n                RESULT=`service omid status >/dev/null 2>&1`\n                OMI_SERVICE_STATE=$?\n                echo \"OMI is already installed. Installed OMI version: ${INSTALLED_OMI_VERSION}; OMI Service State: ${OMI_SERVICE_STATE};\" >&2\n                compare_versions ${INSTALLED_OMI_VERSION} ${REQUIRED_OMI_VERSION}\n                if [ $? -eq 2 ]; then\n                    OMI_PACKAGE_PATH=\"${OMI_PACKAGE_PREFIX}${OPENSSL_VERSION}.ulinux${COMPILER_MITIGATED_VERSION_FLAG}.x64.deb\"\n                    echo \"Installed OMI version is lower than the Required OMI version. Trying to upgrade.\" >&2\n                    if [ -f ${OMI_PACKAGE_PATH} ]; then\n                        echo \"The OMI package exists at ${OMI_PACKAGE_PATH}. Using this to upgrade.\" >&2\n                        stop_omiservice\n                        RESULT=`dpkg -i --force-confold --force-confdef --refuse-downgrade ${OMI_PACKAGE_PATH} >/dev/null 2>&1`\n                        if [ $? -eq 0 ]; then\n                            UPGRADED_OMI_VERSION=`dpkg -s omi 2>&1 | grep Version: | awk '{print $2}'`\n                            echo \"Succesfully upgraded the OMI. Installed: ${INSTALLED_OMI_VERSION}; Required: ${REQUIRED_OMI_VERSION}; Upgraded: ${UPGRADED_OMI_VERSION};\" >&2\n                        else\n                            echo \"Failed to upgrade the OMI. Installed: ${INSTALLED_OMI_VERSION}; Required: ${REQUIRED_OMI_VERSION};\" >&2\n                        fi\n                        # Start OMI only if previous state was running\n                        if [ $OMI_SERVICE_STATE -eq 0 ]; then\n                            start_omiservice\n                        fi\n                    else          \n                        echo \"The OMI package does not exists at ${OMI_PACKAGE_PATH}. Skipping upgrade.\" >&2                 \n                    fi \n                else\n                    echo \"Installed OMI version is equal to or greater than the Required OMI version. 
No action needed.\" >&2\n                fi\n            fi\n            INSTALLED_OMI_VERSION=`dpkg -s omi 2>&1 | grep Version: | awk '{print $2}'`\n            RESULT=`service omid status >/dev/null 2>&1`\n            OMI_SERVICE_STATE=$?\n            echo \"OMI upgrade is complete. Installed OMI version: ${INSTALLED_OMI_VERSION}; OMI Service State: ${OMI_SERVICE_STATE};\" >&2\n        fi\n    fi\n}\n\n# Transform long options to short ones for getopts support (getopts doesn't support long args)\nfor arg in \"$@\"; do\n  shift\n  case \"$arg\" in\n    \"--help\")       set -- \"$@\" \"-h\" ;;\n    \"--install\")    set -- \"$@\" \"-i\" ;;\n    \"--update\")     set -- \"$@\" \"-p\" ;;\n    \"--enable\")     set -- \"$@\" \"-e\" ;;\n    \"--disable\")    set -- \"$@\" \"-d\" ;;\n    \"--uninstall\")  set -- \"$@\" \"-u\" ;;\n    *)              set -- \"$@\" \"$arg\"\n  esac\ndone\n\nif [ -z \"$arg\" ]\nthen\n   echo \"$USAGE\" >&2\n   exit 1\nfi\n\n# Get the arguments\nwhile getopts \"iudephc:?\" o; do\n    case \"${o}\" in\n        h|\\?)\n            echo \"$USAGE\"\n            exit 0\n            ;;\n        i)\n            operation=\"-install\"\n            ;;\n        u)\n            operation=\"-uninstall\"\n            ;;\n        d)\n            operation=\"-disable\"\n            ;;\n        e)\n            operation=\"-enable\"\n            ;;\n        p)\n            operation=\"-update\"\n            ;;\n        c)\n            COMMAND=\"$OPTARG\"\n            ;;\n        *)\n            echo \"$USAGE\" >&2\n            exit 1\n            ;;\n    esac\ndone\n\nshift $((OPTIND-1))\n\n# Ensure OMI package if exists is of required version.\nensure_required_omi_version_exists\n\n# If find_python is not able to find a python installed, $PYTHON will be null.\nfind_python PYTHON\n\n\nif [ -z \"$PYTHON\" ]; then\n   echo \"No Python interpreter found on the box\" >&2\n   exit 51 # Not Supported\nelse\n    `${PYTHON} --version`\nfi\n\n${PYTHON} ${COMMAND} ${operation}\n# DONE"
  },
  {
    "path": "DSC/httpclient.py",
    "content": "#!/usr/bin/env python2\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"HttpClient base class.\"\"\"\n\nimport os\nimport sys\n\nimport serializerfactory\n\n\nclass HttpClient:\n    \"\"\"Base class to provide common attributes and functionality to all HttpClient implementation.\"\"\"\n    ACCEPT_HEADER_KEY = \"Accept\"\n    CONTENT_TYPE_HEADER_KEY = \"Content-Type\"\n    CONNECTION_HEADER_KEY = \"Connection\"\n    USER_AGENT_HEADER_KEY = \"User-Agent\"\n\n    APP_JSON_HEADER_VALUE = \"application/json\"\n    KEEP_ALIVE_HEADER_VALUE = \"keep-alive\"\n\n    GET = \"GET\"\n    POST = \"POST\"\n    PUT = \"PUT\"\n    DELETE = \"DELETE\"\n\n    def __init__(self, cert_path, key_path, insecure=False, proxy_configuration=None):\n        self.cert_path = cert_path\n        self.key_path = key_path\n        self.insecure = insecure\n        self.proxy_configuration = proxy_configuration\n\n        # validate presence of cert/key in case they were removed after process creation\n        if (cert_path is not None and not os.path.isfile(self.cert_path)) or \\\n                (key_path is not None and not os.path.isfile(self.key_path)):\n            print(cert_path)\n            raise Exception(\"Invalid certificate or key file path.\")\n\n        self.default_headers = {self.ACCEPT_HEADER_KEY: self.APP_JSON_HEADER_VALUE,\n                                self.CONNECTION_HEADER_KEY: self.KEEP_ALIVE_HEADER_VALUE\n                                                            }\n        self.json = serializerfactory.get_serializer(sys.version_info)\n\n    @staticmethod\n    def merge_headers(client_headers, request_headers):\n        \"\"\"Merges client_headers and request_headers into a single dictionary. If a request_header key is also present\n        in the client_headers, the request_header value will override the client_header one.\n\n        Args:\n            client_headers  : dictionary, the default client's headers.\n            request_headers : dictionary, request specific headers.\n\n        Returns:\n            A dictionary containing a set of both the client_headers and the request_headers\n        \"\"\"\n        if request_headers is not None:\n            client_headers.update(request_headers.copy())\n        else:\n            request_headers = client_headers.copy()\n        return request_headers\n\n    def get(self, url, headers=None):\n        \"\"\"Issues a GET request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        pass\n\n    def post(self, url, headers=None, data=None):\n        \"\"\"Issues a POST request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        pass\n\n    def put(self, url, headers=None, data=None):\n        \"\"\"Issues a PUT request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the 
non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        pass\n\n    def delete(self, url, headers=None, data=None):\n        \"\"\"Issues a DELETE request to the provided url using the provided headers.\n\n        Args:\n            url     : string    , the URl.\n            headers : dictionary, contains the headers key value pair (defaults to None).\n            data    : dictionary, contains the non-serialized request body (defaults to None).\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        pass\n\n\nclass RequestResponse:\n    \"\"\"Encapsulates all request response for http clients. Will also deserialize the response when the raw response\n    data is deserializable.\n    \"\"\"\n    def __init__(self, status_code, raw_response_data=None):\n        self.status_code = int(status_code)\n        self.raw_data = raw_response_data\n\n        self.json = serializerfactory.get_serializer(sys.version_info)\n        if raw_response_data is not None:\n            try:\n                self.deserialized_data = self.json.loads(self.raw_data)\n            except ValueError:\n                self.deserialized_data = None\n"
  },
  {
    "path": "DSC/httpclientfactory.py",
    "content": "#!/usr/bin/env python2\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\nimport os\n\nfrom curlhttpclient import CurlHttpClient\n\n\nPY_MAJOR_VERSION = 0\nPY_MINOR_VERSION = 1\nPY_MICRO_VERSION = 2\n\n\nclass HttpClientFactory:\n    \"\"\"Factory which returns the appropriate HttpClient based on the provided python version.\n\n    Targets :\n        [2.4.0 - 2.7.9[ : CurlHttpclient\n        [2.7.9 - 2.7.9+ : Urllib2Httpclient\n        3.0+ : Urllib3Httpclient\n\n        This is due to the lack of built-in strict certificate verification prior to 2.7.9.\n        The ssl module was also unavailable for [2.4.0 - 2.6.0[.\n    \"\"\"\n\n    def __init__(self, cert, key, insecure=False):\n        self.cert = cert\n        self.key = key\n        self.insecure = insecure\n        self.proxy_configuration = None\n\n\n    def create_http_client(self, version_info):\n        \"\"\"Create a new instance of the appropriate HttpClient.\n\n        Args:\n            version_info    : array, the build-in python version_info array.\n            insecure        : bool, when set to True, httpclient wil bypass certificate verification.\n\n        Returns:\n            An instance of CurlHttpClient if the installed Python version is below 2.7.9\n            An instance of Urllib2 if the installed Python version is or is above 2.7.9\n        \"\"\"\n        if version_info[PY_MAJOR_VERSION] == 3:\n            from urllib3httpclient import Urllib3HttpClient\n            return Urllib3HttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)\n        elif version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] < 7:\n            from urllib2httpclient import Urllib2HttpClient\n            return CurlHttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)\n        elif version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] <= 7 and version_info[\n            PY_MICRO_VERSION] < 9:\n            from urllib2httpclient import Urllib2HttpClient\n            return CurlHttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)\n        else:\n            from urllib2httpclient import Urllib2HttpClient\n            return Urllib2HttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)"
  },
  {
    "path": "DSC/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n  <Type>DSCForLinux</Type>\n  <Version>2.71.1.0</Version>\n  <Label>Microsoft Azure DSC Extension for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Azure DSC Extension for Linux Virtual Machines</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "DSC/serializerfactory.py",
    "content": "#!/usr/bin/env python2\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"Serializer factory.\"\"\"\n\nPY_MAJOR_VERSION = 0\nPY_MINOR_VERSION = 1\n\n\ndef get_serializer(version_info):\n    \"\"\"Returns the appropriate serializer module based on version_info. Python 2.6 and 2.6+ have the json module\n    built-in, other version (2.6-) have to rely on the ancestral implementation (simplejson) which is included under\n    the worker package.\n\n    An instance of simplejson module if the installed Python version is below 2.6\n    An instance of json module if the installed Python version is or is above 2.6\n\n    Args:\n        version_info : array, the build-in python version_info\n\n    Returns:\n        Json module\n    \"\"\"\n    if version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] < 6:\n        import simplejson as json\n    else:\n        import json\n    return json\n"
  },
  {
    "path": "DSC/subprocessfactory.py",
    "content": "#!/usr/bin/env python2\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"Process factory which returns a process enforcing the preexec_fn.\"\"\"\n\ntry:\n    import ctypes\n    # See : http://man7.org/linux/man-pages/man2/prctl.2.html\n    # See : http://lxr.free-electrons.com/source/include/uapi/linux/prctl.h\n    libc = ctypes.CDLL(\"libc.so.6\")\n    PR_SET_PDEATHSIG = 1\n\n    def set_process_death_signal(death_signal):\n        libc.prctl(PR_SET_PDEATHSIG, death_signal)\nexcept ImportError:\n    # TODO(dalbe): Trace\n    pass\nexcept:\n    # For test to run on windows\n    # TODO(dalbe): Trace\n    pass\n\nimport os\nimport signal\nimport subprocess\nimport sys\n\nCTYPES_MODULE_NAME = \"ctypes\"\n\n\ndef create_subprocess(cmd, env=None, stdout=None, stderr=None, cwd=None):\n    \"\"\"Creates a process forcing and sets the SIGTERM signal handler using Ctypes (when available). Else creates a\n    process based on the pipe_output argument.\n\n    Ctypes are only available in 2.5+ so processes create in python 2.4 won't die if their parent process dies.\n\n    Args:\n        cmd         : string            , the cmd to execute.\n        env         : dictonary(string) , the process level environment variable.\n        stdout      : boolean           , sets the stdout to subprocess.PIPE when True, else stdout is left untouched.\n        stderr      : boolean           , sets the stderr to subprocess.PIPE when True, else stdout is left untouched.\n\n    Returns:\n        The process object.\n    \"\"\"\n    if CTYPES_MODULE_NAME not in sys.modules or os.name.lower() == \"nt\":\n        return subprocess.Popen(cmd, env=env, stdout=stdout, stderr=stderr, cwd=cwd)\n    else:\n        return subprocess.Popen(cmd, env=env, stdout=stdout, stderr=stderr, cwd=cwd,\n                                preexec_fn=set_process_death_signal(signal.SIGTERM))\n"
  },
  {
    "path": "DSC/test/MockUtil.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nclass MockUtil:\n    def __init__(self, test):\n        self.test = test\n\n    def get_log_dir(self):\n        return \"/tmp\"\n\n    def log(self, msg):\n        print(msg)\n\n    def error(self, msg):\n        print(msg)\n\n    def get_seq_no(self):\n        return \"0\"\n    \n    def do_parse_context(self, operation):\n        return \"0\"\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.test.assertNotEqual(None, message)\n\n    def do_exit(self,exit_code,operation,status,code,message):\n        self.test.assertNotEqual(None, message)\n"
  },
  {
    "path": "DSC/test/env.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\n#append installer directory to sys.path\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n\nmanifestFile = os.path.join(root, 'HandlerManifest.json')\nif os.path.exists(manifestFile):\n    import json \n    jsonData = open(manifestFile)\n    manifest = json.load(jsonData)\n    jsonData.close()\n    extName=\"{0}-{1}\".format(manifest[0][\"name\"], manifest[0][\"version\"])\n    print(\"Start test: %s\" % extName)\n\n    extDir=os.path.join(\"/var/lib/waagent\", extName)\n    if os.path.isdir(extDir):\n        os.chdir(extDir)\n        print(\"Switching to dir: %s\" % os.getcwd())\n\n"
  },
  {
    "path": "DSC/test/mof/azureautomation.df.meta.mof",
    "content": "\n\tinstance of MSFT_WebDownloadManager as $MSFT_WebDownloadManager1ref\n\t{\n\tResourceID = \"[ConfigurationRepositoryWeb]AzureAutomationDSC\";\n\t SourceInfo = \"C:\\\\OaaS-RegistrationMetaConfig2.ps1::20::9::ConfigurationRepositoryWeb\";\n\t RegistrationKey = \"TsyfxalOa7P4lNWIqAVrEWhdiRNGx+2A2WYZEE1wR+lXH5snJojB9pONu79iWZVeviC/sPylSGZQlVsmCUPGOQ==\"; \n\t ServerURL = \"https://oaasagentsvcdf.test.azure-automation.net/accounts/a654020d-4757-41cd-bbf2-528ef2fefacb\";\n\t};\n\n\tinstance of MSFT_WebResourceManager as $MSFT_WebResourceManager1ref\n\t{\n\t SourceInfo = \"C:\\\\OaaS-RegistrationMetaConfig2.ps1::27::9::ResourceRepositoryWeb\";\n\t ServerURL = \"https://oaasagentsvcdf.test.azure-automation.net/accounts/a654020d-4757-41cd-bbf2-528ef2fefacb\";\n\t ResourceID = \"[ResourceRepositoryWeb]AzureAutomationDSC\";\n\t RegistrationKey = \"TsyfxalOa7P4lNWIqAVrEWhdiRNGx+2A2WYZEE1wR+lXH5snJojB9pONu79iWZVeviC/sPylSGZQlVsmCUPGOQ==\"; \n\t};\n\n\tinstance of MSFT_WebReportManager as $MSFT_WebReportManager1ref\n\t{\n\t SourceInfo = \"C:\\\\OaaS-RegistrationMetaConfig2.ps1::34::9::ReportServerWeb\";\n\t ServerURL = \"https://oaasagentsvcdf.test.azure-automation.net/accounts/a654020d-4757-41cd-bbf2-528ef2fefacb\";\n\t ResourceID = \"[ReportServerWeb]AzureAutomationDSC\";\n\t RegistrationKey = \"TsyfxalOa7P4lNWIqAVrEWhdiRNGx+2A2WYZEE1wR+lXH5snJojB9pONu79iWZVeviC/sPylSGZQlVsmCUPGOQ==\"; \n\t};\n\n\tinstance of MSFT_DSCMetaConfiguration as $MSFT_DSCMetaConfiguration1ref\n\t{\n\t RefreshMode = \"Pull\";\n\t AllowModuleOverwrite = False;\n\t ActionAfterReboot = \"ContinueConfiguration\";\n\t RefreshFrequencyMins = 30;\n\t RebootNodeIfNeeded = False;\n\t ConfigurationModeFrequencyMins = 15;\n\t ConfigurationMode = \"ApplyAndMonitor\";\n\n\t  ResourceModuleManagers = {\n\t  $MSFT_WebResourceManager1ref  \n\t};\n\t  ReportManagers = {\n\t  $MSFT_WebReportManager1ref  \n\t };\n\t  ConfigurationDownloadManagers = {\n\t  $MSFT_WebDownloadManager1ref  \n\t };\n\t};\n\n\tinstance of OMI_ConfigurationDocument\n\t{\n\t Version=\"2.0.0\";\n\t MinimumCompatibleVersion = \"2.0.0\";\n\t CompatibleVersionAdditionalProperties= { \"MSFT_DSCMetaConfiguration:StatusRetentionTimeInDays\" };\n\t Author=\"azureautomation\";\n\t GenerationDate=\"04/17/2015 11:41:09\";\n\t GenerationHost=\"azureautomation-01\";\n\t Name=\"RegistrationMetaConfig\";\n\t};\n\t"
  },
  {
    "path": "DSC/test/status/0.status",
    "content": "[{\n    \"version\": 1.0,\n    \"timestampUTC\": \"<current utc time>\",\n    \"status\" : {\n        \"name\": \"<Handler workload name>\",\n        \"operation\": \"<name of the operation being performed>\",\n        \"configurationAppliedTime\": \"<UTC time indicating when the configuration was last successfully applied>\",\n        \"status\": \"<transitioning | error | success | warning>\",\n        \"code\": 0,\n        \"message\": {\n            \"id\": \"id of the localized resource\",\n            \"params\": [\n                \"MyParam0\",\n                \"MyParam1\"\n            ]\n        },\n        \"formattedMessage\": {\n            \"lang\": \"Lang[-locale]\",\n            \"message\": \"formatted user message\"\n        },\n        \"substatus\": [{\n            \"name\": \"<Handler workload subcomponent name>\",\n            \"status\": \"<transitioning | error | success | warning>\",\n            \"code\": 0,\n            \"Message\": {\n            \t\t\"id\": \"id of the localized resource\",\n            \t\t\"params\": [\n                \t\t\"MyParam0\",\n                \t\t\"MyParam1\"\n            \t\t]\n        \t},\n      \t  \t\"FormattedMessage\": {\n            \t\t\"Lang\": \"Lang[-locale]\",\n            \t\t\"Message\": \"formatted user message\"\n        \t}        \n        }]\n    }\n}]"
  },
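The tests that follow read this status file back with json.load and index into status/substatus, so a tiny reader sketch may help; the path and key names come from the template above, and nothing else is assumed:

    import json

    # The status file is a one-element JSON array, hence the [0].
    with open("DSC/test/status/0.status") as f:
        status = json.load(f)[0]["status"]

    print(status["operation"], status["status"], status["code"])
    for sub in status.get("substatus", []):
        print("substatus:", sub["name"], sub["status"])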
  {
    "path": "DSC/test/test_apply_meta_mof.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nimport platform\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestApplyMof(unittest.TestCase):\n    def test_apply_mof(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.mof')\n        self.assertTrue('ReturnValue=0' in config)\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_apply_mof.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nimport platform\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestApplyMof(unittest.TestCase):\n    def test_apply_mof(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.push.mof')\n        dsc.apply_dsc_configuration('mof/localhost.nxFile.mof')\n        self.assertTrue(os.path.exists('/tmp/dsctest'))\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_compare_pkg_version.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nimport platform\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass Dummy(object):\n    pass\n\nclass CompareRPMPackageVersions(unittest.TestCase):\n    def test_with_equal_version(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = Dummy()\n        dsc.hutil.log = waagent.Log \n        output = dsc.compare_pkg_version('1.1.1.294', 1, 1, 1, 294)\n        self.assertEqual(1, output)\n\n    def test_with_higher_version(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = Dummy()\n        dsc.hutil.log = waagent.Log \n        output = dsc.compare_pkg_version('1.2.0.35', 1, 1, 1, 294)\n        self.assertEqual(1, output)\t\n\n    def test_with_lower_version(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = Dummy()\n        dsc.hutil.log = waagent.Log \n        output = dsc.compare_pkg_version('1.0.4.35', 1, 1, 1, 294)\n        self.assertEqual(0, output)\t\t\t\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_download_file.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestDownloadFile(unittest.TestCase):\n    def test_download_file(self):\n        dsc.hutil = MockUtil(self)\t\n        dsc.download_external_file('https://raw.githubusercontent.com/balukambala/azure-linux-extensions/master/DSC/test/mof/dscnode.nxFile.meta.mof', '/tmp')\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_node_extension_properties.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nimport json\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestNodeExtensionProperties(unittest.TestCase):\n    def test_properties_for_pull(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.mof')\n        self.assertTrue('ReturnValue=0' in config)\n        \n        content = dsc.construct_node_extension_properties(config, \"upgrade\")\n        data = json.dumps(content)\n        self.assertTrue('OMSCloudId' in data, \"OMSCLoudID doesn't exist\")\n        \n        \n        #self.assertTrue('ExtHandlerVersion' in extensionInformation, \"ExtHandlerVersion doesn't exist\")\n        \n        #self.assertEqual('Microsoft.OSTCExtensions.DSCForLinux', extensionInformation['ExtHandlerName'])\n\n    def test_send_request_to_pullserver(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/azureautomation.df.meta.mof')\n        self.assertTrue('ReturnValue=0' in config)\n        \n        response  = dsc.send_heart_beat_msg_to_agent_service(\"install\")\n        self.assertEqual(response.status_code, 200)\n  \n    def test_push_request_properties(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.push.mof')\n        self.assertTrue('ReturnValue=0' in config)\n        \n        response  = dsc.send_heart_beat_msg_to_agent_service(\"install\")\n        self.assertIsNone(response)\n       \n    def test_update_node_properties(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        \n        response  = dsc.update()\n        self.assertIsNone(response)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_register.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestRegister(unittest.TestCase):\n    def test_register_without_registration_info(self):\n        print \"Register test case with invalid Registration url and Registration key\"\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        exit_code, output = dsc.register_automation('','','','','','')\n        self.assertEqual(exit_code, 51)\n\t\t\n    def test_register_invalid_configuration_mode(self):\n        print \"Register test case with invalid configuration mode\"\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        exit_code, output = dsc.register_automation('somekey','http://dummy','','','','some')\t\n        self.assertEqual(exit_code, 51)\n\t\n    def test_register(self):\n        print \"Register test case with valid parameters\"\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        exit_code, output = dsc.register_automation('somekey','http://dummy','test.localhost.mof','15','30','applyandmonitor')\n        self.assertEqual(exit_code, 0)\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/test/test_status_update.py",
    "content": "#!/usr/bin/env python\n#\n# DSC Extension For Linux\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport dsc\nimport os\nimport json\nfrom Utils.WAAgentUtil import waagent\nfrom MockUtil import MockUtil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestStatusUpdate(unittest.TestCase):\n\n    def verify_nodeid_vmuuid(self, status_file):\n        self.assertTrue(os.path.exists(status_file), \"file exists\")\n        if os.path.exists(status_file):\n            jsonData = open(status_file)\n            status_data = json.load(jsonData)[0]\n            self.assertTrue('status' in status_data, \"status doesn't exists\")\n            substatusArray = status_data['status']['substatus']\n            isMetaDataFound = False\n            metasubstatus = None\n            if 'metadata' in  substatusArray[0].viewvalues():\n                metasubstatus = substatusArray[0]\n            self.assertTrue('formattedMessage' in metasubstatus, \"formattedMessage doesn't exists\")\n            formatedMessage = metasubstatus['formattedMessage']\n            self.assertTrue('message' in formatedMessage, \"message doesn't exists\")\n            self.assertTrue('AgentID' in formatedMessage['message'], \"AgentID doesn't exists\")\n            \n    def test_vmuuid(self):\n        dsc.hutil = MockUtil(self)\n        vmuuid = dsc.get_vmuuid()\n        self.assertTrue(vmuuid is not None, \"vm uuid is none\")\n    \n    def test_nodeid_with_dsc(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.push.mof')\n        nodeid = dsc.get_nodeid('/etc/opt/omi/conf/omsconfig/agentid')\n        self.assertTrue(nodeid is not None, \"nodeid is none\")\n\n    def test_nodeid_without_dsc(self):\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        nodeid = dsc.get_nodeid('/etc/opt/omi/conf/omsconfig/agentid1')\n        self.assertTrue(nodeid is None, \"nodeid is not none\")\n    \n    def test_statusfile_update(self):\n        status_file = 'status/0.status'\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n\n        class cresponse: deserialized_data= { \"AccountName\" : \"test\", \"ResourceGroupName\" : \"rgName\", \"SubscriptionId\" : \"testsubid\" }\n\n        dsc.update_statusfile(status_file, '123','345', cresponse)\n        self.verify_nodeid_vmuuid(status_file)\n        \n    def test_is_statusfile_update_idempotent(self):\n        status_file = 'status/0.status'\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n\n        class cresponse: deserialized_data= { \"AccountName\" : \"test\", \"ResourceGroupName\" : \"rgName\", \"SubscriptionId\" : \"testsubid\" }\n\n        
dsc.update_statusfile(status_file, '123','345', cresponse)\n        dsc.update_statusfile(status_file, '123','345', cresponse)\n        self.verify_nodeid_vmuuid(status_file)\n\n    def test_is_statusfile_update_register(self):\n        status_file = 'status/0.status'\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        exit_code, output = dsc.register_automation('somekey','http://dummy','','','','')\n        self.verify_nodeid_vmuuid(status_file)\n\n    def test_is_statusfile_update_pull(self):\n        status_file = 'status/0.status'\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.mof')\n        self.assertTrue('ReturnValue=0' in config)\n        self.verify_nodeid_vmuuid(status_file)\n\n    def test_is_statusfile_update_push(self):\n        status_file = 'status/0.status'\n        dsc.distro_category = dsc.get_distro_category()\n        dsc.hutil = MockUtil(self)\n        dsc.install_dsc_packages()\n        dsc.start_omiservice()\n        config = dsc.apply_dsc_meta_configuration('mof/dscnode.nxFile.meta.push.mof')\n        dsc.apply_dsc_configuration('mof/localhost.nxFile.mof')\n        self.assertTrue(os.path.exists('/tmp/dsctest'))\n        self.verify_nodeid_vmuuid(status_file)\n\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "DSC/urllib2httpclient.py",
    "content": "#!/usr/bin/env python2\n\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"Urllib2 HttpClient.\"\"\"\n\ntry:\n    from http.client import HTTPSConnection\nexcept ImportError:\n    from httplib import HTTPSConnection\nimport socket\nimport time\nimport traceback\nimport sys\ntry:\n    from urllib.parse import urlparse, urlencode\n    from urllib.request import urlopen, Request, HTTPSHandler, build_opener, ProxyHandler\n    from urllib.error import HTTPError\nexcept ImportError:\n    from urlparse import urlparse\n    from urllib import urlencode\n    from urllib2 import urlopen, Request, HTTPError, HTTPSHandler, build_opener, ProxyHandler\n\nfrom httpclient import *\n\nPY_MAJOR_VERSION = 0\nPY_MINOR_VERSION = 1\nPY_MICRO_VERSION = 2\n\nSSL_MODULE_NAME = \"ssl\"\n\n# On some system the ssl module might be missing\ntry:\n    import ssl\nexcept ImportError:\n    ssl = None\n\n\nclass HttpsClientHandler(HTTPSHandler):\n    \"\"\"Https handler to enable attaching cert/key to request. Also used to disable strict cert verification for\n    testing.\n    \"\"\"\n\n    def __init__(self, cert_path, key_path, insecure=False):\n        self.cert_path = cert_path\n        self.key_path = key_path\n\n        ssl_context = None\n        if insecure and SSL_MODULE_NAME in sys.modules and (sys.version_info[PY_MAJOR_VERSION] == 2 and\n                                                                    sys.version_info[PY_MINOR_VERSION] >= 7 and\n                                                                    sys.version_info[PY_MICRO_VERSION] >= 9):\n            ssl_context = ssl.create_default_context()\n            ssl_context.check_hostname = False\n            ssl_context.verify_mode = ssl.CERT_NONE\n        HTTPSHandler.__init__(self, context=ssl_context)  # Context can be None here\n\n    def https_open(self, req):\n        return self.do_open(self.get_https_connection, req, context=self._context)\n\n    def get_https_connection(self, host, context=None, timeout=180):\n        \"\"\"urllib2's AbstractHttpHandler will invoke this method with the host/timeout parameter. 
See urllib2's\n        AbstractHttpHandler for more details.\n\n        Args:\n            host    : string        , the host.\n            context : ssl_context   , the ssl context.\n            timeout : int           , the timeout value in seconds.\n\n        Returns:\n            An HttpsConnection\n        \"\"\"\n        socket.setdefaulttimeout(180)\n        if self.cert_path is None or self.key_path is None:\n            return HTTPSConnection(host, timeout=timeout, context=context)\n        else:\n            return HTTPSConnection(host, cert_file=self.cert_path, key_file=self.key_path, timeout=timeout,\n                                           context=context)\n\n\ndef request_retry_handler(func):\n    def decorated_func(*args, **kwargs):\n        max_retry_count = 3\n        for iteration in range(0, max_retry_count, 1):\n            try:\n                ret = func(*args, **kwargs)\n                return ret\n            except Exception as exception:\n                if iteration >= max_retry_count - 1:\n                    raise RetryAttemptExceededException(traceback.format_exc())\n                elif SSL_MODULE_NAME in sys.modules:\n                    if type(exception).__name__ == 'SSLError':\n                        time.sleep(5 + iteration)\n                        continue\n                raise exception\n    return decorated_func\n\n\nclass Urllib2HttpClient(HttpClient):\n    \"\"\"Urllib2 http client. Inherits from HttpClient.\n\n    Targets:\n        [2.7.9 - 2.7.9+] only due to the lack of strict certificate verification prior to this version.\n\n    Implements the following method common to all classes inheriting HttpClient.\n        get     (url, headers)\n        post    (url, headers, data)\n    \"\"\"\n\n    def __init__(self, cert_path, key_path, insecure=False, proxy_configuration=None):\n        HttpClient.__init__(self, cert_path, key_path, insecure, proxy_configuration)\n\n    @request_retry_handler\n    def issue_request(self, url, headers, method=None, data=None):\n        \"\"\"Issues a GET request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : string    , contains the serialized request body.\n\n        Returns:\n            A RequestResponse\n            :param method:\n        \"\"\"\n        https_handler = HttpsClientHandler(self.cert_path, self.key_path, self.insecure)\n        opener = build_opener(https_handler)\n        if self.proxy_configuration is not None:\n            proxy_handler = ProxyHandler({'http': self.proxy_configuration,\n                                                  'https': self.proxy_configuration})\n            opener.add_handler(proxy_handler)\n        req = Request(url, data=data, headers=headers)\n        req.get_method = lambda: method\n        response = opener.open(req, timeout=30)\n        opener.close()\n        https_handler.close()\n\n        return response\n\n    def get(self, url, headers=None):\n        \"\"\"Issues a GET request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n\n        Returns:\n            An http_response\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.GET)\n    
    except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n\n    def post(self, url, headers=None, data=None):\n        \"\"\"Issues a POST request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.POST, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n\n    def put(self, url, headers=None, data=None):\n        \"\"\"Issues a PUT request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.PUT, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n\n    def delete(self, url, headers=None, data=None):\n        \"\"\"Issues a DELETE request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.DELETE, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n"
  },
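The request_retry_handler decorator above retries only SSLError (sleeping 5, then 6 seconds) and converts exhaustion into RetryAttemptExceededException; any other exception propagates on the first attempt. A self-contained sketch of that pattern, with a hypothetical decorated function (in the file above, RetryAttemptExceededException actually comes from httpclient):

    import time
    import traceback

    class RetryAttemptExceededException(Exception):
        pass

    def request_retry_handler(func):
        def decorated(*args, **kwargs):
            max_retry_count = 3
            for iteration in range(max_retry_count):
                try:
                    return func(*args, **kwargs)
                except Exception as exc:
                    if iteration >= max_retry_count - 1:
                        raise RetryAttemptExceededException(traceback.format_exc())
                    if type(exc).__name__ == 'SSLError':
                        time.sleep(5 + iteration)  # 5s, then 6s between attempts
                        continue
                    raise
        return decorated

    @request_retry_handler
    def flaky_request():
        raise ValueError("non-SSL errors propagate immediately")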
  {
    "path": "DSC/urllib3httpclient.py",
    "content": "#!/usr/bin/env python2\n\n#\n# Copyright (C) Microsoft Corporation, All rights reserved.\n\n\"\"\"Urllib2 HttpClient.\"\"\"\n\ntry:\n    from http.client import HTTPSConnection\nexcept ImportError:\n    from httplib import HTTPSConnection\nimport socket\nimport time\nimport traceback\nimport sys\ntry:\n    from urllib.parse import urlparse, urlencode\n    from urllib.request import urlopen, Request, HTTPSHandler, build_opener, ProxyHandler\n    from urllib.error import HTTPError\nexcept ImportError:\n    from urlparse import urlparse\n    from urllib import urlencode\n    from urllib2 import urlopen, Request, HTTPError, HTTPSHandler, build_opener, ProxyHandler\n\nfrom httpclient import *\n\nPY_MAJOR_VERSION = 0\nPY_MINOR_VERSION = 1\nPY_MICRO_VERSION = 2\n\nSSL_MODULE_NAME = \"ssl\"\n\n# On some system the ssl module might be missing\ntry:\n    import ssl\nexcept ImportError:\n    ssl = None\n\n\nclass HttpsClientHandler(HTTPSHandler):\n    \"\"\"Https handler to enable attaching cert/key to request. Also used to disable strict cert verification for\n    testing.\n    \"\"\"\n\n    def __init__(self, cert_path, key_path, insecure=False):\n        self.cert_path = cert_path\n        self.key_path = key_path\n\n        ssl_context = None\n        if insecure and SSL_MODULE_NAME in sys.modules and (sys.version_info[PY_MAJOR_VERSION] == 2 and\n                                                                    sys.version_info[PY_MINOR_VERSION] >= 7 and\n                                                                    sys.version_info[PY_MICRO_VERSION] >= 9):\n            ssl_context = ssl.create_default_context()\n            ssl_context.check_hostname = False\n            ssl_context.verify_mode = ssl.CERT_NONE\n        HTTPSHandler.__init__(self, context=ssl_context)  # Context can be None here\n\n    def https_open(self, req):\n        return self.do_open(self.get_https_connection, req, context=self._context)\n\n    def get_https_connection(self, host, context=None, timeout=180):\n        \"\"\"urllib2's AbstractHttpHandler will invoke this method with the host/timeout parameter. 
See urllib2's\n        AbstractHttpHandler for more details.\n\n        Args:\n            host    : string        , the host.\n            context : ssl_context   , the ssl context.\n            timeout : int           , the timeout value in seconds.\n\n        Returns:\n            An HttpsConnection\n        \"\"\"\n        socket.setdefaulttimeout(180)\n        if self.cert_path is None or self.key_path is None:\n            return HTTPSConnection(host, timeout=timeout, context=context)\n        else:\n            return HTTPSConnection(host, cert_file=self.cert_path, key_file=self.key_path, timeout=timeout,\n                                           context=context)\n\n\ndef request_retry_handler(func):\n    def decorated_func(*args, **kwargs):\n        max_retry_count = 3\n        for iteration in range(0, max_retry_count, 1):\n            try:\n                ret = func(*args, **kwargs)\n                return ret\n            except Exception as exception:\n                if iteration >= max_retry_count - 1:\n                    raise RetryAttemptExceededException(traceback.format_exc())\n                elif SSL_MODULE_NAME in sys.modules:\n                    if type(exception).__name__ == 'SSLError':\n                        time.sleep(5 + iteration)\n                        continue\n                raise exception\n    return decorated_func\n\n\nclass Urllib3HttpClient(HttpClient):\n    \"\"\"Urllib2 http client. Inherits from HttpClient.\n\n    Targets:\n        [2.7.9 - 2.7.9+] only due to the lack of strict certificate verification prior to this version.\n\n    Implements the following method common to all classes inheriting HttpClient.\n        get     (url, headers)\n        post    (url, headers, data)\n    \"\"\"\n\n    def __init__(self, cert_path, key_path, insecure=False, proxy_configuration=None):\n        HttpClient.__init__(self, cert_path, key_path, insecure, proxy_configuration)\n\n    @request_retry_handler\n    def issue_request(self, url, headers, method=None, data=None):\n        \"\"\"Issues a GET request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : string    , contains the serialized request body.\n\n        Returns:\n            A RequestResponse\n            :param method:\n        \"\"\"\n        https_handler = HttpsClientHandler(self.cert_path, self.key_path, self.insecure)\n        opener = build_opener(https_handler)\n        if self.proxy_configuration is not None:\n            proxy_handler = ProxyHandler({'http': self.proxy_configuration,\n                                                  'https': self.proxy_configuration})\n            opener.add_handler(proxy_handler)\n        if sys.version_info >= (3,0):\n            if data is not None:\n                data = data.encode(\"utf-8\")\n        req = Request(url, data=data, headers=headers)\n        req.get_method = lambda: method\n        response = opener.open(req, timeout=30)\n        opener.close()\n        https_handler.close()\n\n        return response\n\n    def get(self, url, headers=None):\n        \"\"\"Issues a GET request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n\n        Returns:\n            An http_response\n        \"\"\"\n        headers = 
self.merge_headers(self.default_headers, headers)\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.GET)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n\n    def post(self, url, headers=None, data=None):\n        \"\"\"Issues a POST request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.POST, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read().decode('utf-8'))\n\n    def put(self, url, headers=None, data=None):\n        \"\"\"Issues a PUT request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.PUT, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read().decode('utf-8'))\n\n    def delete(self, url, headers=None, data=None):\n        \"\"\"Issues a DELETE request to the provided url and using the provided headers.\n\n        Args:\n            url     : string    , the url.\n            headers : dictionary, contains the headers key value pair.\n            data    : dictionary, contains the non-serialized request body.\n\n        Returns:\n            A RequestResponse\n        \"\"\"\n        headers = self.merge_headers(self.default_headers, headers)\n\n        if data is None:\n            serial_data = \"\"\n        else:\n            serial_data = self.json.dumps(data)\n            headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})\n\n        try:\n            response = self.issue_request(url, headers=headers, method=self.DELETE, data=serial_data)\n        except HTTPError:\n            exception_type, error = sys.exc_info()[:2]\n            return RequestResponse(error.code)\n\n        return RequestResponse(response.getcode(), response.read())\n"
  },
  {
    "path": "Diagnostic/ChangeLogs",
    "content": "2020-11-06: LAD-3.0.131\n    - Fix issue #1262: Crashing bug caused by task synchronization issue in XJsonBlobRequest\n\n2020-07-01: LAD-3.0.129\n    - Fix issue #499 : Need a better error message if LAD protectedSettings is missing\n    - Fix issue #944 : Allow installing LAD without storage account sink\n\n2020-01-30: LAD-3.0.127\n    - Fix issue #996: Remove fluent-gem-plugin from LAD\n    - Fix issue #994: Move LAD's out_mdsd buffer path to own directory\n    - Fix issue #948: Failed to launch mdsd with error: cannot concatenate 'str' and 'int' objects\n    - Fix issue #978: LAD is limited to upload 2 events per second to EventHubs\n\n2019-10-24: LAD-3.0.125\n    - Reinstall OMI if it is failing to start as recovery action\n    - VM extension config update needs to regenerate the config artifacts\n\n2019-08-14: LAD-3.0.123\n    - Fix a race condition in install. The dependencies were installed during \"enable\" step; which is not idempotent.\n\n2019-06-17: LAD-3.0.121\n    - Add logrotate policy to manage mdsd log files.\n\n2019-01-15: LAD-3.0.119\n    - Add blobEndpoint for storage accounts; bug fix for National clouds.\n\n2017-09-05: LAD-3.0.111\n    - Ensure SAS storage token is supplied\n    - Explicitly reject deprecated use of LAD 2.3's storageAccountKey\n\n2017-08-11: LAD-3.0.109\n    - Fix waagent-related issue on Debian distros\n    - Add additional unit tests\n    - Replace multiple uses of \"local\" with \"locale\"\n    - Improve error reporting when catching an exception\n    - Fix #398, #399, #340\n\n2017-05-16: LAD-3.0.107\n    - Move resourceId field generation for JSON events from LAD to mdsd\n\n2017-05-10: LAD-3.0.103\n    - Allow '*' in syslog spec, add more fields in syslog records for\n      EventHubs\n\n2017-05-08: LAD-3.0.101\n    - New release of LAD 3.0. 
Refer to README.md\n\n2017-01-13: LAD-2.3.9021\n    - Fix rsyslogd core dump issue when re-enabling the extension\n    - Take latest mdsd binary that fixes other issues like missing perf\n      counter logs when there's a race condition between mdsd and omiserver.\n\n2016-11-30: LAD-2.3.9017\n    - Fix scx upgrade issue on RPM-based distros when apache or mysql is\n      installed.\n\n2016-11-11: LAD-2.3.9015\n    - Correctly fail Enable when mdsd dependency set up fails.\n    - Added /etc/fstab watcher feature (logging to /dev/console so that issues\n      can be found on serial logs)\n    - Add storage account SAS token support (replacing storage account key)\n    - Encrypt storage secret in xmlCfg.xml\n\n2016-10-31: LAD-2.3.9013\n    - Use semodule -u (upgrade) to reduce unnecessary SELinux policy\n      re-install time\n    - Use the latest scx package version (1.6.2-337)\n    - Issue #265: Don't remove port 1270 from omiserver.conf if omsagent is\n      installed.\n\n2016-10-07: LAD-2.3.9011\n    - Update OpenSSL library to the latest\n    - Update rsyslog output modules for all versions of rsyslog (5/7/8) to use\n      Unix domain socket.\n    - Update mdsd binary to the latest (1.2.104) with various fixes\n    - Dependencies are now installed at Enable time, to reduce VM deployment\n      time.\n\n2016-09-16: LAD-2.3.9009\n    - Underlying monitoring agent binary (mdsd) upgrade with many fixes and\n      improvements\n    - Fixes storage end point bug (affected Mooncake and Blackforest)\n\n2016-07-14: LAD-2.3.9007\n    - Fixes install issues on some RH-based distros (e.g., OracleLinux 7) due\n      to lack of tar.\n    - Fixes duplicate logging (on /var/log/syslog) issue on fileCfg\n\n2016-06-30: LAD-2.3.9005\n    - Fixes non-starting monitoring agent issue on systemd-enabled distros (#180)\n    - HandlerUtil unified with other extensions\n    - Telemetry (logging) improvement\n    - Remove possibility of logging some password\n\n2016-06-21: LAD-2.3.9003\n    - Monitoring agent (mdsd) updates for a memory issue fix, a signal handler fix,\n      and a fix to avoid a spin loop under certain circumstances\n    - doesn't count non-quick crashes (>30 mins) towards retry limit\n    - OMI reconfiguration not to listen to port 1270\n    - Use systemd on Ubuntu 16.04 as well\n    - Validate mdsd XML config before starting mdsd, fails fast on invalid\n      config (with success)\n    - Small Python 2.6 bug fix (syslog.openlog())\n\n2016-06-06: LAD-2.3.9001\n    - Fix issue of syslog messages not collected by default on SLES 11\n    - Minor config syntactic fixes\n    - Logging fix to show correct extension version\n    - Monitoring agent kill is no longer SIGKILL, but SIGTERM.\n    - Monitoring agent listening port is now dynamic if the specified port (29131) is in use.\n    - Monitoring agent core dump is enabled (dumped on its current working directory)\n    - Newer monitoring agent bits with added features (not available on LAD yet)\n\n2016-05-04: LAD-2.3.9\n    - mdsd bits are now built as statically as possible, so that a single monolithic executable\n      can be used on as many distros/versions.\n    - OMI install result is checked and tried up to 3 times. 
If all fail, LAD install fails as well.\n    - OMI is checked periodically for its health and LAD will restart it if OMI is not up.\n\n2016-03-26: LAD-2.3.7\n    - mdsd http proxy config through waagent.conf\n    - OpenSUSE 13 support revival\n    - LAD no longer (re)starts apache/mysql invasively (restarts only when they were running)\n    - Bundle libglibmm*.so (no longer downloaded/installed when LAD is installed)\n    - AppInsights configuration changes\n\n2016-03-08: LAD-2.3.6\n    - mdsd http proxy support (mdsd binary change)\n    - Ubuntu 16.04 glibmm install issue fix\n    - Report success extension event for unsupported distros/versions\n\n2016-02-25: LAD-2.3.5. Reviving SUSE 11 support and consolidating binaries of diff versions of same distro\n\n2016-02-25: LAD-2.3.4. Hotfix for portal perf graphs not showing (xmlCfg parsing bug)\n\n2016-02-15: LAD-2.3.3. No changes on mdsd/LAD code. Just rebuilding to take in the most recent AISDK fixes\n"
  },
  {
    "path": "Diagnostic/DistroSpecific.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n# Distribution-specific actions\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport exceptions\nimport time\nimport subprocess\nimport re\nfrom Utils.WAAgentUtil import waagent\n\n\nclass CommonActions:\n    def __init__(self, logger):\n        self.logger = logger\n\n    def filterNonAsciiCharacters(self, output_msg):\n        return output_msg.encode('utf-8').decode('ascii','ignore')\n        \n    def log_run_get_output(self, cmd, should_log=True):\n        \"\"\"\n        Execute a command in a subshell\n        :param str cmd: The command to be executed\n        :param bool should_log: If true, log command execution\n        :rtype: int, str\n        :return: A tuple of (subshell exit code, contents of stdout)\n        \"\"\"\n        if should_log:\n            self.logger(\"RunCmd \" + cmd)\n        error, msg = waagent.RunGetOutput(cmd, chk_err=should_log)\n        if should_log:\n            self.logger(\"Return \" + str(error) + \":\" + msg)\n        return int(error), self.filterNonAsciiCharacters(msg)\n\n    def log_run_ignore_output(self, cmd, should_log=True):\n        \"\"\"\n        Execute a command in a subshell\n        :param str cmd: The command to be executed\n        :param bool should_log: True if command execution should be logged. 
(False preserves privacy of parameters.)\n        :rtype: int\n        :return: The subshell exit code\n        \"\"\"\n        error, msg = self.log_run_get_output(cmd, should_log)\n        return int(error)\n\n    def log_run_with_timeout(self, cmd, timeout=3600):\n        \"\"\"\n        Execute a command in a subshell, killing the subshell if it runs too long\n        :param str cmd: The command to be executed\n        :param int timeout: The maximum elapsed time, in seconds, to wait for the subshell to return; default 360\n        :rtype: int, str\n        :return: (1, \"Process timeout\\n\") if timeout, else (subshell exit code, contents of stdout)\n        \"\"\"\n        self.logger(\"Run with timeout: \" + cmd)\n        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,\n                                   executable='/bin/bash')\n        time.sleep(1)\n        while process.poll() is None and timeout > 0:\n            time.sleep(1)\n            timeout -= 1\n        if process.poll() is None:\n            self.logger(\"Timeout while running:\" + cmd)\n            process.kill()\n            return 1, \"Process timeout\\n\"\n        output, error = process.communicate()\n        self.logger(\"Return \" + str(error))\n        return int(process.returncode), output\n\n    def log_run_multiple_cmds(self, cmds, with_timeout, timeout=360):\n        \"\"\"\n        Execute multiple commands in subshells, with optional timeout protection\n        :param Iterable[str] cmds: An iterable of commands to be executed\n        :param bool with_timeout: True if commands should be run with timeout\n        :param int timeout: The timeout, in seconds; default 360. Ignored if with_timeout is False.\n        :rtype: int, str\n        :return: A tuple of (sum of status codes, concatenated stdout from commands)\n        \"\"\"\n        errors = 0\n        output = []\n        for cmd in cmds:\n            if with_timeout:\n                err, msg = self.log_run_with_timeout(cmd, timeout)\n            else:\n                err, msg = self.log_run_get_output(cmd)\n            errors += err\n            output.append(msg)\n        return errors, ''.join(output)\n\n    def extract_om_path_and_version(self, results):\n        \"\"\"\n        Get information about rsyslogd\n        :param str results: Package information about omprog.so or version\n        :rtype: str, str\n        :return: (Path where rsyslogd output modules are installed, major version of rsyslogd)\n        \"\"\"\n        match = re.search(r\"(.+)omprog\\.so\", results)\n        if not match:\n            return None, ''\n        path = match.group(1)\n        match = re.search(r\"Version\\s*:\\s*(\\d+)\\D\", results)\n        if not match:\n            self.logger(\"rsyslog is present but version could not be determined\")\n            return None, ''\n        version = match.group(1)\n        return path, version\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        \"\"\"\n        Ensure an arbitrary set of packages is installed\n        :param list[str] packages: Iterable of package names\n        :param bool with_timeout: true if package installations should be aborted if they take too long\n        :rtype: int\n        :return:\n        \"\"\"\n        return 0, ''\n\n    def install_required_packages(self):\n        \"\"\"\n        Install packages required by this distro to meet the common bar required of all distros\n        :rtype: int, str\n        :return: 
(status, concatenated stdout from all package installs)\n        \"\"\"\n        return 0, \"no additional packages were needed\"\n\n    def is_package_handler(self, package_manager):\n        \"\"\"\n        Checks if the distro's package manager matches the specified tool.\n        :param str package_manager: The tool to be checked against the distro's native package manager\n        :rtype: bool\n        :return: True if the distro's native package manager is package_manager\n        \"\"\"\n        return False\n\n    def prepare_for_mdsd_install(self):\n        return 0, ''\n\n    def extend_environment(self, env):\n        \"\"\"\n        Add required environment variables to process environment\n        :param dict[str, str] env: Process environment\n        \"\"\"\n        pass\n\n    def use_systemd(self):\n        \"\"\"\n        Determine if the distro uses systemd as its system management tool.\n        :rtype: bool\n        :return: True if the distro uses systemd as its system management tool.\n        \"\"\"\n        return False\n\n    def install_lad_mdsd(self):\n        \"\"\"\n        Install the mdsd binary using the bundled .deb/.rpm packages.\n        Should be overridden by each direct subclass for Debian/Redhat.\n        Can't be called for this base class.\n        :rtype: int, str\n        :return: (status, concatenated stdout from the package install)\n        \"\"\"\n        assert False, \"Can't be called on the base class (CommonActions)!\"\n\n    def remove_lad_mdsd(self):\n        \"\"\"\n        Remove the mdsd binary that was installed with the bundled .deb/.rpm packages.\n        Should be overridden by each direct subclass for Debian/Redhat.\n        Can't be called for this base class.\n        :rtype: int, str\n        :return: (status, concatenated stdout from the package remove)\n        \"\"\"\n        assert False, \"Can't be called on the base class (CommonActions)!\"\n\n\nclass DebianActions(CommonActions):\n    def __init__(self, logger):\n        CommonActions.__init__(self, logger)\n\n    def is_package_handler(self, package_manager):\n        return package_manager == \"dpkg\"\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        cmd = 'dpkg-query -l PACKAGE |grep ^ii; if [ ! $? 
== 0 ]; then apt-get update; apt-get install -y PACKAGE; fi'\n        return self.log_run_multiple_cmds([cmd.replace(\"PACKAGE\", p) for p in packages], with_timeout)\n\n    def extend_environment(self, env):\n        env.update({\"SSL_CERT_DIR\": \"/usr/lib/ssl/certs\", \"SSL_CERT_FILE\": \"/usr/lib/ssl/cert.pem\"})\n\n    def install_lad_mdsd(self):\n        return self.log_run_get_output('dpkg -i lad-mdsd-*.deb')\n\n    def remove_lad_mdsd(self):\n        return self.log_run_get_output('dpkg -P lad-mdsd')\n\n\nclass CredativActions(DebianActions):\n    def __init__(self, logger):\n        DebianActions.__init__(self, logger)\n\n    def install_required_packages(self):\n        # curl not installed by default on Credative Debian Linux, now required by omsagent\n        return self.install_extra_packages(('curl',), True)\n\n\nclass Ubuntu1510OrHigherActions(DebianActions):\n    def __init__(self, logger):\n        DebianActions.__init__(self, logger)\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        count = len(packages)\n        if count == 0:\n            return 0, ''\n        package_list = str.join(' ', packages)\n        cmd = '[ $(dpkg -l PACKAGES |grep ^ii |wc -l) -eq \\'COUNT\\' ] || apt-get install -y PACKAGES'\n        cmd = cmd.replace('PACKAGES', package_list).replace('COUNT', str(count))\n        if with_timeout:\n            return self.log_run_with_timeout(cmd)\n        else:\n            return self.log_run_get_output(cmd)\n\n    def use_systemd(self):\n        return True\n\n\nclass RedhatActions(CommonActions):\n    def __init__(self, logger):\n        CommonActions.__init__(self, logger)\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        install_cmd = 'rpm -q PACKAGE; if [ ! $? == 0 ]; then yum install -y PACKAGE; fi'\n        return self.log_run_multiple_cmds([install_cmd.replace(\"PACKAGE\", p) for p in packages], with_timeout)\n\n    def install_required_packages(self):\n        # policycoreutils-python missing on Oracle Linux (still needed to manipulate SELinux policy).\n        # tar is really missing on Oracle Linux 7!\n        return self.install_extra_packages(('policycoreutils-python', 'tar'), True)\n\n    def is_package_handler(self, package_manager):\n        return package_manager == \"rpm\"\n\n    def extend_environment(self, env):\n        env.update({\"SSL_CERT_DIR\": \"/etc/pki/tls/certs\", \"SSL_CERT_FILE\": \"/etc/pki/tls/cert.pem\"})\n\n    def install_lad_mdsd(self):\n        return self.log_run_get_output('rpm -i --force lad-mdsd-*.rpm')\n\n    def remove_lad_mdsd(self):\n        return self.log_run_get_output('rpm -e lad-mdsd')\n\nclass Redhat8Actions(RedhatActions):\n    def __init__(self, logger):\n        RedhatActions.__init__(self, logger)\n\n    def install_required_packages(self):\n        return self.install_extra_packages(('policycoreutils-python-utils', 'tar'), True)\n\nclass Suse11Actions(RedhatActions):\n    def __init__(self, logger):\n        RedhatActions.__init__(self, logger)\n        self.certs_file = \"/etc/ssl/certs/mdsd-ca-certs.pem\"\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        install_cmd = 'rpm -qi PACKAGE;  if [ ! $? 
== 0 ]; then zypper --non-interactive install PACKAGE;fi'\n        return self.log_run_multiple_cmds([install_cmd.replace(\"PACKAGE\", p) for p in packages], with_timeout)\n\n    def install_required_packages(self):\n        return 0, \"no additional packages were needed\"\n\n    # For SUSE11, we need to create a CA certs file for our statically linked OpenSSL 1.0 libs\n    def prepare_for_mdsd_install(self):\n        commands = (\n            r'cp /dev/null {0}'.format(self.certs_file),\n            r'chown 0:0 {0}'.format(self.certs_file),\n            r'chmod 0644 {0}'.format(self.certs_file),\n            r\"cat /etc/ssl/certs/????????.[0-9a-f] | sed '/^#/d' >> {0}\".format(self.certs_file)\n        )\n        return self.log_run_multiple_cmds(commands, False)\n\n    def extend_environment(self, env):\n        env.update({\"SSL_CERT_FILE\": self.certs_file})\n\n\nclass Suse12Actions(RedhatActions):\n    def __init__(self, logger):\n        RedhatActions.__init__(self, logger)\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        install_cmd = 'rpm -qi PACKAGE; if [ ! $? == 0 ]; then zypper --non-interactive install PACKAGE;fi'\n        return self.log_run_multiple_cmds([install_cmd.replace(\"PACKAGE\", p) for p in packages], with_timeout)\n\n    def install_required_packages(self):\n        return self.install_extra_packages(('libgthread-2_0-0', 'ca-certificates-mozilla', 'rsyslog'), True)\n\n    def extend_environment(self, env):\n        env.update({\"SSL_CERT_DIR\": \"/var/lib/ca-certificates/openssl\", \"SSL_CERT_FILE\": \"/etc/ssl/cert.pem\"})\n\n\nclass CentosActions(RedhatActions):\n    def __init__(self, logger):\n        RedhatActions.__init__(self, logger)\n\n    def install_extra_packages(self, packages, with_timeout=False):\n        install_cmd = 'rpm -qi PACKAGE; if [ ! $? 
== 0 ]; then yum install -y PACKAGE; fi'\n        return self.log_run_multiple_cmds([install_cmd.replace(\"PACKAGE\", p) for p in packages], with_timeout)\n\n    def install_required_packages(self):\n        # policycoreutils-python missing on CentOS (still needed to manipulate SELinux policy)\n        return self.install_extra_packages(('policycoreutils-python',), True)\n\n\nclass Centos8Actions(RedhatActions):\n    def __init__(self, logger):\n        RedhatActions.__init__(self, logger)\n\n    def install_required_packages(self):\n        return self.install_extra_packages(('policycoreutils-python-utils', 'tar'), True)\n\n\nDistroMap = {\n    'debian': CredativActions,  # Credative Debian Linux claimed the 'debian' platform name and lacks curl,\n                                # while all other Debian-based distros ship it; hence this odd mapping.\n    'kali': DebianActions,\n    'ubuntu': DebianActions,\n    'ubuntu:16.04': Ubuntu1510OrHigherActions,\n    'ubuntu:18.04': Ubuntu1510OrHigherActions,\n    'redhat': RedhatActions,\n    'redhat:8': Redhat8Actions,\n    'centos': CentosActions,\n    'centos:8': Centos8Actions,\n    'oracle': RedhatActions,\n    'suse:12': Suse12Actions,\n    'suse': Suse12Actions,\n    'sles:15': Suse12Actions,\n    'opensuse:15': Suse12Actions,\n    'almalinux': Redhat8Actions\n}\n\n\ndef get_distro_actions(name, version, logger):\n    name_and_version = name + \":\" + version\n    if name_and_version in DistroMap:\n        return DistroMap[name_and_version](logger)\n    else:\n        major_version = version.split(\".\")[0]\n        name_and_major_version = name + \":\" + major_version\n        if name_and_major_version in DistroMap:\n            return DistroMap[name_and_major_version](logger)\n        if name in DistroMap:\n            return DistroMap[name](logger)\n    raise exceptions.LookupError('{0} is not a supported distro'.format(name_and_version))\n"
  },
  {
    "path": "Diagnostic/HandlerManifest.json",
    "content": "[\n  {\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"disableCommand\": \"shim.sh -disable\",\n      \"enableCommand\": \"shim.sh -enable\",\n      \"installCommand\": \"shim.sh -install\",\n      \"uninstallCommand\": \"shim.sh -uninstall\",\n      \"updateCommand\": \"shim.sh -update\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false,\n      \"updateMode\": \"updatewithinstall\"\n    }\n  }\n]\n"
  },
  {
    "path": "Diagnostic/Makefile",
    "content": "all: package\n.PHONY: all\n.PHONY: clean\n.PHONY: package\n\nLADSOURCES = \\\n\tdiagnostic.py \\\n\twatcherutil.py \\\n\ttests \\\n\tHandlerManifest.json \\\n\tlicense.txt \\\n\tmanifest.xml \\\n\trun_unittests.sh \\\n\tservices \\\n\tUtils \\\n\t\nUTILSOURCES = \\\n\t../Utils/HandlerUtil.py \\\n\t../Utils/__init__.py \\\n\t../Utils/WAAgentUtil.py \\\n\nclean:\n\trm -rf output\n\npackage: $(LADSOURCES) $(UTILSOURCES)\n\tmkdir -p output\n\tcp -t output -r $(LADSOURCES)\n\tcp -t output/Utils -r $(UTILSOURCES)\n\n\t\n"
  },
  {
    "path": "Diagnostic/Providers/Builtin.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n#  the Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# A provider is responsible for taking a particular syntax of configuration instructions, as found in the JSON config\n# blob, and using it to enable collection of data as specified in those instructions.\n\n# The \"Builtin\" configuration instructions are agnostic to the collection mechanism used to implement them; it's simply\n# a list of metrics to be collected on a particular schedule. The metric names are collected into classes for ease\n# of understanding by the user. The predefined classes and metric names are available without regard to how the\n# underlying mechanism might name them.\n#\n# This specific implementation of the Builtin provider converts the configuration instructions into a set of OMI\n# queries to be executed by the mdsd agent. The agent executes the queries are written by this provider and uploads\n# the results to the appropriate table in the customer's storage account.\n#\n# A different implementation might use fluentd to collect the data and to upload the results to table storage.\n\n# A different provider (e.g. an OMI provider) would expect configuration instructions bound directly to OMI; that is,\n# the PublicConfig JSON delivered to LAD would itself contain actual OMI queries. The implementation of such a provider\n# might construct an mdsd configuration file cause mdsd to run the specified queries and store the data in tables.\n\nimport Utils.ProviderUtil as ProvUtil\nfrom collections import defaultdict\nimport xml.etree.ElementTree as ET\nimport Utils.XmlUtil as XmlUtil\nfrom xml.sax.saxutils import quoteattr\n\n\n# These are the built-in metrics this code provides, grouped by class. 
# These are the built-in metrics this code provides, grouped by class. The builtin counter namespace is\n# case-insensitive; this collection of maps converts each name to the case-sensitive OMI name.\n_builtIns = {\n    'processor':  { 'percentidletime': 'PercentIdleTime', 'percentprocessortime': 'PercentProcessorTime',\n                    'percentiowaittime': 'PercentIOWaitTime', 'percentinterrupttime': 'PercentInterruptTime',\n                    'percentusertime': 'PercentUserTime', 'percentnicetime': 'PercentNiceTime',\n                    'percentprivilegedtime': 'PercentPrivilegedTime' },\n    'memory':     { 'availablememory': 'AvailableMemory', 'percentavailablememory': 'PercentAvailableMemory',\n                    'usedmemory': 'UsedMemory', 'percentusedmemory': 'PercentUsedMemory',\n                    'pagespersec': 'PagesPerSec', 'pagesreadpersec': 'PagesReadPerSec',\n                    'pageswrittenpersec': 'PagesWrittenPerSec', 'availableswap': 'AvailableSwap',\n                    'percentavailableswap': 'PercentAvailableSwap', 'usedswap': 'UsedSwap',\n                    'percentusedswap': 'PercentUsedSwap'},\n    'network':    { 'bytestransmitted': 'BytesTransmitted', 'bytesreceived': 'BytesReceived',\n                    'bytestotal': 'BytesTotal', 'packetstransmitted': 'PacketsTransmitted',\n                    'packetsreceived': 'PacketsReceived', 'totalrxerrors': 'TotalRxErrors',\n                    'totaltxerrors': 'TotalTxErrors', 'totalcollisions': 'TotalCollisions' },\n    'filesystem': { 'freespace': 'FreeMegabytes', 'usedspace': 'UsedMegabytes',\n                    'percentfreespace': 'PercentFreeSpace', 'percentusedspace': 'PercentUsedSpace',\n                    'percentfreeinodes': 'PercentFreeInodes', 'percentusedinodes': 'PercentUsedInodes',\n                    'bytesreadpersecond': 'ReadBytesPerSecond', 'byteswrittenpersecond': 'WriteBytesPerSecond',\n                    'bytespersecond': 'BytesPerSecond', 'readspersecond': 'ReadsPerSecond',\n                    'writespersecond': 'WritesPerSecond', 'transferspersecond': 'TransfersPerSecond' },\n    'disk':       { 'readspersecond': 'ReadsPerSecond', 'writespersecond': 'WritesPerSecond',\n                    'transferspersecond': 'TransfersPerSecond', 'averagereadtime': 'AverageReadTime',\n                    'averagewritetime': 'AverageWriteTime', 'averagetransfertime': 'AverageTransferTime',\n                    'averagediskqueuelength': 'AverageDiskQueueLength', 'readbytespersecond': 'ReadBytesPerSecond',\n                    'writebytespersecond': 'WriteBytesPerSecond', 'bytespersecond': 'BytesPerSecond' }\n    }\n\n_omiClassName = { 'processor': 'SCX_ProcessorStatisticalInformation',\n                  'memory': 'SCX_MemoryStatisticalInformation',\n                  'network': 'SCX_EthernetPortStatistics',\n                  'filesystem': 'SCX_FileSystemStatisticalInformation',\n                  'disk': 'SCX_DiskDriveStatisticalInformation'\n                }\n\n# Default CQL condition clause (WHERE ...) for relevant counter classes\n_defaultCqlCondition = {\n                        #'network': '...',  # No 'Name' or 'IsAggregate' columns from SCX_EthernetPort... cql query.\n                                            # If there are multiple NICs, this might cause some issue. 
Beware.\n                                            # The column/value distinguishing NICs is e.g., 'InstanceID=\"eth0\"'.\n                        'filesystem': 'IsAggregate=TRUE',  # For specific file system (e.g., root fs), use 'Name=\"/\"'\n                        'disk': 'IsAggregate=TRUE',  # For specific disk (e.g., /dev/sda), use 'Name=\"sda\"'\n                        'processor': 'IsAggregate=TRUE',  # For specific processor core, use 'Name=\"0\"'\n                        #'memory': 'IsAggregate=TRUE',  # No separate instances of memory, so no WHERE condition is needed\n                       }\n\n# The Azure Metrics infrastructure, along with App Insights, requires that quantities be measured\n# in one of these units: Percent, Count, Seconds, Milliseconds, Bytes, BytesPerSecond, CountPerSecond\n#\n# Some of the OMI metrics are retrieved in some other unit (e.g. \"MiB\") and need to be scaled\n# to the expected unit before being passed along the pipeline. The _scaling map holds all OMI counter\n# names that need to be scaled. If a counterSpecifier isn't in this list, no scaling is needed.\n_scaling = defaultdict(lambda:defaultdict(str),\n            { 'memory' : defaultdict(str,\n                { 'AvailableMemory': 'scaleUp=\"1048576\"',\n                  'UsedMemory': 'scaleUp=\"1048576\"',\n                  'AvailableSwap': 'scaleUp=\"1048576\"',\n                  'UsedSwap': 'scaleUp=\"1048576\"'\n                } ),\n              'filesystem' : defaultdict(str,\n                 {'FreeMegabytes': 'scaleUp=\"1048576\"',\n                  'UsedMegabytes': 'scaleUp=\"1048576\"',\n                  }),\n              } )\n\n_metrics = defaultdict(list)\n_eventNames = {}\n\n_defaultSampleRate = 15\n\n\ndef SetDefaultSampleRate(rate):\n    global _defaultSampleRate\n    _defaultSampleRate = rate\n\n\ndef default_condition(class_name):\n    return _defaultCqlCondition[class_name] if class_name in _defaultCqlCondition else ''\n\n\nclass BuiltinMetric:\n    def __init__(self, counterSpec):\n        \"\"\"\n        Construct an instance of the BuiltinMetric class. Values are case-insensitive unless otherwise noted.\n\n        \"type\": the provider type. If present, must have value \"builtin\". If absent, assumed to be \"builtin\".\n        \"class\": the name of the class within which this metric is scoped. Must be a key in the _builtIns dict.\n        \"counter\": the name of the metric, within the class. Must appear in the list of metric names for this class\n                found in the _builtIns dict. In this implementation, the builtin counter name is mapped to the OMI\n                counter name\n        \"instanceId\": the identifier for the specific instance of the metric, if any. 
Must be \"None\" for uninstanced\n                metrics.\n        \"counterSpecifier\": the name under which this retrieved metric will be stored\n        \"sampleRate\": a string containing an ISO8601-compliant duration.\n\n        :param counterSpec: A dict containing the key/value settings that define the metric to be collected.\n        \"\"\"\n        t = ProvUtil.GetCounterSetting(counterSpec, 'type')\n        if t is None:\n            self._Type = 'builtin'\n        else:\n            self._Type = t.lower()\n            if t != 'builtin':\n                raise ProvUtil.UnexpectedCounterType('Expected type \"builtin\" but saw type \"{0}\"'.format(self._Type))\n\n        self._CounterClass = ProvUtil.GetCounterSetting(counterSpec, 'class')\n        if self._CounterClass is None:\n            raise ProvUtil.InvalidCounterSpecification('Builtin metric spec missing \"class\"')\n        self._CounterClass = self._CounterClass.lower()\n        if self._CounterClass not in _builtIns:\n            raise ProvUtil.InvalidCounterSpecification('Unknown Builtin class {0}'.format(self._CounterClass))\n        builtin_raw_counter_name = ProvUtil.GetCounterSetting(counterSpec, 'counter')\n        if builtin_raw_counter_name is None:\n            raise ProvUtil.InvalidCounterSpecification('Builtin metric spec missing \"counter\"')\n        builtin_counter_name = builtin_raw_counter_name.lower()\n        if builtin_counter_name not in _builtIns[self._CounterClass]:\n            raise ProvUtil.InvalidCounterSpecification(\n                'Counter {0} not in builtin class {1}'.format(builtin_raw_counter_name, self._CounterClass))\n        self._Counter = _builtIns[self._CounterClass][builtin_counter_name]\n        self._Condition = ProvUtil.GetCounterSetting(counterSpec, 'condition')\n        self._Label = ProvUtil.GetCounterSetting(counterSpec, 'counterSpecifier')\n        if self._Label is None:\n            raise ProvUtil.InvalidCounterSpecification(\n                'No counterSpecifier set for builtin {1} {0}'.format(self._Counter, self._CounterClass))\n        self._SampleRate = ProvUtil.GetCounterSetting(counterSpec, 'sampleRate')\n\n    def is_type(self, t):\n        \"\"\"\n        Returns True if the metric is of the specified type.\n        :param t: The name of the metric type to be checked\n        :return bool:\n        \"\"\"\n        return self._Type == t.lower()\n\n    def class_name(self):\n        return self._CounterClass\n\n    def counter_name(self):\n        return self._Counter\n\n    def condition(self):\n        return self._Condition\n\n    def label(self):\n        return self._Label\n\n    def sample_rate(self):\n        \"\"\"\n        Determine how often this metric should be retrieved. 
If the metric didn't define a sample period, return the\n        default.\n        :return int: Number of seconds between collecting samples of this metric.\n        \"\"\"\n        if self._SampleRate is None:\n            return _defaultSampleRate\n        else:\n            return ProvUtil.IntervalToSeconds(self._SampleRate)\n\n\ndef AddMetric(counter_spec):\n    \"\"\"\n    Add a metric to the list of metrics to be collected.\n    :param counter_spec: The specification of a builtin metric.\n    :return: the generated local-table name in mdsd into which this metric will be fetched, or None\n    \"\"\"\n    global _metrics, _eventNames\n    try:\n        metric = BuiltinMetric(counter_spec)\n    except ProvUtil.ParseException as ex:\n        print \"Couldn't create metric: \", ex\n        return None\n\n    # (class, instanceId, sampleRate) -> [ metric ]\n    # Given a class, instance within that class, and sample rate, we have a list of the requested metrics\n    # matching those constraints. For that set of constraints, we also have a common eventName, the local\n    # table where we store the collected metrics.\n\n    key = (metric.class_name(), metric.condition(), metric.sample_rate())\n    if key not in _eventNames:\n        _eventNames[key] = ProvUtil.MakeUniqueEventName('builtin')\n    _metrics[key].append(metric)\n    return _eventNames[key]\n\n\ndef UpdateXML(doc):\n    \"\"\"\n    Add to the mdsd XML the minimal set of OMI queries which will retrieve the metrics requested via AddMetric(). This\n    provider doesn't need any configuration external to mdsd; if it did, that would be generated here as well.\n\n    :param doc: XML document object to be updated\n    :return: None\n    \"\"\"\n    global _metrics, _eventNames, _omiClassName\n    for group in _metrics:\n        (class_name, condition_clause, sample_rate) = group\n        if not condition_clause:\n            condition_clause = default_condition(class_name)\n        columns = []\n        mappings = []\n        for metric in _metrics[group]:\n            omi_name = metric.counter_name()\n            scale = _scaling[class_name][omi_name]\n            columns.append(omi_name)\n            mappings.append('<MapName name=\"{0}\" {1}>{2}</MapName>'.format(omi_name, scale, metric.label()))\n        column_string = ','.join(columns)\n        if condition_clause:\n            cql_query = quoteattr(\"SELECT {0} FROM {1} WHERE {2}\".format(column_string,\n                                                                         _omiClassName[class_name], condition_clause))\n        else:\n            cql_query = quoteattr(\"SELECT {0} FROM {1}\".format(column_string, _omiClassName[class_name]))\n        query = '''\n<OMIQuery cqlQuery={qry} eventName={evname} omiNamespace=\"root/scx\" sampleRateInSeconds=\"{rate}\" storeType=\"local\">\n  <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns={columns}>\n    {mappings}\n  </Unpivot>\n</OMIQuery>'''.format(\n            qry=cql_query,\n            evname=quoteattr(_eventNames[group]),\n            columns=quoteattr(column_string),\n            rate=sample_rate,\n            mappings='\\n    '.join(mappings)\n        )\n        XmlUtil.addElement(doc, 'Events/OMI', ET.fromstring(query))\n    return\n"
  },
  {
    "path": "Diagnostic/Providers/__init__.py",
    "content": "# Providers module package\n"
  },
  {
    "path": "Diagnostic/README.md",
    "content": "# [DEPRECATED] Linux Azure Diagnostic (LAD) Extension\n\n> :warning: The Azure Diagnostic extension has been **deprecated** and has no support as of **March 31, 2026.** If you use the Azure Diagnostic extension to collect data, [migrate now to the new Azure Monitor agent](https://learn.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-migration-wad-lad).\n\nAllow the owner of a Linux-based Azure Virtual Machine to obtain diagnostic data.\n\nCurrent version is 3.0.129.\n\nLinux Azure Diagnostic (LAD) extension version 3.0 is released with the following changes:\n\n- Fully configurable Azure Portal metrics, including a broader set of metrics to choose from.\n- Syslog message collection is now opt-in (off by default), and customers can selectively pick and choose syslog facilities and minimum severities of their interests.\n- Customers can now use CLI to configure their Azure Linux VMs for Azure Portal VM metrics charting experiences.\n- Customers can now send any metrics and logs as Azure EventHubs events (additional Azure EventHubs charges may apply).\n- Customers can also store any metrics and logs in Azure Storage JSON blobs (additional Azure Storage charges may apply).\n\nLAD 3.0 is NOT compatible with LAD 2.3. Users of LAD 2.3 must first uninstall that extension before installing LAD 3.0.\n\nLAD 3.0 is installed and configured via Azure CLI, Azure PowerShell cmdlets, or Azure Resource Manager templates. The Azure Portal controls installation and configuration of LAD 2.3 only. The Azure Metrics UI can display performance counters collected by either version of LAD.\n\nPlease refer to [this document](https://docs.microsoft.com/azure/virtual-machines/linux/diagnostic-extension) for more details on configuring and using LAD 3.0. The tests folder contains [a sample JSON configuration](https://raw.githubusercontent.com/Azure/azure-linux-extensions/master/Diagnostic/tests/lad_2_3_compatible_portal_pub_settings.json) which sets LAD 3.0 to collecting exactly the same metrics and logs as the default configuration for LAD 2.3 collected. \n\n## Supported Linux Distributions\n\nList of supported Linux distributions is on https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#supported-linux-distributions\n\n## Debug\n\n- The status of the extension is reported back to Azure so that user can see the status on Azure Portal\n- The operation log of the extension is `/var/log/azure/Microsoft.Azure.Diagnostics.LinuxDiagnostic/<version>/` directory.\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/ \n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n"
  },
  {
    "path": "Diagnostic/Utils/LadDiagnosticUtil.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation  \n# All rights reserved.   \n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above\n# copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n#  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\n# Get elements from DiagnosticsMonitorConfiguration in LadCfg based on element name\ndef getDiagnosticsMonitorConfigurationElement(ladCfg, elementName):\n    if ladCfg and 'diagnosticMonitorConfiguration' in ladCfg:\n        if elementName in ladCfg['diagnosticMonitorConfiguration']:\n            return ladCfg['diagnosticMonitorConfiguration'][elementName]\n    return None\n\n\n# Get fileCfg form FileLogs in LadCfg\ndef getFileCfgFromLadCfg(ladCfg):\n    fileLogs = getDiagnosticsMonitorConfigurationElement(ladCfg, 'fileLogs')\n    if fileLogs and 'fileLogConfiguration' in fileLogs:\n        return fileLogs['fileLogConfiguration']\n    return None\n\n\n# Get resource Id from LadCfg\ndef getResourceIdFromLadCfg(ladCfg):\n    metricsConfiguration = getDiagnosticsMonitorConfigurationElement(ladCfg, 'metrics')\n    if metricsConfiguration and 'resourceId' in metricsConfiguration:\n        return metricsConfiguration['resourceId']\n    return None\n\n\n# Get event volume from LadCfg\ndef getEventVolumeFromLadCfg(ladCfg):\n    return getDiagnosticsMonitorConfigurationElement(ladCfg, 'eventVolume')\n\n\n# Get default sample rate from LadCfg\ndef getDefaultSampleRateFromLadCfg(ladCfg):\n    if ladCfg and 'sampleRateInSeconds' in ladCfg:\n        return ladCfg['sampleRateInSeconds']\n    return None\n\n\ndef getPerformanceCounterCfgFromLadCfg(ladCfg):\n    \"\"\"\n    Return the array of metric definitions\n    :param ladCfg:\n    :return: array of metric definitions\n    \"\"\"\n    performanceCounters = getDiagnosticsMonitorConfigurationElement(ladCfg, 'performanceCounters')\n    if performanceCounters and 'performanceCounterConfiguration' in performanceCounters:\n        return performanceCounters['performanceCounterConfiguration']\n    return None\n\n\ndef getAggregationPeriodsFromLadCfg(ladCfg):\n    \"\"\"\n    Return an array of aggregation periods as specified. 
def getAggregationPeriodsFromLadCfg(ladCfg):\n    \"\"\"\n    Return an array of aggregation periods as specified. If nothing appears in the config, the default is PT1H.\n    :param ladCfg:\n    :return: array of ISO 8601 intervals\n    :rtype: List(str)\n    \"\"\"\n    results = []\n    metrics = getDiagnosticsMonitorConfigurationElement(ladCfg, 'metrics')\n    if metrics and 'metricAggregation' in metrics:\n        for item in metrics['metricAggregation']:\n            if 'scheduledTransferPeriod' in item:\n                # assert isinstance(item['scheduledTransferPeriod'], str)\n                results.append(item['scheduledTransferPeriod'])\n    return results\n\n\ndef getSinkList(feature_config):\n    \"\"\"\n    Returns the list of sink names to which all data should be forwarded, according to this config\n    :param feature_config: The JSON config for a feature (e.g. the struct for \"performanceCounters\" or \"syslogEvents\")\n    :return: the list of names; might be an empty list\n    :rtype: [str]\n    \"\"\"\n    if feature_config and 'sinks' in feature_config and feature_config['sinks']:\n        return [sink_name.strip() for sink_name in feature_config['sinks'].split(',')]\n    return []\n\n\ndef getFeatureWideSinksFromLadCfg(ladCfg, feature_name):\n    \"\"\"\n    Returns the list of sink names to which all data for the given feature should be forwarded\n    :param ladCfg: The ladCfg JSON config\n    :param str feature_name: Name of the feature. Expected to be \"performanceCounters\" or \"syslogEvents\"\n    :return: the list of names; might be an empty list\n    :rtype: [str]\n    \"\"\"\n    return getSinkList(getDiagnosticsMonitorConfigurationElement(ladCfg, feature_name))\n\n\nclass SinkConfiguration:\n    def __init__(self):\n        self._sinks = {}\n\n    def insert_from_config(self, json):\n        \"\"\"\n        Walk through the sinksConfig JSON object and add all sinks within it. Every accepted sink is guaranteed to\n        have a 'name' and 'type' element.\n        :param json: A hash holding the body of a sinksConfig object\n        :return: A string containing warning messages, or an empty string\n        \"\"\"\n        msgs = []\n        if json and 'sink' in json:\n            for sink in json['sink']:\n                if 'name' in sink and 'type' in sink:\n                    self._sinks[sink['name']] = sink\n                else:\n                    msgs.append('Ignoring invalid sink definition {0}'.format(sink))\n        return '\\n'.join(msgs)\n\n    def get_sink_by_name(self, sink_name):\n        \"\"\"\n        Return the JSON object defining a particular sink.\n        :param sink_name: string name of sink\n        :return: JSON object or None\n        \"\"\"\n        if sink_name in self._sinks:\n            return self._sinks[sink_name]\n        return None\n\n    def get_all_sink_names(self):\n        \"\"\"\n        Return a list of all names of defined sinks.\n        :return: list of names\n        \"\"\"\n        return self._sinks.keys()\n\n    def get_sinks_by_type(self, sink_type):\n        \"\"\"\n        Return all defined sinks of the given type.\n        :param sink_type: the sink 'type' value to match\n        :return: list of sink JSON objects\n        \"\"\"\n        return [self._sinks[name] for name in self._sinks if self._sinks[name]['type'] == sink_type]\n"
  },
  {
    "path": "Diagnostic/Utils/ProviderUtil.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport re\nfrom collections import defaultdict\n\n\ndef GetCounterSetting(counter_spec, name):\n    \"\"\"\n    Retrieve a particular setting from a counter specification; if that setting is not present, return None.\n    :param counter_spec: A dict of mappings from the name of a setting to its associated value.\n    :param name: The name of the setting of interest.\n    :return: Either the value of the setting (if present in counterSpec) or None.\n    \"\"\"\n    if name in counter_spec:\n        return counter_spec[name]\n    return None\n\n\ndef IntervalToSeconds(specified_interval):\n    \"\"\"\n    Convert an ISO8601 duration string (e.g. PT5M, PT1H30M, PT30S) to a number of seconds.\n    :param specified_interval: ISO8601 duration string. Must not include units larger than Hours.\n    :return: An integer number of seconds. Raises ValueError if the duration string is syntactically invalid or beyond\n             the supported range.\n    \"\"\"\n    interval = specified_interval.upper()\n    if interval[0] != 'P':\n        raise ValueError('\"{0}\" is not an IS8601 duration string'.format(interval))\n    if interval[1] != 'T':\n        raise ValueError('IS8601 durations based on days or larger intervals are not supported: \"{0}\"'.format(interval))\n\n    seconds = 0\n    matches = re.findall(r'(\\d+)(S|M|H)', interval[2:].upper())\n    for qty, unit in matches:\n        qty = int(qty)\n        if unit == 'S':\n            seconds += qty\n        elif unit == 'M':\n            seconds += qty * 60\n        elif unit == 'H':\n            seconds += qty * 3600\n\n    if 0 == seconds:\n        raise ValueError('Could not parse interval specification \"{0}\"'.format(specified_interval))\n    return seconds\n\n_EventNameUniquifiers = defaultdict(int)\n\n\ndef MakeUniqueEventName(prefix):\n    \"\"\"\n    Generate a unique event name given a prefix string.\n    :param prefix: The prefix for the unique name.\n    :return: The unique name, with prefix.\n    \"\"\"\n    _EventNameUniquifiers[prefix] += 1\n    return '{0}{1:0>6}'.format(prefix, _EventNameUniquifiers[prefix])\n\n\nclass ParseException(Exception):\n    pass\n\n\nclass UnexpectedCounterType(ParseException):\n    pass\n\n\nclass InvalidCounterSpecification(ParseException):\n    pass\n\n\n"
  },
  {
    "path": "Diagnostic/Utils/XmlUtil.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation  \n# All rights reserved.   \n# MIT License  \n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n#  documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation\n#  the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n#  permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n#  the Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n#  WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n#  OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n#  OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport xml.etree.ElementTree as ET\n\n\ndef setXmlValue(xml,path,property,value,selector=[]):\n    elements = xml.findall(path)\n    for element in elements:\n        if selector and element.get(selector[0])!=selector[1]:\n            continue\n        if not property:\n            element.text = value\n        elif not element.get(property) or len(element.get(property))==0 :\n            element.set(property,value)\n\n\ndef getXmlValue(xml,path,property):\n    element = xml.find(path)\n    if element is not None:\n        return element.get(property)\n\n\ndef addElement(xml,path,el,selector=[],addOnlyOnce=False):\n    elements = xml.findall(path)\n    for element in elements:\n        if selector and element.get(selector[0])!=selector[1]:\n            continue\n        element.append(el)\n        if addOnlyOnce:\n            return\n\n\ndef createElement(schema):\n    return ET.fromstring(schema)\n\n\ndef removeElement(tree, parent_path, removed_element_name):\n    parents = tree.findall(parent_path)\n    for parent in parents:\n        element = parent.find(removed_element_name)\n        while element is not None:\n            parent.remove(element)\n            element = parent.find(removed_element_name)"
  },
  {
    "path": "Diagnostic/Utils/__init__.py",
    "content": "# Providers module package\n"
  },
  {
    "path": "Diagnostic/Utils/imds_util.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport datetime\nimport urllib2\nimport time\nimport traceback\n\n\ndef get_imds_data(node, json=True):\n    \"\"\"\n    Query IMDS endpoint for instance metadata and return the response as a Json string.\n\n    :param str node: Instance metadata node we are querying about\n    :param bool json: Indicates whether to query for Json output or not\n    :return: Queried IMDS result in string\n    :rtype: str\n    \"\"\"\n    if not node:\n        return None\n    separator = '' if node[0] == '/' else '/'\n    imds_url = 'http://169.254.169.254{0}{1}{2}'.format(\n        separator, node, '?format=json&api-version=latest_internal' if json else '')\n    imds_headers = {'Metadata': 'True'}\n    req = urllib2.Request(url=imds_url, headers=imds_headers)\n    resp = urllib2.urlopen(req)\n    data = resp.read()\n    data_str = data.decode('utf-8')\n    return data_str\n\n\nclass ImdsLogger:\n    \"\"\"\n    Periodically probes IMDS endpoint and log the result as WALA events.\n    \"\"\"\n\n    def __init__(self, ext_name, ext_ver, ext_op_type, ext_event_logger, ext_logger=None,\n                 imds_data_getter=get_imds_data, logging_interval_in_minutes=60):\n        \"\"\"\n        Constructor\n        :param str ext_name: Extension name (e.g., hutil.get_name())\n        :param str ext_ver: Extension version (e.g., hutil.get_version())\n        :param str ext_op_type: Extension operation type (e.g., HeartBeat)\n        :param ext_event_logger: Extension event logger (e.g., waagent.AddExtensionEvent)\n        :param ext_logger: Extension message logger (e.g., hutil.log)\n        :param imds_data_getter: IMDS data getter function (e.g., get_imds_data)\n        :param int logging_interval_in_minutes: Logging interval in minutes\n        \"\"\"\n        self._ext_name = ext_name\n        self._ext_ver = ext_ver\n        self._ext_op_type = ext_op_type\n        self._ext_logger = ext_logger  # E.g., hutil.log\n        self._ext_event_logger = ext_event_logger  # E.g., waagent.AddExtensionEvent\n        self._last_log_time = datetime.datetime.fromordinal(1)\n        self._imds_data_getter = imds_data_getter\n        self._logging_interval = datetime.timedelta(minutes=logging_interval_in_minutes)\n\n    def _ext_log_if_enabled(self, msg):\n  
      \"\"\"\n        Log an extension message if logger is specified.\n        :param str msg: Message to log\n        :return: None\n        \"\"\"\n        if self._ext_logger:\n            self._ext_logger(msg)\n\n    def log_imds_data_if_right_time(self, log_as_ext_event=False):\n        \"\"\"\n        Query and log IMDS data if it's right time to do so.\n        :param bool log_as_ext_event: Indicates whether to log IMDS data as a waagent/extension event.\n        :return: None\n        \"\"\"\n        now = datetime.datetime.now()\n        if now < self._last_log_time + self._logging_interval:\n            return\n\n        try:\n            imds_data = self._imds_data_getter('/metadata/instance/')\n        except Exception as e:\n            self._ext_log_if_enabled('Exception occurred while getting IMDS data: {0}\\n'\n                                     'stacktrace: {1}'.format(e, traceback.format_exc()))\n            imds_data = '{0}'.format(e)\n\n        msg = 'IMDS instance data = {0}'.format(imds_data)\n        if log_as_ext_event:\n            self._ext_event_logger(name=self._ext_name,\n                                   op=self._ext_op_type,\n                                   isSuccess=True,\n                                   version=self._ext_ver,\n                                   message=msg)\n        self._ext_log_if_enabled(msg)\n        self._last_log_time = now\n\n\nif __name__ == '__main__':\n\n    def fake_get_imds_data(node, json=True):\n        result = 'fake_get_imds_data(node=\"{0}\", json=\"{1}\")'.format(node, json)\n        print result\n        return result\n\n\n    def default_ext_logger(msg):\n        print 'default_ext_logger(msg=\"{0}\")'.format(msg)\n\n\n    def default_ext_event_logger(*args, **kwargs):\n        print 'default_ext_event_logger(*args, **kwargs)'\n        print 'args:'\n        for arg in args:\n            print arg\n        print 'kwargs:'\n        for k in kwargs:\n            print('\"{0}\"=\"{1}\"'.format(k, kwargs[k]))\n\n\n    imds_logger = ImdsLogger('Microsoft.OSTCExtensions.LinuxDiagnostic', '2.3.9021', 'Heartbeat',\n                             ext_logger=default_ext_logger, ext_event_logger=default_ext_event_logger,\n                             imds_data_getter=fake_get_imds_data, logging_interval_in_minutes=1)\n    start_time = datetime.datetime.now()\n    done = False\n    while not done:\n        now = datetime.datetime.now()\n        print 'Test loop iteration starting at {0}'.format(now)\n        imds_logger.log_imds_data_if_right_time()\n        if now >= start_time + datetime.timedelta(minutes=2):\n            done = True\n        else:\n            print 'Sleeping 10 seconds'\n            time.sleep(10)\n"
  },
  {
    "path": "Diagnostic/Utils/lad_exceptions.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nclass LadLoggingConfigException(Exception):\n    \"\"\"\n    Custom exception class for LAD logging (syslog & filelogs) config errors\n    \"\"\"\n    pass\n\n\nclass LadPerfCfgConfigException(Exception):\n    \"\"\"\n    Custom exception class for LAD perfCfg (raw OMI queries) config errors\n    \"\"\"\n    pass\n"
  },
  {
    "path": "Diagnostic/Utils/lad_ext_settings.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport base64\nimport copy\nimport json\nimport traceback\nimport Utils.LadDiagnosticUtil as LadUtil\nimport Utils.XmlUtil as XmlUtil\n\n\nclass ExtSettings(object):\n    \"\"\"\n    Wrapper class around any generic Azure extension settings Json objects.\n    TODO This class may better go to some place else (e.g., HandlerUtil.py).\n    \"\"\"\n    def __init__(self, handler_settings):\n        \"\"\"\n        Constructor\n        :param handler_settings: Json object (dictionary) decoded from the extension settings Json string.\n        \"\"\"\n        self._handler_settings = handler_settings if handler_settings else {}\n        public_settings = self._handler_settings.get('publicSettings')\n        self._public_settings = public_settings if public_settings else {}\n        protected_settings = self._handler_settings.get('protectedSettings')\n        self._protected_settings = protected_settings if protected_settings else {}\n\n    def get_handler_settings(self):\n        \"\"\"\n        Hanlder settings (Json dictionary) getter\n        :return: Handler settings Json object\n        \"\"\"\n        return self._handler_settings\n\n    def has_public_config(self, key):\n        \"\"\"\n        Determine if a particular setting is present in the public config\n        :param str key: The setting to look for\n        :return: True if the setting is present (regardless of its value)\n        :rtype: bool\n        \"\"\"\n        return key in self._public_settings\n\n    def read_public_config(self, key):\n        \"\"\"\n        Return the value of a particular public config setting\n        :param str key: The setting to retrieve\n        :return: The value of the setting if present; an empty string (*not* None) if the setting is not present\n        :rtype: str\n        \"\"\"\n        if key in self._public_settings:\n            return self._public_settings[key]\n        return ''\n\n    def read_protected_config(self, key):\n        \"\"\"\n        Return the value of a particular protected config setting\n        :param str key: The setting to retrive\n        :return: The value of the setting if present; an empty string (*not* None) if the setting is not present\n        :rtype: str\n        \"\"\"\n        if key in 
    def read_protected_config(self, key):\n        \"\"\"\n        Return the value of a particular protected config setting\n        :param str key: The setting to retrieve\n        :return: The value of the setting if present; an empty string (*not* None) if the setting is not present\n        :rtype: str\n        \"\"\"\n        if key in self._protected_settings:\n            return self._protected_settings[key]\n        return ''\n\n\nclass LadExtSettings(ExtSettings):\n    \"\"\"\n    LAD-specific extension settings object that supports LAD-specific member functions\n    \"\"\"\n    def __init__(self, handler_settings):\n        super(LadExtSettings, self).__init__(handler_settings)\n\n    def redacted_handler_settings(self):\n        \"\"\"\n        Get handler settings as a string after redacting secrets (for diagnostic purposes w/ Geneva telemetry)\n        :rtype: str\n        :return: String for the handler settings JSON object with secrets redacted.\n        \"\"\"\n        # The logic below could have been a general-purpose JSON tree walker, but since specific\n        # knowledge of where the secrets live has to be applied anyway, it's coded for this specific schema.\n        # Secrets are stored only in the following paths: .storageAccountSasToken, and .sinksConfig.sink[].sasURL.\n        # LAD 2.3 used to support storageAccountKey; although LAD 3.0 does not support it, some users might mistakenly\n        # supply it. We redact it, if present, even though we're going to throw an error later on; the protected\n        # settings are logged before we inspect them to pull out the credentials.\n\n        # Get and work on a copy of the handler settings dict. Note that it must be a deep copy!\n        # dict(self.get_handler_settings()) doesn't work!\n        handler_settings = copy.deepcopy(self.get_handler_settings())\n        protected_settings = handler_settings['protectedSettings']\n        if protected_settings:\n            if 'storageAccountSasToken' in protected_settings:\n                protected_settings['storageAccountSasToken'] = 'REDACTED_SECRET'\n            if 'storageAccountKey' in protected_settings:\n                protected_settings['storageAccountKey'] = 'REDACTED_SECRET'\n            if 'sinksConfig' in protected_settings and 'sink' in protected_settings['sinksConfig']:\n                for each_sink_dict in protected_settings['sinksConfig']['sink']:\n                    if 'sasURL' in each_sink_dict:\n                        each_sink_dict['sasURL'] = 'REDACTED_SECRET'\n        return json.dumps(handler_settings, sort_keys=True)\n\n
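    # Illustrative example (an editorial addition, not from the original file) of the redaction performed above:\n    #   {'protectedSettings': {'storageAccountSasToken': '?sv=...&sig=...'}}\n    # is logged as:\n    #   {'protectedSettings': {'storageAccountSasToken': 'REDACTED_SECRET'}}\n\n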
    def log_ext_settings_with_secrets_redacted(self, logger_log, logger_err):\n        \"\"\"\n        Log entire extension settings with secrets redacted. This was introduced to help us find any\n        misconfiguration issues related to the storageAccountEndPoint more easily, and later extended to log all\n        extension settings with secrets redacted, for better diagnostics.\n        :param logger_log: Normal logging function (e.g., hutil.log)\n        :param logger_err: Error logging function (e.g., hutil.error)\n        :return: None\n        \"\"\"\n        try:\n            msg = \"LAD settings with secrets redacted: {0}\".format(\n                self.redacted_handler_settings())\n            logger_log(msg)\n        except Exception as e:\n            logger_err(\"Failed to log LAD settings with secrets redacted. Error:{0}\\n\"\n                       \"Stacktrace: {1}\".format(e, traceback.format_exc()))\n\n    def get_resource_id(self):\n        \"\"\"\n        Try to get resourceId from LadCfg. If not present, try to fetch it from xmlCfg.\n        \"\"\"\n        lad_cfg = self.read_public_config('ladCfg')\n        resource_id = LadUtil.getResourceIdFromLadCfg(lad_cfg)\n        if not resource_id:\n            encoded_xml_cfg = self.read_public_config('xmlCfg').strip()\n            if encoded_xml_cfg:\n                xml_cfg = base64.b64decode(encoded_xml_cfg)\n                resource_id = XmlUtil.getXmlValue(XmlUtil.createElement(xml_cfg),\n                                                  'diagnosticMonitorConfiguration/metrics', 'resourceId')\n                # Azure portal uses xmlCfg which contains WadCfg which is pascal case\n                # Currently we will support both casings and deprecate one later\n                if not resource_id:\n                    resource_id = XmlUtil.getXmlValue(XmlUtil.createElement(xml_cfg),\n                                                      'DiagnosticMonitorConfiguration/Metrics', 'resourceId')\n        return resource_id\n\n    def get_syslogEvents_setting(self):\n        \"\"\"\n        Get 'ladCfg/syslogEvents' setting from LAD 3.0 public settings.\n        :return: A dictionary of syslog facilities and minSeverities to monitor. Refer to README.md for more details.\n        \"\"\"\n        return LadUtil.getDiagnosticsMonitorConfigurationElement(self.read_public_config('ladCfg'), 'syslogEvents')\n\n    def get_fileLogs_setting(self):\n        \"\"\"\n        Get 'fileLogs' setting from LAD 3.0 public settings.\n        :return: List of dictionaries specifying the file to monitor and the Azure table name for\n        destinations of the monitored file. Refer to README.md for more details.\n        \"\"\"\n        return self.read_public_config('fileLogs')\n\n    def get_mdsd_trace_option(self):\n        \"\"\"\n        Return traceFlags, if any, from public config\n        :rtype: str\n        :return: trace flags or an empty string\n        \"\"\"\n        flags = self.read_public_config('traceFlags')\n        if flags:\n            return \" -T {0}\".format(flags)\n        else:\n            return \"\"\n"
  },
  {
    "path": "Diagnostic/Utils/lad_logging_config.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom xml.etree import ElementTree as ET\n\nimport Utils.LadDiagnosticUtil as LadUtil\nfrom Utils.lad_exceptions import LadLoggingConfigException\nimport Utils.mdsd_xml_templates as mxt\nfrom Utils.omsagent_util import get_syslog_ng_src_name\n\n\nsyslog_src_name = 'mdsd.syslog'\n\n\nclass LadLoggingConfig:\n    \"\"\"\n    Utility class for obtaining syslog (rsyslog or syslog-ng) configurations for use with fluentd\n    (currently omsagent), and corresponding omsagent & mdsd configurations, based on the LAD 3.0\n    syslog config schema. This class also generates omsagent (fluentd) config for LAD 3.0's fileLogs settings\n    (using the fluentd tail plugin).\n    \"\"\"\n\n    def __init__(self, syslogEvents, fileLogs, sinksConfig, pkey_path, cert_path, encrypt_secret):\n        \"\"\"\n        Constructor to receive/store necessary LAD settings for the desired configuration generation.\n\n        :param dict syslogEvents: LAD 3.0 \"ladCfg\" - \"syslogEvents\" JSON object, or a False object if it's not given\n                             in the extension settings. 
An example is as follows:\n\n                             \"ladCfg\": {\n                                 \"syslogEvents\": {\n                                     \"sinks\": \"SyslogSinkName0\",\n                                     \"syslogEventConfiguration\": {\n                                         \"facilityName1\": \"minSeverity1\",\n                                         \"facilityName2\": \"minSeverity2\"\n                                     }\n                                 }\n                             }\n\n                             Only the JSON object corresponding to the \"syslogEvents\" key should be passed.\n\n                             facilityName1/2 is a syslog facility name (e.g., \"LOG_USER\", \"LOG_LOCAL0\").\n                             minSeverity1/2 is a syslog severity level (e.g., \"LOG_ERR\", \"LOG_CRIT\") or \"NONE\".\n                                 \"NONE\" means no logs from the facility will be captured (thus it's equivalent to\n                                  not specifying the facility at all).\n\n        :param dict fileLogs: LAD 3.0 \"fileLogs\" JSON object, or a False object if it's not given in the ext settings.\n                         An example is as follows:\n\n                         \"fileLogs\": {\n                             \"fileLogConfiguration\": [\n                                 {\n                                     \"file\": \"/var/log/mydaemonlog\",\n                                     \"table\": \"MyDaemonEvents\",\n                                     \"sinks\": \"FilelogSinkName1\"\n                                 },\n                                 {\n                                     \"file\": \"/var/log/myotherdaemonlog\",\n                                     \"table\": \"MyOtherDaemonEvents\",\n                                     \"sinks\": \"FilelogSinkName2\"\n                                 }\n                             ]\n                         }\n\n                         Only the JSON array corresponding to the \"fileLogConfiguration\" key should be passed.\n\n                         \"file\" is the full path of the log file to be watched and captured. \"table\" is for the\n                         Azure storage table into which the lines of the watched file will be placed (one row per line).\n        :param LadUtil.SinkConfiguration sinksConfig: SinkConfiguration object that's created out of the \"sinksConfig\"\n                    LAD 3.0 JSON setting. Refer to the LadUtil.SinkConfiguration documentation.\n        :param str pkey_path: Path to the VM's private key that should be passed to mdsd XML for decrypting encrypted\n                    secrets (EH SAS URL)\n        :param str cert_path: Path to the VM's certificate that should be used to encrypt secrets (EH SAS URL)\n        :param encrypt_secret: Function to encrypt a secret (string, 2nd param) with the provided cert path param (1st)\n        \"\"\"\n        self._syslogEvents = syslogEvents\n        self._fileLogs = fileLogs\n        self._sinksConfig = sinksConfig\n        self._pkey_path = pkey_path\n        self._cert_path = cert_path\n        self._encrypt_secret = encrypt_secret\n        self._fac_sev_map = None\n\n        try:\n            # Create facility-severity map. 
E.g.: { \"LOG_USER\" : \"LOG_ERR\", \"LOG_LOCAL0\", \"LOG_CRIT\" }\n            if self._syslogEvents:\n                self._fac_sev_map = self._syslogEvents['syslogEventConfiguration']\n            self._syslog_disabled = not self._fac_sev_map  # A convenience predicate\n\n            if self._fileLogs:\n                # Convert the 'fileLogs' JSON object array into a Python dictionary of 'file' - 'table'\n                # E.g., [{ 'file': '/var/log/mydaemonlog1', 'table': 'MyDaemon1Events', 'sinks': 'File1Sink'},\n                #        { 'file': '/var/log/mydaemonlog2', 'table': 'MyDaemon2Events', 'sinks': 'File2SinkA,File2SinkB'}]\n                self._file_table_map = dict([(entry['file'], entry['table'] if 'table' in entry else '')\n                                             for entry in self._fileLogs])\n                self._file_sinks_map = dict([(entry['file'], entry['sinks'] if 'sinks' in entry else '')\n                                             for entry in self._fileLogs])\n\n            self._rsyslog_config = None\n            self._syslog_ng_config = None\n            self._mdsd_syslog_config = None\n            self._mdsd_telegraf_config = None\n            self._mdsd_filelog_config = None\n        except KeyError as e:\n            raise LadLoggingConfigException(\"Invalid setting name provided (KeyError). Exception msg: {0}\".format(e))\n\n    def get_rsyslog_config(self):\n        \"\"\"\n        Returns rsyslog config (for use with omsagent) that corresponds to the syslogEvents or the syslogCfg\n        JSON object given in the construction parameters.\n\n        :rtype: str\n        :return: rsyslog config string that should be appended to /etc/rsyslog.d/95-omsagent.conf (new rsyslog)\n                 or to /etc/rsyslog.conf (old rsyslog)\n        \"\"\"\n        if not self._rsyslog_config:\n            if self._syslog_disabled:\n                self._rsyslog_config = ''\n            else:\n                # Generate/save/return rsyslog config string for the facility-severity pairs.\n                # E.g.: \"user.err @127.0.0.1:%SYSLOG_PORT%\\nlocal0.crit @127.0.0.1:%SYSLOG_PORT%\\n'\n                self._rsyslog_config = \\\n                    '\\n'.join('{0}.{1}  @127.0.0.1:%SYSLOG_PORT%'.format(syslog_name_to_rsyslog_name(fac),\n                                                                         syslog_name_to_rsyslog_name(sev))\n                              for fac, sev in self._fac_sev_map.iteritems()) + '\\n'\n        return self._rsyslog_config\n\n    def get_syslog_ng_config(self):\n        \"\"\"\n        Returns syslog-ng config (for use with omsagent) that corresponds to the syslogEvents or the syslogCfg\n        JSON object given in the construction parameters.\n\n        :rtype: str\n        :return: syslog-ng config string that should be appended to /etc/syslog-ng/syslog-ng.conf\n        \"\"\"\n        if not self._syslog_ng_config:\n            if self._syslog_disabled:\n                self._syslog_ng_config = ''\n            else:\n                # Generate/save/return syslog-ng config string for the facility-severity pairs.\n                # E.g.: \"log { source(src); filter(f_LAD_oms_f_user); filter(f_LAD_oms_ml_err); destination(d_LAD_oms); };\\nlog { source(src); filter(f_LAD_oms_f_local0); filter(f_LAD_oms_ml_crit); destination(d_LAD_oms); };\\n\"\n                self._syslog_ng_config = \\\n                    '\\n'.join('log {{ source({0}); filter(f_LAD_oms_f_{1}); filter(f_LAD_oms_ml_{2}); '\n                           
   'destination(d_LAD_oms); }};'.format(get_syslog_ng_src_name(),\n                                                                   syslog_name_to_rsyslog_name(fac),\n                                                                   syslog_name_to_rsyslog_name(sev))\n                              for fac, sev in self._fac_sev_map.iteritems()) + '\\n'\n        return self._syslog_ng_config\n\n    def parse_pt_duration(self, duration):\n        \"\"\"\n        Convert an ISO 8601 time duration into a seconds string.\n        E.g., 'PT2H3M20S' becomes '7400s' (2*3600 + 3*60 + 20).\n        :param str duration: The ISO 8601 duration string to be converted\n        :rtype: str\n        :return: Total number of seconds with an 's' suffix (telegraf interval format)\n        \"\"\"\n        total_seconds = 0\n        count = \"\"\n        for ch in duration:\n            if ch.lower() == 'h':\n                total_seconds += int(count)*3600\n                count = \"\"\n            elif ch.lower() == 'm':\n                total_seconds += int(count)*60\n                count = \"\"\n            elif ch.lower() == 's':\n                total_seconds += int(count)\n                count = \"\"\n            elif ch.isdigit():\n                count += ch\n        return str(total_seconds) + \"s\"\n\n    def parse_lad_perf_settings(self, ladconfig):\n        \"\"\"\n        Parses the LAD JSON config to create a list of entries per metric, along with its configuration,\n        as required by the telegraf config parser. See the examples below.\n        :param ladconfig: The LAD JSON config element\n\n        A sample metric JSON config (taken from the .settings file) can take two shapes. It may carry its own\n        'sampleRate' key; if not, the 'sampleRateInSeconds' key in the larger lad_cfg element is used as the default.\n\n        {\n            u'counterSpecifier': u'/builtin/network/packetstransmitted',\n            u'counter': u'packetstransmitted',\n            u'class': u'network',\n            u'sampleRate': u'PT15S',\n            u'type': u'builtin',\n            u'annotation': [{\n                    u'locale': u'en-us',\n                    u'displayName': u'Packets sent'\n                }],\n            u'unit': u'Count'\n        }\n\n        {\n            \"annotation\": [\n                {\n                    \"displayName\": \"Disk write guest OS\",\n                    \"locale\": \"en-us\"\n                }\n            ],\n            \"class\": \"disk\",\n            \"condition\": \"IsAggregate=TRUE\",\n            \"counter\": \"writebytespersecond\",\n            \"counterSpecifier\": \"/builtin/disk/writebytespersecond\",\n            \"type\": \"builtin\",\n            \"unit\": \"BytesPerSecond\"\n        }\n        \"\"\"\n        if not ladconfig:\n            return []\n        data = []\n        default_sample_rate = \"15s\"  # Lowest supported time interval\n        if \"sampleRateInSeconds\" in ladconfig and ladconfig[\"sampleRateInSeconds\"] != \"\":\n            default_sample_rate = str(ladconfig[\"sampleRateInSeconds\"]) + \"s\"  # E.g., converting 15 to \"15s\"\n\n        if 'diagnosticMonitorConfiguration' in ladconfig and \"performanceCounters\" in ladconfig['diagnosticMonitorConfiguration']:\n            data = ladconfig['diagnosticMonitorConfiguration'][\"performanceCounters\"]\n        else:\n            return []\n\n        if \"performanceCounterConfiguration\" not in data or len(data[\"performanceCounterConfiguration\"]) == 0:\n            return []\n\n        parsed_settings = []\n        perfconf = data[\"performanceCounterConfiguration\"]\n\n        for item in perfconf:\n            counter = {}\n            counter[\"displayName\"] = item[\"class\"].strip().lower() + \"->\" + item[\"annotation\"][0][\"displayName\"].strip().lower()\n            if \"sampleRate\" in item:\n                counter[\"interval\"] = self.parse_pt_duration(item[\"sampleRate\"])  # Convert ISO 8601 duration to a seconds string\n            else:\n                counter[\"interval\"] = default_sample_rate\n            parsed_settings.append(counter)\n\n        \"\"\"\n        Sample output after parsing the metrics:\n        [\n            {\n                \"displayName\" : \"network->packets sent\",\n                \"interval\" : \"15s\"\n            },\n        ]\n        \"\"\"\n        return parsed_settings\n\n    def get_mdsd_syslog_config(self, disableStorageAccount=False):\n        \"\"\"\n        Get mdsd XML config string for syslog use with omsagent in LAD 3.0.\n        :rtype: str\n        :return: XML string that should be added to the mdsd config XML tree for syslog use with omsagent in LAD 3.0.\n        \"\"\"\n        if not self._mdsd_syslog_config:\n            self._mdsd_syslog_config = self.__generate_mdsd_syslog_config(disableStorageAccount)\n        return self._mdsd_syslog_config\n\n    def __generate_mdsd_syslog_config(self, disableStorageAccount=False):\n        \"\"\"\n        Helper method to generate the mdsd syslog config\n        \"\"\"\n        if self._syslog_disabled:\n            return ''\n\n        # For basic syslog conf (single dest table): Source name is unified as 'mdsd.syslog' and\n        # dest table (eventName) is 'LinuxSyslog'. This is currently the only supported syslog conf scheme.\n        syslog_routeevents = ''\n        if not disableStorageAccount:\n            syslog_routeevents = mxt.per_RouteEvent_tmpl.format(event_name='LinuxSyslog', opt_store_type='')\n        # Add RouteEvent elements for specified \"sinks\" for \"syslogEvents\" feature\n        # Also add EventStreamingAnnotation for EventHub sinks\n        syslog_eh_urls = ''\n        for sink_name in LadUtil.getSinkList(self._syslogEvents):\n            if sink_name == 'LinuxSyslog':\n                raise LadLoggingConfigException(\"'LinuxSyslog' can't be used as a sink name.
\"\n                    \"It's reserved for default Azure Table name for syslog events.\")\n            routeevent, eh_url = self.__generate_routeevent_and_eh_url_for_extra_sink(sink_name,\n                                                                                      syslog_src_name)\n            syslog_routeevents += routeevent\n            syslog_eh_urls += eh_url\n\n        mdsd_event_source = ''\n        if syslog_routeevents:  # Do not add MdsdEventSource element if there's no associated RouteEvent generated.\n            mdsd_event_source = mxt.per_MdsdEventSource_tmpl.format(source=syslog_src_name,\n                                                                    routeevents=syslog_routeevents)\n\n        return mxt.top_level_tmpl_for_logging_only.format(\n            sources=mxt.per_source_tmpl.format(name=syslog_src_name), events=mdsd_event_source, eh_urls=syslog_eh_urls)\n\n    def get_mdsd_telegraf_config(self, namespaces):\n        \"\"\"\n        Get mdsd XML config string for telegraf use with mdsd in LAD 3.0.\n        This method is called during config generation to create source tags for mdsd xml\n        :param namespaces: The list of telegraf plugins being used to source the metrics requested by the user\n        :rtype: str\n        :return: XML string that should be added to the mdsd config XML tree for telegraf use with mdsd in LAD 3.0.\n        \"\"\"\n        if not self._mdsd_telegraf_config:\n            self._mdsd_telegraf_config = self.__generate_mdsd_telegraf_config(namespaces)\n        return self._mdsd_telegraf_config\n\n    def __generate_mdsd_telegraf_config(self, namespaces):\n        \"\"\"\n        Helper method to generate mdsd_telegraf_config\n        \"\"\"\n        if len(namespaces) == 0:\n            return ''\n\n        telegraf_sources = \"\"\n\n        for plugin in namespaces:\n            # # For telegraf conf we create a Source for each of the measurements(plugins) sent from telegraf\n            lad_specific_storage_plugin = \"storage-\" + plugin\n            telegraf_sources += mxt.per_source_tmpl.format(name=lad_specific_storage_plugin)\n\n        return mxt.top_level_tmpl_for_logging_only.format(sources=telegraf_sources, events=\"\", eh_urls=\"\")\n\n\n    def __generate_routeevent_and_eh_url_for_extra_sink(self, sink_name, src_name):\n        \"\"\"\n        Helper method to generate one RouteEvent element for each extra sink given.\n        Also generates an EventStreamingAnnotation element for EventHub sinks.\n        :param str sink_name: The name of the sink for the RouteEvent.\n        :param str src_name: The name of the ingested source that should be used for EventStreamingAnnotation.\n        :rtype str,str:\n        :return: A pair of the XML RouteEvent element string for the sink and the EventHubStreamingAnnotation\n                 XML string.\n        \"\"\"\n        sink = self._sinksConfig.get_sink_by_name(sink_name)\n        if not sink:\n            raise LadLoggingConfigException('Sink name \"{0}\" is not defined in sinksConfig'.format(sink_name))\n        sink_type = sink['type']\n        if not sink_type:\n            raise LadLoggingConfigException('Sink type for sink \"{0}\" is not defined in sinksConfig'.format(sink_name))\n        if sink_type == 'JsonBlob':\n            return mxt.per_RouteEvent_tmpl.format(event_name=sink_name,\n                                                  opt_store_type='storeType=\"JsonBlob\"'),\\\n                   ''  # No EventStreamingAnnotation for JsonBlob\n        elif 
sink_type == 'EventHub':\n            if 'sasURL' not in sink:\n                raise LadLoggingConfigException('sasURL is not specified for EventHub sink_name={0}'.format(sink_name))\n            # For syslog/filelogs (ingested events), the source name should be used for EventStreamingAnnotation name.\n            eh_url = mxt.per_eh_url_tmpl.format(eh_name=src_name, key_path=self._pkey_path,\n                                                enc_eh_url=self._encrypt_secret(self._cert_path, sink['sasURL']))\n            return '', eh_url  # No RouteEvent for logging event's EventHub sink\n        else:\n            raise LadLoggingConfigException('{0} sink type (for sink_name={1}) is not supported'.format(sink_type,\n                                                                                                        sink_name))\n\n    def get_mdsd_filelog_config(self):\n        \"\"\"\n        Get mdsd XML config string for filelog (tail) use with omsagent in LAD 3.0.\n        :rtype: str\n        :return: XML string that should be added to the mdsd config XML tree for filelog use with omsagent in LAD 3.0.\n        \"\"\"\n        if not self._mdsd_filelog_config:\n            self._mdsd_filelog_config = self.__generate_mdsd_filelog_config()\n        return self._mdsd_filelog_config\n\n    def __generate_mdsd_filelog_config(self):\n        \"\"\"\n        Helper method to generate oms_mdsd_filelog_config\n        \"\"\"\n        if not self._fileLogs:\n            return ''\n\n        # Per-file source name is 'mdsd.filelog<.path.to.file>' where '<.path.to.file>' is a full path\n        # with all '/' replaced by '.'.\n        filelogs_sources = ''\n        filelogs_mdsd_event_sources = ''\n        filelogs_eh_urls = ''\n        for file_key in sorted(self._file_table_map):\n            if not self._file_table_map[file_key] and not self._file_sinks_map[file_key]:\n                raise LadLoggingConfigException('Neither \"table\" nor \"sinks\" defined for file \"{0}\"'.format(file_key))\n            source_name = 'mdsd.filelog{0}'.format(file_key.replace('/', '.'))\n            filelogs_sources += mxt.per_source_tmpl.format(name=source_name)\n            per_file_routeevents = ''\n            if self._file_table_map[file_key]:\n                per_file_routeevents += mxt.per_RouteEvent_tmpl.format(event_name=self._file_table_map[file_key], opt_store_type='')\n            if self._file_sinks_map[file_key]:\n                for sink_name in self._file_sinks_map[file_key].split(','):\n                    routeevent, eh_url = self.__generate_routeevent_and_eh_url_for_extra_sink(sink_name, source_name)\n                    per_file_routeevents += routeevent\n                    filelogs_eh_urls += eh_url\n            if per_file_routeevents:  # Do not add MdsdEventSource element if there's no associated RouteEvent generated.\n                filelogs_mdsd_event_sources += \\\n                    mxt.per_MdsdEventSource_tmpl.format(source=source_name, routeevents=per_file_routeevents)\n        return mxt.top_level_tmpl_for_logging_only.format(sources=filelogs_sources, events=filelogs_mdsd_event_sources,\n                                                      eh_urls=filelogs_eh_urls)\n\n    def get_fluentd_syslog_src_config(self):\n        \"\"\"\n        Get Fluentd's syslog source config that should be used for this LAD's syslog configs.\n        :rtype: str\n        :return: Fluentd config string that should be overwritten to\n                 
/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf\n                 (after replacing '%SYSLOG_PORT%' with the assigned/picked port number)\n        \"\"\"\n        fluentd_syslog_src_config = \"\"\"\n<source>\n  type syslog\n  port %SYSLOG_PORT%\n  bind 127.0.0.1\n  protocol_type udp\n  include_source_host true\n  tag mdsd.syslog\n</source>\n\n# Generate fields expected for existing mdsd syslog collection schema.\n<filter mdsd.syslog.**>\n  type record_transformer\n  enable_ruby\n  <record>\n    # Fields for backward compatibility with Azure Shoebox V1 (Table storage)\n    Ignore \"syslog\"\n    Facility ${tag_parts[2]}\n    Severity ${tag_parts[3]}\n    EventTime ${time.strftime('%Y-%m-%dT%H:%M:%S%z')}\n    SendingHost ${record[\"source_host\"]}\n    Msg ${record[\"message\"]}\n    # Rename 'host' key, as mdsd will add 'Host' for Azure Table and it'll be confusing\n    hostname ${record[\"host\"]}\n  </record>\n  remove_keys host,message,source_host  # Renamed (duplicated) fields, so just remove\n</filter>\n\"\"\"\n        return '' if self._syslog_disabled else fluentd_syslog_src_config\n\n    def get_fluentd_filelog_src_config(self):\n        \"\"\"\n        Get Fluentd's filelog (tail) source config that should be used for this LAD's fileLogs settings.\n        :rtype: str\n        :return: Fluentd config string that should be overwritten to\n                 /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf\n        \"\"\"\n        if not self._fileLogs:\n            return ''\n\n        fluentd_tail_src_config_template = \"\"\"\n# For all monitored files\n<source>\n  @type tail\n  path {file_paths}\n  pos_file /var/opt/microsoft/omsagent/LAD/tmp/filelogs.pos\n  tag mdsd.filelog.*\n  format none\n  message_key Msg  # LAD uses \"Msg\" as the field name\n</source>\n\n# Add FileTag field (existing LAD behavior)\n<filter mdsd.filelog.**>\n  @type record_transformer\n  <record>\n    FileTag ${{tag_suffix[2]}}\n  </record>\n</filter>\n\"\"\"\n        return fluentd_tail_src_config_template.format(file_paths=','.join(self._file_table_map.keys()))\n\n    def get_fluentd_out_mdsd_config(self):\n        \"\"\"\n        Get Fluentd's out_mdsd output config that should be used for LAD.\n        TODO This is not really syslog-specific, so it should be moved out of here.\n        :rtype: str\n        :return: Fluentd config string that should be overwritten to\n                 /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf\n        \"\"\"\n        fluentd_out_mdsd_config_template = \"\"\"\n# Output to mdsd\n<match mdsd.**>\n    type mdsd\n    log_level warn\n    djsonsocket /var/run/mdsd/lad_mdsd_djson.socket  # Full path to mdsd dynamic json socket file\n    acktimeoutms 5000  # max time in milliseconds to wait for mdsd acknowledge response.
 If 0, no wait.\n{tag_regex_cfg_line}    num_threads 1\n    buffer_chunk_limit 1000k\n    buffer_type file\n    buffer_path /var/opt/microsoft/omsagent/LAD/state/out_mdsd*.buffer\n    buffer_queue_limit 128\n    flush_interval 10s\n    retry_limit 3\n    retry_wait 10s\n</match>\n\"\"\"\n        tag_regex_cfg_line = '' if self._syslog_disabled \\\n            else r\"\"\"    mdsd_tag_regex_patterns [ \"^mdsd\\\\.syslog\" ] # fluentd tag patterns whose match will be used as mdsd source name\n\"\"\"\n        return fluentd_out_mdsd_config_template.format(tag_regex_cfg_line=tag_regex_cfg_line)\n\n\nsyslog_name_to_rsyslog_name_map = {\n    # facilities\n    'LOG_AUTH': 'auth',\n    'LOG_AUTHPRIV': 'authpriv',\n    'LOG_CRON': 'cron',\n    'LOG_DAEMON': 'daemon',\n    'LOG_FTP': 'ftp',\n    'LOG_KERN': 'kern',\n    'LOG_LOCAL0': 'local0',\n    'LOG_LOCAL1': 'local1',\n    'LOG_LOCAL2': 'local2',\n    'LOG_LOCAL3': 'local3',\n    'LOG_LOCAL4': 'local4',\n    'LOG_LOCAL5': 'local5',\n    'LOG_LOCAL6': 'local6',\n    'LOG_LOCAL7': 'local7',\n    'LOG_LPR': 'lpr',\n    'LOG_MAIL': 'mail',\n    'LOG_NEWS': 'news',\n    'LOG_SYSLOG': 'syslog',\n    'LOG_USER': 'user',\n    'LOG_UUCP': 'uucp',\n    # severities\n    'LOG_EMERG': 'emerg',\n    'LOG_ALERT': 'alert',\n    'LOG_CRIT': 'crit',\n    'LOG_ERR': 'err',\n    'LOG_WARNING': 'warning',\n    'LOG_NOTICE': 'notice',\n    'LOG_INFO': 'info',\n    'LOG_DEBUG': 'debug'\n}\n\n\ndef syslog_name_to_rsyslog_name(syslog_name):\n    \"\"\"\n    Convert a syslog name (e.g., \"LOG_USER\") to the corresponding rsyslog name (e.g., \"user\")\n    :param str syslog_name: A syslog name for a facility (e.g., \"LOG_USER\") or a severity (e.g., \"LOG_ERR\")\n    :rtype: str\n    :return: Corresponding rsyslog name (e.g., \"user\" or \"err\")\n    \"\"\"\n    if syslog_name == '*':\n        # We accept '*' as a facility name (also as a severity name, though it's not required)\n        # to allow customers to collect logs from the reserved syslog facility numeric IDs (12-15)\n        return '*'\n    if syslog_name not in syslog_name_to_rsyslog_name_map:\n        raise LadLoggingConfigException('Invalid syslog name given: {0}'.format(syslog_name))\n    return syslog_name_to_rsyslog_name_map[syslog_name]\n\n\ndef copy_sub_elems(dst_xml, src_xml, path):\n    \"\"\"\n    Copy the sub-elements of the element at the given path in src_xml into the corresponding element of dst_xml.\n    :param xml.etree.ElementTree.ElementTree dst_xml: Python xml tree object to which sub-elements will be copied.\n    :param xml.etree.ElementTree.ElementTree src_xml: Python xml tree object from which sub-elements will be copied.\n    :param str path: The path of the element whose sub-elements will be copied.\n    :return: None.
 dst_xml will be updated with copied sub-elements\n    \"\"\"\n    dst_elem = dst_xml.find(path)\n    src_elem = src_xml.find(path)\n    if src_elem is None:\n        return\n    for sub_elem in src_elem:\n        dst_elem.append(sub_elem)\n\n\ndef copy_source_mdsdevent_eh_url_elems(mdsd_xml_tree, mdsd_logging_xml_string):\n    \"\"\"\n    Copy MonitoringManagement/Sources/Source, MonitoringManagement/Events/MdsdEvents/MdsdEventSource,\n    and MonitoringManagement/EventStreamingAnnotations/EventStreamingAnnotation elements\n    from mdsd_logging_xml_string to mdsd_xml_tree.\n    Used to actually add the generated logging (syslog/filelog) mdsd config XML elements to the mdsd config XML tree.\n\n    :param xml.etree.ElementTree.ElementTree mdsd_xml_tree: Python xml.etree.ElementTree object that's generated from mdsd config XML template\n    :param str mdsd_logging_xml_string: XML string containing the generated logging (syslog/filelog) mdsd config XML elements.\n            See oms_syslog_mdsd_*_expected_xpaths member variables in test_lad_logging_config.py for examples in XPATHS format.\n    :return: None. mdsd_xml_tree object will contain the added elements.\n    \"\"\"\n    if not mdsd_logging_xml_string:\n        return\n\n    mdsd_logging_xml_tree = ET.ElementTree(ET.fromstring(mdsd_logging_xml_string))\n\n    # Copy Source elements (sub-elements of Sources element)\n    copy_sub_elems(mdsd_xml_tree, mdsd_logging_xml_tree, 'Sources')\n\n    # Copy MdsdEventSource elements (sub-elements of Events/MdsdEvents element)\n    copy_sub_elems(mdsd_xml_tree, mdsd_logging_xml_tree, 'Events/MdsdEvents')\n\n    # Copy EventStreamingAnnotation elements (sub-elements of EventStreamingAnnotations element)\n    copy_sub_elems(mdsd_xml_tree, mdsd_logging_xml_tree, 'EventStreamingAnnotations')\n"
  },
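To make the duration handling in lad_logging_config.py above concrete, here is a minimal standalone sketch of the same parsing algorithm (illustration only, not a file in this repo); the asserts check the example from the docstring:

# Standalone sketch of the parse_pt_duration() algorithm (illustration only; not part of the extension).
def parse_pt_duration(duration):
    total_seconds = 0
    count = ""
    for ch in duration:
        if ch.lower() == 'h':        # hours component
            total_seconds += int(count) * 3600
            count = ""
        elif ch.lower() == 'm':      # minutes component
            total_seconds += int(count) * 60
            count = ""
        elif ch.lower() == 's':      # seconds component
            total_seconds += int(count)
            count = ""
        elif ch.isdigit():           # accumulate digits of the current number
            count += ch
    return str(total_seconds) + "s"  # telegraf-style interval string

assert parse_pt_duration("PT2H3M20S") == "7400s"  # 2*3600 + 3*60 + 20
assert parse_pt_duration("PT15S") == "15s"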
  {
    "path": "Diagnostic/Utils/mdsd_xml_templates.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# Various XML templates definitions for use in constructing mdsd XML config file.\n\nper_eh_url_tmpl = \"\"\"    <EventStreamingAnnotation name=\"{eh_name}\">\n       <EventPublisher>\n         <Key decryptKeyPath=\"{key_path}\">{enc_eh_url}</Key>\n       </EventPublisher>\n    </EventStreamingAnnotation>\n\"\"\"\n\n\ntop_level_tmpl_for_logging_only = \"\"\"\n<MonitoringManagement eventVersion=\"2\" namespace=\"\" timestamp=\"2014-12-01T20:00:00.000\" version=\"1.0\">\n  <Sources>\n{sources}  </Sources>\n\n  <Events>\n    <MdsdEvents>\n{events}    </MdsdEvents>\n  </Events>\n\n  <EventStreamingAnnotations>\n{eh_urls}  </EventStreamingAnnotations>\n</MonitoringManagement>\n\"\"\"\n\n\nper_source_tmpl = \"\"\"    <Source name=\"{name}\" dynamic_schema=\"true\" />\n\"\"\"\n\n\nper_MdsdEventSource_tmpl = \"\"\"      <MdsdEventSource source=\"{source}\">\n        {routeevents}\n      </MdsdEventSource>\n\"\"\"\n\n\nper_RouteEvent_tmpl = \"\"\"\n    <RouteEvent dontUsePerNDayTable=\"true\" eventName=\"{event_name}\" priority=\"High\" {opt_store_type} />\n\"\"\"\n\n\nderived_event = \"\"\"\n<DerivedEvent duration=\"{interval}\" eventName=\"{target}\" isFullName=\"true\" source=\"{source}\" storeType=\"{type}\"/>\n\n\"\"\"\n\n\nlad_query = '<LADQuery columnName=\"CounterName\" columnValue=\"Average\" partitionKey=\"\" />'\n\n\nobo_field = '<OboDirectPartitionField name=\"{name}\" value=\"{value}\" />'\n\n# OMI is not used anymore\n\nentire_xml_cfg_tmpl = \"\"\"\n<MonitoringManagement eventVersion=\"2\" namespace=\"\" timestamp=\"2017-03-27T19:45:00.000\" version=\"1.0\">\n  <Accounts>\n    <Account account=\"\" isDefault=\"true\" key=\"\" moniker=\"moniker\" tableEndpoint=\"\" blobEndpoint=\"\" />\n    <SharedAccessSignature account=\"\" isDefault=\"true\" key=\"\" moniker=\"moniker\" tableEndpoint=\"\" blobEndpoint=\"\" />\n  </Accounts>\n\n  <Management defaultRetentionInDays=\"90\" eventVolume=\"\">\n    <Identity>\n      <IdentityComponent name=\"DeploymentId\" />\n      <IdentityComponent name=\"Host\" useComputerName=\"true\" />\n    </Identity>\n    <AgentResourceUsage diskQuotaInMB=\"50000\" />\n  </Management>\n\n  <Schemas>\n  </Schemas>\n\n  <Sources>\n  </Sources>\n\n  <Events>\n    <MdsdEvents>\n    </MdsdEvents>\n\n    <DerivedEvents>\n    </DerivedEvents>\n  </Events>\n\n  <!--  OMI is not used 
anymore -->\n\n  <EventStreamingAnnotations>\n  </EventStreamingAnnotations>\n\n</MonitoringManagement>\n\"\"\""
  },
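A quick sketch of how the templates in mdsd_xml_templates.py compose, mirroring what __generate_mdsd_syslog_config() does for the default 'mdsd.syslog' -> 'LinuxSyslog' route (hypothetical driver snippet, not part of the repo):

import Utils.mdsd_xml_templates as mxt  # module path as laid out above

# One RouteEvent routing events to the default 'LinuxSyslog' Azure Table (no storeType override)
route = mxt.per_RouteEvent_tmpl.format(event_name='LinuxSyslog', opt_store_type='')
# Wrap the RouteEvent in an MdsdEventSource bound to the 'mdsd.syslog' source
event = mxt.per_MdsdEventSource_tmpl.format(source='mdsd.syslog', routeevents=route)
# Compose the full <MonitoringManagement> document (no EventHub sinks in this example)
xml = mxt.top_level_tmpl_for_logging_only.format(
    sources=mxt.per_source_tmpl.format(name='mdsd.syslog'),
    events=event,
    eh_urls='')
print(xml)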
  {
    "path": "Diagnostic/Utils/misc_helpers.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\nimport tempfile\nimport re\nimport string\nimport traceback\nimport xml.dom.minidom\nimport binascii\n\nfrom Utils.WAAgentUtil import waagent\nfrom Utils.lad_exceptions import LadLoggingConfigException\n\n\ndef get_extension_operation_type(command):\n    if re.match(\"^([-/]*)(enable)\", command):\n        return waagent.WALAEventOperation.Enable\n    if re.match(\"^([-/]*)(daemon)\", command):   # LAD-specific extension operation (invoked from \"./diagnostic.py -enable\")\n        return \"Daemon\"\n    if re.match(\"^([-/]*)(install)\", command):\n        return waagent.WALAEventOperation.Install\n    if re.match(\"^([-/]*)(disable)\", command):\n        return waagent.WALAEventOperation.Disable\n    if re.match(\"^([-/]*)(uninstall)\", command):\n        return waagent.WALAEventOperation.Uninstall\n    if re.match(\"^([-/]*)(update)\", command):\n        return waagent.WALAEventOperation.Update\n\n\ndef wala_event_type_for_telemetry(ext_op_type):\n    return \"HeartBeat\" if ext_op_type == \"Daemon\" else ext_op_type\n\n\ndef get_storage_endpoints_with_account(account, endpoint_without_account):\n    endpoint = endpoint_without_account\n    if endpoint:\n        parts = endpoint.split('//', 1)\n        if len(parts) > 1:\n            tableEndpoint = parts[0]+'//'+account+\".table.\"+parts[1]\n            blobEndpoint = parts[0]+'//'+account+\".blob.\"+parts[1]\n        else:\n            tableEndpoint = 'https://'+account+\".table.\"+parts[0]\n            blobEndpoint = 'https://'+account+\".blob.\"+parts[0]\n    else:\n        tableEndpoint = 'https://'+account+'.table.core.windows.net'\n        blobEndpoint = 'https://'+account+'.blob.core.windows.net'\n    return (tableEndpoint, blobEndpoint)\n\n\ndef check_suspected_memory_leak(pid, logger_err):\n    \"\"\"\n    Check suspected memory leak of a process, by inspecting /proc/<pid>/status's VmRSS value.\n    :param pid: ID of the process we are checking.\n    :param logger_err: Error logging function (e.g., hutil.error)\n    :return (bool, int): Bool indicating whether memory leak is suspected. Int for memory usage in KB in true case.\n    \"\"\"\n    memory_leak_threshold_in_KB = 2000000  # Roughly 2GB. 
TODO: Make it configurable or automatically calculated\n    memory_usage_in_KB = 0\n    memory_leak_suspected = False\n\n    try:\n        # Check /proc/[pid]/status file for \"VmRSS\" to find out the process's virtual memory usage\n        # Note: \"VmSize\" for some reason starts out very high (>2000000) at this moment, so can't use that.\n        with open(\"/proc/{0}/status\".format(pid)) as proc_file:\n            for line in proc_file:\n                if line.startswith(\"VmRSS:\"):  # Example line: \"VmRSS:   33904 kB\"\n                    memory_usage_in_KB = int(line.split()[1])\n                    memory_leak_suspected = memory_usage_in_KB > memory_leak_threshold_in_KB\n                    break\n    except Exception as e:\n        # Not to throw in case any statement above fails (e.g., invalid pid). Just log.\n        logger_err(\"Failed to check memory usage of pid={0}.\\nError: {1}\\nTrace:\\n{2}\".format(pid, e, traceback.format_exc()))\n\n    return memory_leak_suspected, memory_usage_in_KB\n\n\nclass LadLogHelper(object):\n    \"\"\"\n    Various LAD log helper functions encapsulated here, so that we don't have to tag along all the parameters.\n    \"\"\"\n\n    def __init__(self, logger_log, logger_error, waagent_event_adder, status_reporter, ext_name, ext_ver):\n        \"\"\"\n        Constructor\n        :param logger_log: Normal logging function (e.g., hutil.log)\n        :param logger_error: Error logging function (e.g., hutil.error)\n        :param waagent_event_adder: waagent event add function (waagent.AddExtensionEvent)\n        :param status_reporter: waagent/extension status report function (hutil.do_status_report)\n        :param ext_name: Extension name (hutil.get_name())\n        :param ext_ver: Extension version (hutil.get_extension_version())\n        \"\"\"\n        self._logger_log = logger_log\n        self._logger_error = logger_error\n        self._waagent_event_adder = waagent_event_adder\n        self._status_reporter = status_reporter\n        self._ext_name = ext_name\n        self._ext_ver = ext_ver\n\n    def log_suspected_memory_leak_and_kill_mdsd(self, memory_usage_in_KB, mdsd_process, ext_op):\n        \"\"\"\n        Log suspected-memory-leak message both in ext logs and as a waagent event.\n        :param memory_usage_in_KB: Memory usage in KB (to be included in the log)\n        :param mdsd_process: Python Process object for the mdsd process to kill\n        :param ext_op: Extension operation type to use for waagent event (waagent.WALAEventOperation.HeartBeat)\n        :return: None\n        \"\"\"\n        memory_leak_msg = \"Suspected mdsd memory leak (Virtual memory usage: {0}MB). 
\" \\\n                          \"Recycling mdsd to self-mitigate.\".format(int((memory_usage_in_KB + 1023) / 1024))\n        self._logger_log(memory_leak_msg)\n        # Add a telemetry for a possible statistical analysis\n        self._waagent_event_adder(name=self._ext_name,\n                                  op=ext_op,\n                                  isSuccess=True,\n                                  version=self._ext_ver,\n                                  message=memory_leak_msg)\n        mdsd_process.kill()\n\n    def report_mdsd_dependency_setup_failure(self, ext_event_type, failure_msg):\n        \"\"\"\n        Report mdsd dependency setup failure to 3 destinations (ext log, status report, agent event)\n        :param ext_event_type: Type of extension event being performed (e.g., 'HeartBeat')\n        :param failure_msg: Dependency setup failure message to be added to the logs\n        :return: None\n        \"\"\"\n        dependencies_err_log_msg = \"Failed to set up mdsd dependencies: {0}\".format(failure_msg)\n        self._logger_error(dependencies_err_log_msg)\n        self._status_reporter(ext_event_type, 'error', '1', dependencies_err_log_msg)\n        self._waagent_event_adder(name=self._ext_name,\n                                  op=ext_event_type,\n                                  isSuccess=False,\n                                  version=self._ext_ver,\n                                  message=dependencies_err_log_msg)\n\n    def log_and_report_failed_config_generation(self, ext_event_type, config_invalid_reason, redacted_handler_settings):\n        \"\"\"\n        Report failed config generation from configurator.generate_all_configs().\n        :param str ext_event_type: Type of extension event being performed (most likely 'HeartBeat')\n        :param str config_invalid_reason: Msg from configurator.generate_all_configs()\n        :param str redacted_handler_settings: JSON string for the extension's protected/public settings after redacting\n                    secrets in the protected settings. This is for logging to Geneva for diagnostic purposes.\n        :return: None\n        \"\"\"\n        config_invalid_log = \"Invalid config settings given: \" + config_invalid_reason + \\\n                             \". Can't proceed, although this install/enable operation is reported as successful so \" \\\n                             \"the VM can complete successful startup.\"\n        self._logger_log(config_invalid_log)\n        self._status_reporter(ext_event_type, 'success', '0', config_invalid_log)\n        self._waagent_event_adder(name=self._ext_name,\n                                  op=ext_event_type,\n                                  isSuccess=True,  # Note this is True, because it is a user error.\n                                  version=self._ext_ver,\n                                  message=\"Invalid handler settings encountered: {0}\".format(redacted_handler_settings))\n\n    def log_and_report_invalid_mdsd_cfg(self, ext_event_type, config_validate_cmd_msg, mdsd_cfg_xml):\n        \"\"\"\n        Report invalid result from 'mdsd -v -c xmlCfg.xml'\n        :param ext_event_type: Type of extension event being performed (most likely 'HeartBeat')\n        :param config_validate_cmd_msg: Output of 'mdsd -v -c xmlCfg.xml'\n        :param mdsd_cfg_xml: Content of xmlCfg.xml to be sent to Geneva\n        :return: None\n        \"\"\"\n        message = \"Problem(s) detected in generated mdsd configuration. 
Can't enable, although this install/enable \" \\\n                  \"operation is reported as successful so the VM can complete successful startup. Linux Diagnostic \" \\\n                  \"Extension will exit. Config validation message: {0}\".format(config_validate_cmd_msg)\n        self._logger_log(message)\n        self._status_reporter(ext_event_type, 'success', '0', message)\n        self._waagent_event_adder(name=self._ext_name,\n                                  op=ext_event_type,\n                                  isSuccess=True,  # Note this is True, because it is a user error.\n                                  version=self._ext_ver,\n                                  message=\"Problem(s) detected in generated mdsd configuration: {0}\".format(mdsd_cfg_xml))\n\n\ndef read_uuid():\n    uuid = ''\n    uuid_file_path = '/sys/class/dmi/id/product_uuid'\n    try:\n        with open(uuid_file_path) as f:\n            uuid = f.readline().strip()\n    except Exception as e:\n        raise LadLoggingConfigException('read_uuid() failed: Unable to open uuid file {0}'.format(uuid_file_path))\n    if not uuid:\n        raise LadLoggingConfigException('read_uuid() failed: Empty content in uuid file {0}'.format(uuid_file_path))\n    return uuid\n\n\ndef encrypt_secret_with_cert(run_command, logger, cert_path, secret):\n    \"\"\"\n    update_account_settings() helper.\n    :param run_command: Function to run an arbitrary command\n    :param logger: Function to log error messages\n    :param cert_path: Cert file path\n    :param secret: Secret to encrypt\n    :return: Encrypted secret string. None if openssl command exec fails.\n    \"\"\"\n    f = tempfile.NamedTemporaryFile(suffix='mdsd', delete=True)\n    # Have openssl write to our temporary file (on Linux we don't have an exclusive lock on the temp file).\n    # openssl smime, when asked to put output in a file, simply overwrites the file; it does not unlink/creat or\n    # creat/rename.\n    cmd = \"echo -n '{0}' | openssl smime -aes256 -encrypt -outform DER -out {1} {2}\"\n    cmd_to_run = cmd.format(secret, f.name, cert_path)\n    ret_status, ret_msg = run_command(cmd_to_run, should_log=False)\n    if ret_status != 0:\n        logger(\"Encrypting storage secret failed with the following message: \" + ret_msg)\n        return None\n    encrypted_secret = f.read()\n    f.close()   # Deletes the temp file\n    return binascii.b2a_hex(encrypted_secret).upper()\n\n\ndef tail(log_file, output_size=1024):\n    if not os.path.exists(log_file):\n        return \"\"\n    pos = min(output_size, os.path.getsize(log_file))\n    with open(log_file, \"r\") as log:\n        log.seek(-pos, 2)\n        buf = log.read(output_size)\n        buf = filter(lambda x: x in string.printable, buf)\n        return buf.decode(\"ascii\", \"ignore\")\n\n\ndef update_selinux_settings_for_rsyslogomazuremds(run_command, ext_dir):\n    # This is still needed for Redhat-based distros, which still require SELinux to be allowed\n    # for even Unix domain sockets.\n    # Anyway, we no longer use 'semanage' (so no need to install policycoreutils-python).\n    # We instead compile from the bundled SELinux module def for lad_mdsd\n    # TODO Either check the output of these commands or run without capturing output\n    if os.path.exists(\"/usr/sbin/semodule\") or os.path.exists(\"/sbin/semodule\"):\n        run_command('checkmodule -M -m -o {0}/lad_mdsd.mod {1}/lad_mdsd.te'.format(ext_dir, ext_dir))\n        run_command('semodule_package -o {0}/lad_mdsd.pp -m {1}/lad_mdsd.mod'.format(ext_dir, ext_dir))\n        run_command('semodule -u {0}/lad_mdsd.pp'.format(ext_dir))\n\n\ndef get_mdsd_proxy_config(waagent_setting, ext_settings, logger):\n    # mdsd http proxy setting\n    proxy_setting_name = 'mdsdHttpProxy'\n    proxy_config = waagent_setting  # waagent.HttpProxyConfigString from /etc/waagent.conf has highest priority\n    if not proxy_config:\n        proxy_config = ext_settings.read_protected_config(proxy_setting_name)  # Protected setting has next priority\n    if not proxy_config:\n        proxy_config = ext_settings.read_public_config(proxy_setting_name)\n    if not isinstance(proxy_config, basestring):\n        logger('Error: mdsdHttpProxy config is not a string. Ignored.')\n    else:\n        proxy_config = proxy_config.strip()\n        if proxy_config:\n            logger(\"mdsdHttpProxy setting was given and will be passed to mdsd, \"\n                   \"but not logged here in case there's a password in it\")\n            return proxy_config\n    return ''\n\n\ndef escape_nonalphanumerics(data):\n    return ''.join([ch if ch.isalnum() else \":{0:04X}\".format(ord(ch)) for ch in data])\n\n\n# TODO Should this be placed in WAAgentUtil.py?\ndef get_deployment_id_from_hosting_env_cfg(waagent_dir, logger_log, logger_error):\n    \"\"\"\n    Get deployment ID from waagent dir's HostingEnvironmentConfig.xml.\n\n    :param waagent_dir: Waagent dir path (/var/lib/waagent)\n    :param logger_log: Normal logging function (hutil.log)\n    :param logger_error: Error logging function (hutil.error)\n    :return: Obtained deployment ID string if the hosting env cfg xml exists & deployment ID is found.\n             \"unknown\" if the xml exists, but deployment ID can't be found.\n             None if the xml does not exist.\n    \"\"\"\n    identity = \"unknown\"\n    env_cfg_path = os.path.join(waagent_dir, \"HostingEnvironmentConfig.xml\")\n    if not os.path.exists(env_cfg_path):\n        logger_log(\"No Deployment ID (not running in a hosted environment)\")\n        return None\n\n    try:\n        with open(env_cfg_path, 'r') as env_cfg_file:\n            xml_text = env_cfg_file.read()\n        dom = xml.dom.minidom.parseString(xml_text)\n        deployment = dom.getElementsByTagName(\"Deployment\")\n        name = deployment[0].getAttribute(\"name\")\n        if name:\n            identity = name\n            logger_log(\"Deployment ID found: {0}.\".format(identity))\n    except Exception as e:\n        # use fallback identity\n        logger_error(\"Failed to retrieve deployment ID. Error:{0}\\nStacktrace: {1}\".format(e, traceback.format_exc()))\n\n    return identity\n\n\ndef write_lad_pids_to_file(pid_file_path, py_pid, mdsd_pid=None):\n    \"\"\"\n    Write LAD process IDs to file\n    :param int py_pid: PID of diagnostic.py\n    :param int mdsd_pid: PID of mdsd or None (when called before mdsd is started)\n    :param str pid_file_path: Path of the file to be written\n    :return: None\n    \"\"\"\n    with open(pid_file_path, 'w') as f:\n        f.write(str(py_pid) + '\\n')\n        if mdsd_pid is not None:\n            f.write(str(mdsd_pid) + '\\n')\n\n\ndef append_string_to_file(string, filepath):\n    \"\"\"\n    Append string content to file\n    :param string: A str object that holds the content to be appended to the file\n    :param filepath: Path to the file to be appended\n    :return: None\n    \"\"\"\n    with open(filepath, 'a') as f:\n        f.write(string)\n\n\ndef read_file_to_string(filepath):\n    \"\"\"\n    Read entire file and return it as string. 
If file can't be read, return \"Can't read <filepath>\"\n    :param str filepath: Path of the file to read\n    :rtype: str\n    :return: Content of the file in a single string, or \"Can't read <filepath>\" if file can't be read.\n    \"\"\"\n    try:\n        with open(filepath) as f:\n            return f.read()\n    except Exception as e:\n        return \"Can't read {0}. Exception thrown: {1}\".format(filepath, e)\n"
  },
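For reference, a couple of worked examples for get_storage_endpoints_with_account() in misc_helpers.py above; the expected tuples are derived from reading the function, and the sovereign-cloud suffix is just an illustrative input:

from Utils.misc_helpers import get_storage_endpoints_with_account

# No endpoint suffix given: falls back to the public-cloud defaults
assert get_storage_endpoints_with_account('myacct', None) == \
    ('https://myacct.table.core.windows.net', 'https://myacct.blob.core.windows.net')

# Endpoint with a scheme: account and service names are spliced in right after '//'
assert get_storage_endpoints_with_account('myacct', 'https://core.usgovcloudapi.net') == \
    ('https://myacct.table.core.usgovcloudapi.net', 'https://myacct.blob.core.usgovcloudapi.net')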
  {
    "path": "Diagnostic/Utils/omsagent_util.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above\n# copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n#  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nimport os\nimport re\nimport socket\nimport time\n\nfrom Utils.misc_helpers import append_string_to_file\n\n# op is either '--upgrade' or '--remove'\nomsagent_universal_sh_cmd_template = 'sh omsagent-*.universal.x64.sh {op}'\n# args is either '-w LAD' or '-x LAD' or '-l'\nomsagent_lad_workspace_cmd_template = 'sh /opt/microsoft/omsagent/bin/omsadmin.sh {args}'\nomsagent_lad_dir = '/etc/opt/microsoft/omsagent/LAD/'\n\n\ndef setup_omsagent_for_lad(run_command):\n    \"\"\"\n    Install omsagent by executing the universal shell bundle. Also onboard omsagent for LAD.\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :rtype: int, str\n    :return: 2-tuple of process exit code and output (run_command's return values as is)\n    \"\"\"\n    # 1. Install omsagent. It's a noop if it's already installed.\n    cmd_exit_code, cmd_output = run_command(omsagent_universal_sh_cmd_template.format(op='--upgrade'))\n    if cmd_exit_code != 0:\n        return 1, 'setup_omsagent_for_lad(): omsagent universal installer shell execution failed. ' \\\n                  'Output: {0}'.format(cmd_output)\n\n    # 2. Onboard to LAD workspace. Should be a noop if it's already done.\n    if not os.path.isdir(omsagent_lad_dir):\n        cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-w LAD'))\n        if cmd_exit_code != 0:\n            return 2, 'setup_omsagent_for_lad(): LAD workspace onboarding failed. Output: {0}'.format(cmd_output)\n\n    # All succeeded\n    return 0, 'setup_omsagent_for_lad() succeeded'\n\n\nomsagent_control_cmd_template = '/opt/microsoft/omsagent/bin/service_control {op} LAD'\n\n\ndef control_omsagent(op, run_command):\n    \"\"\"\n    Start/stop/restart omsagent service using omsagent service_control script.\n    :param op: Operation type. Must be 'start', 'stop', or 'restart'\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :rtype: int, str\n    :return: 2-tuple of process exit code and output (run_command's return values as is)\n    \"\"\"\n    cmd_exit_code, cmd_output = run_command(omsagent_control_cmd_template.format(op=op))\n    if cmd_exit_code != 0:\n        return 1, 'control_omsagent({0}) failed. 
Output: {1}'.format(op, cmd_output)\n    return 0, 'control_omsagent({0}) succeeded'.format(op)\n\n\ndef tear_down_omsagent_for_lad(run_command, remove_omsagent):\n    \"\"\"\n    Remove omsagent by executing the universal shell bundle. Remove LAD workspace before that.\n    Don't remove omsagent if OMSAgentForLinux extension is installed (i.e., if any other omsagent workspace exists).\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :param remove_omsagent: A boolean indicating whether to remove omsagent bundle or not.\n    :rtype: int, str\n    :return: 2-tuple of process exit code and output (run_command's return values)\n    \"\"\"\n    return_msg = ''\n    # 1. Unconfigure syslog. Ignore failure (just collect failure output).\n    cmd_exit_code, cmd_output = unconfigure_syslog(run_command)\n    if cmd_exit_code != 0:\n        return_msg += 'tear_down_omsagent_for_lad(): unconfigure_syslog() failed. ' \\\n                      'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    # 2. Remove LAD workspace. Ignore failure.\n    cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-x LAD'))\n    if cmd_exit_code != 0:\n        return_msg += 'tear_down_omsagent_for_lad(): LAD workspace removal failed. ' \\\n                      'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    if remove_omsagent:\n        # 3. Uninstall omsagent when specified. Do this only if there's no other omsagent workspace.\n        cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-l'))\n        if cmd_output.strip().lower() == 'no workspace':\n            cmd_exit_code, cmd_output = run_command(omsagent_universal_sh_cmd_template.format(op='--remove'))\n            if cmd_exit_code != 0:\n                return_msg += 'tear_down_omsagent_for_lad(): remove-omsagent failed. ' \\\n                              'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n        else:\n            return_msg += 'tear_down_omsagent_for_lad(): skipping omsagent removal (other workspace(s) ' \\\n                          'may still exist, or workspace listing failed). ' \\\n                          'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    # Done\n    return 0, return_msg if return_msg else 'tear_down_omsagent_for_lad() succeeded'\n\n\nrsyslog_top_conf_path = '/etc/rsyslog.conf'\nrsyslog_d_path = '/etc/rsyslog.d/'\nrsyslog_d_omsagent_conf_path = '/etc/rsyslog.d/95-omsagent.conf'  # hard-coded by omsagent\nsyslog_ng_conf_path = '/etc/syslog-ng/syslog-ng.conf'\n\n\ndef is_rsyslog_installed():\n    \"\"\"\n    Returns true iff rsyslog is installed on the machine.\n    :rtype: bool\n    :return: True if rsyslog is installed. False otherwise.\n    \"\"\"\n    return os.path.exists(rsyslog_top_conf_path)\n\n\ndef is_new_rsyslog_installed():\n    \"\"\"\n    Returns true iff a newer version of rsyslog (that has /etc/rsyslog.d/) is installed on the machine.\n    :rtype: bool\n    :return: True if /etc/rsyslog.d/ exists. False otherwise.\n    \"\"\"\n    return os.path.exists(rsyslog_d_path)\n\n\ndef is_syslog_ng_installed():\n    \"\"\"\n    Returns true iff syslog-ng is installed on the machine.\n    :rtype: bool\n    :return: True if syslog-ng is installed. False otherwise.\n    \"\"\"\n    return os.path.exists(syslog_ng_conf_path)\n\n\ndef get_syslog_ng_src_name():\n    \"\"\"\n    Some syslog-ng distributions use a different source name (\"s_src\" vs \"src\"), causing syslog-ng restarts\n    to fail when we provide a non-existent source name.
 Need to search the syslog-ng.conf file and retrieve\n    the source name as below.\n    :rtype: str\n    :return: syslog-ng source name retrieved from syslog-ng.conf. 'src' if none available.\n    \"\"\"\n    syslog_ng_src_name = 'src'\n    try:\n        with open(syslog_ng_conf_path, 'r') as f:\n            syslog_ng_cfg = f.read()\n        src_match = re.search(r'\\n\\s*source\\s+([^\\s]+)\\s*{', syslog_ng_cfg)\n        if src_match:\n            syslog_ng_src_name = src_match.group(1)\n    except Exception as e:\n        pass  # Ignore any errors, because the default ('src') will do.\n\n    return syslog_ng_src_name\n\n\ndef get_fluentd_syslog_src_port():\n    \"\"\"\n    Returns a TCP/UDP port number that'll be supplied to the fluentd syslog src plugin (for it to listen on for\n    syslog events from rsyslog/syslog-ng). Ports from 25229 to 25423 will be tried for bind() and the first available\n    one will be returned. 25224 is the default port number that's picked by omsagent.\n\n    This is definitely not 100% correct with potential races. The correct solution would be to let fluentd syslog\n    src plugin bind to 0 and write the resulting bound port number to a file, so that we can get the port number\n    from the file. However, the current fluentd in_syslog.rb doesn't write to a file, so that method won't\n    work. And yet we still want to minimize the possibility of binding to an already-in-use port, so here's a workaround.\n    :rtype: int\n    :return: A successfully bound (& closed) TCP/UDP port number. -1 if all failed.\n    \"\"\"\n    for port in range(25229, 25424):\n        try:\n            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            s.bind(('', port))\n            s.close()\n            return port\n        except Exception as e:\n            pass\n    return -1\n\n\nomsagent_config_syslog_sh_cmd_template = 'sh /opt/microsoft/omsagent/bin/configure_syslog.sh {op} LAD {port}'\n\n\ndef run_omsagent_config_syslog_sh(run_command, op, port=''):\n    \"\"\"\n    Run omsagent's configure_syslog.sh script for LAD.\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :param op: Type of operation. Must be one of 'configure', 'unconfigure', and 'restart'\n    :param port: TCP/UDP port number to supply as fluentd in_syslog plugin listen port\n    :rtype: int, str\n    :return: 2-tuple of the process exit code and the resulting output string (basically run_command's return values)\n    \"\"\"\n    return run_command(omsagent_config_syslog_sh_cmd_template.format(op=op, port=port))\n\n\nfluentd_syslog_src_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf'\nsyslog_port_pattern_marker = '%SYSLOG_PORT%'\n\n\ndef configure_syslog(run_command, port, in_syslog_cfg, rsyslog_cfg, syslog_ng_cfg):\n    \"\"\"\n    Configure rsyslog/syslog-ng and fluentd's in_syslog with the given TCP port.\n    rsyslog/syslog-ng config is done by omsagent's configure_syslog.sh. We also try to unconfigure first,\n    to avoid duplicate entries in the related config files.\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :param port: TCP/UDP port number to be used for rsyslog/syslog-ng and fluentd's in_syslog\n    :param in_syslog_cfg: Fluentd's in_syslog config string. 
Should be overwritten to omsagent.d/syslog.conf\n    :param rsyslog_cfg: rsyslog config that's generated by LAD syslog configurator, that should be appended to\n                        /etc/rsyslog.d/95-omsagent.conf or /etc/rsyslog.conf\n    :param syslog_ng_cfg: syslog-ng config that's generated by LAD syslog configurator, that should be appended to\n                          /etc/syslog-ng/syslog-ng.conf\n    :rtype: int, str\n    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)\n    \"\"\"\n    if not is_rsyslog_installed() and not is_syslog_ng_installed():\n        return 0, 'configure_syslog(): Nothing to do: Neither rsyslog nor syslog-ng is installed on the system'\n\n    # 1. Unconfigure existing syslog instance (if any) to avoid duplicates\n    #    Continue even if this step fails (not critical)\n    cmd_exit_code, cmd_output = unconfigure_syslog(run_command)\n    extra_msg = ''\n    if cmd_exit_code != 0:\n        extra_msg = 'configure_syslog(): configure_syslog.sh unconfigure failed (still proceeding): ' + cmd_output\n\n    # 2. Configure new syslog instance with port number.\n    #    Ordering is very tricky. This must be done before modifying /etc/syslog-ng/syslog-ng.conf\n    #    or /etc/rsyslog.d/95-omsagent.conf below!\n    cmd_exit_code, cmd_output = run_omsagent_config_syslog_sh(run_command, 'configure', port)\n    if cmd_exit_code != 0:\n        return 2, 'configure_syslog(): configure_syslog.sh configure failed: ' + cmd_output\n\n    # 2.5. Replace '%SYSLOG_PORT%' in all passed syslog configs with the obtained port number\n    in_syslog_cfg = in_syslog_cfg.replace(syslog_port_pattern_marker, str(port))\n    rsyslog_cfg = rsyslog_cfg.replace(syslog_port_pattern_marker, str(port))\n    syslog_ng_cfg = syslog_ng_cfg.replace(syslog_port_pattern_marker, str(port))\n\n    # 3. Configure fluentd in_syslog plugin (write the fluentd plugin config file)\n    try:\n        with open(fluentd_syslog_src_cfg_path, 'w') as f:\n            f.write(in_syslog_cfg)\n    except Exception as e:\n        return 3, 'configure_syslog(): Writing to omsagent.d/syslog.conf failed: {0}'.format(e)\n\n    # 4. Update (add facilities/levels) rsyslog or syslog-ng config\n    try:\n        if is_syslog_ng_installed():\n            append_string_to_file(syslog_ng_cfg, syslog_ng_conf_path)\n        elif is_new_rsyslog_installed():\n            append_string_to_file(rsyslog_cfg, rsyslog_d_omsagent_conf_path)\n        else:  # old rsyslog, so append to rsyslog_top_conf_path\n            append_string_to_file(rsyslog_cfg, rsyslog_top_conf_path)\n    except Exception as e:\n        return 4, 'configure_syslog(): Adding facilities/levels to rsyslog/syslog-ng conf failed: {0}'.format(e)\n\n    # 5. Restart syslog\n    cmd_exit_code, cmd_output = restart_syslog(run_command)\n    if cmd_exit_code != 0:\n        return 5, 'configure_syslog(): Failed at restarting syslog (rsyslog or syslog-ng). ' \\\n                  'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    # All succeeded\n    return 0, 'configure_syslog(): Succeeded. 
Extra message: {0}'.format(extra_msg if extra_msg else 'None')\n\n\nfluentd_tail_src_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf'\n\n\ndef configure_filelog(in_tail_cfg):\n    \"\"\"\n    Configure fluentd's in_tail plugin for LAD file logging.\n    :param in_tail_cfg: Fluentd's in_tail plugin cfg for LAD filelog setting (obtained from LadConfigAll obj)\n    :rtype: int, str\n    :return: A 2-tuple of process exit code and output\n    \"\"\"\n    # Just needs to write to the omsagent.d/tail.conf file\n    try:\n        with open(fluentd_tail_src_cfg_path, 'w') as f:\n            f.write(in_tail_cfg)\n    except Exception as e:\n        return 1, 'configure_filelog(): Failed writing fluentd in_tail config file: {0}'.format(e)\n    return 0, 'configure_filelog(): Succeeded writing fluentd in_tail config file'\n\n\nfluentd_out_mdsd_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf'\n\n\ndef configure_out_mdsd(out_mdsd_cfg):\n    \"\"\"\n    Configure fluentd's out_mdsd plugin for LAD file logging.\n    :param out_mdsd_cfg: Fluentd's out_mdsd plugin cfg for the entire LAD setting (obtained from LadConfigAll obj)\n    :rtype: int, str\n    :return: A 2-tuple of process exit code and output\n    \"\"\"\n    # Just needs to write to the omsagent.d/z_out_mdsd.conf file\n    try:\n        with open(fluentd_out_mdsd_cfg_path, 'w') as f:\n            f.write(out_mdsd_cfg)\n    except Exception as e:\n        return 1, 'configure_out_mdsd(): Failed writing fluentd out_mdsd config file: {0}'.format(e)\n    return 0, 'configure_out_mdsd(): Succeeded writing fluentd out_mdsd config file'\n\n\ndef unconfigure_syslog(run_command):\n    \"\"\"\n    Unconfigure rsyslog/syslog-ng and fluentd's in_syslog for LAD. rsyslog/syslog-ng unconfig is done\n    by omsagent's configure_syslog.sh.\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :rtype: int, str\n    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)\n    \"\"\"\n    # 1. Find the port number in fluentd's in_syslog conf.\n    if not os.path.isfile(fluentd_syslog_src_cfg_path):\n        return 0, \"unconfigure_syslog(): Nothing to unconfigure: omsagent fluentd's in_syslog is not configured\"\n\n    # 2. Read fluentd's in_syslog config\n    try:\n        with open(fluentd_syslog_src_cfg_path) as f:\n            fluentd_syslog_src_cfg = f.read()\n    except Exception as e:\n        return 1, \"unconfigure_syslog(): Failed reading fluentd's in_syslog config: {0}\".format(e)\n\n    # 3. Extract the port number and run omsagent's configure_syslog.sh to unconfigure\n    port_match = re.search(r'port\\s+(\\d+)', fluentd_syslog_src_cfg)\n    if not port_match:\n        return 2, 'unconfigure_syslog(): Invalid fluentd in_syslog config: port number setting not found'\n    port = int(port_match.group(1))\n    cmd_exit_code, cmd_output = run_omsagent_config_syslog_sh(run_command, 'unconfigure', port)\n    if cmd_exit_code != 0:\n        return 3, 'unconfigure_syslog(): configure_syslog.sh failed: ' + cmd_output\n\n    # 4. Remove fluentd's in_syslog conf file\n    try:\n        os.remove(fluentd_syslog_src_cfg_path)\n    except Exception as e:\n        return 4, 'unconfigure_syslog(): Removing omsagent.d/syslog.conf failed: {0}'.format(e)\n\n    # 5.
All succeeded\n    return 0, 'unconfigure_syslog(): Succeeded'\n\n\ndef restart_syslog(run_command):\n    \"\"\"\n    Restart rsyslog/syslog-ng (so that any new config will be applied)\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :rtype: int, str\n    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)\n    \"\"\"\n    return run_omsagent_config_syslog_sh(run_command, 'restart')  # port param is dummy here.\n\n\ndef restart_omiserver(run_command):\n    \"\"\"\n    Restart omiserver as needed (it crashes sometimes, and doesn't restart automatically yet)\n    :param run_command: External command execution function (e.g., RunGetOutput)\n    :rtype: int, str\n    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)\n    \"\"\"\n    return run_command('/opt/omi/bin/service_control restart')\n\n\ndef setup_omsagent(configurator, run_command, logger_log, logger_error):\n    \"\"\"\n    Set up omsagent. Install necessary components, configure them as needed, and start the agent.\n    :param configurator: A LadConfigAll object that's obtained from a valid LAD JSON settings config.\n                         This is needed to retrieve the syslog (rsyslog/syslog-ng) and the fluentd configs.\n    :param run_command: External command executor (e.g., RunGetOutput)\n    :param logger_log: Logger for normal logging messages (e.g., hutil.log)\n    :param logger_error: Logger for error logging messages (e.g., hutil.error)\n    :return: Pair of status code and message. 0 status code for success. Non-zero status code\n            for a failure and the associated failure message.\n    \"\"\"\n    # Remember whether OMI (not omsagent) needs to be freshly installed.\n    # This is needed later to determine whether to reconfigure the omiserver.conf or not for security purposes.\n    need_fresh_install_omi = not os.path.exists('/opt/omi/bin/omiserver')\n\n    logger_log(\"Begin omsagent setup.\")\n\n    # 1. Install omsagent, onboard to LAD workspace\n    # We now try to install/set up every time; if it's already installed, any additional install is a no-op.\n    is_omsagent_setup_correctly = False\n    maxTries = 5  # Try up to 5 times to install omsagent\n    for trialNum in range(1, maxTries + 1):\n        cmd_exit_code, cmd_output = setup_omsagent_for_lad(run_command)\n        if cmd_exit_code == 0:  # Successfully set up\n            is_omsagent_setup_correctly = True\n            break\n        logger_error(\"omsagent setup failed (trial #\" + str(trialNum) + \").\")\n        if trialNum < maxTries:\n            logger_error(\"Retrying in 30 seconds...\")\n            time.sleep(30)\n    if not is_omsagent_setup_correctly:\n        logger_error(\"omsagent setup failed \" + str(maxTries) + \" times. Giving up...\")\n        return 1, \"omsagent setup failed {0} times. \" \\\n                  \"Last exit code={1}, Output={2}\".format(maxTries, cmd_exit_code, cmd_output)\n\n    # Issue #265. 
OMI httpsport shouldn't be reconfigured when LAD is re-enabled or just upgraded.\n    # In other words, OMI httpsport config should be updated only on a fresh OMI install.\n    if need_fresh_install_omi:\n        # Check if OMI is configured to listen to any non-zero port and reconfigure if so.\n        omi_listens_to_nonzero_port = run_command(r\"grep '^\\s*httpsport\\s*=' /etc/opt/omi/conf/omiserver.conf \"\n                                                  r\"| grep -v '^\\s*httpsport\\s*=\\s*0\\s*$'\")[0] is 0\n        if omi_listens_to_nonzero_port:\n            run_command(\"/opt/omi/bin/omiconfigeditor httpsport -s 0 < /etc/opt/omi/conf/omiserver.conf \"\n                        \"> /etc/opt/omi/conf/omiserver.conf_temp\")\n            run_command(\"mv /etc/opt/omi/conf/omiserver.conf_temp /etc/opt/omi/conf/omiserver.conf\")\n\n    # 2. Configure all fluentd plugins (in_syslog, in_tail, out_mdsd)\n    # 2.1. First get a free TCP/UDP port for fluentd in_syslog plugin.\n    port = get_fluentd_syslog_src_port()\n    if port < 0:\n        return 3, 'setup_omsagent(): Failed at getting a free TCP/UDP port for fluentd in_syslog'\n    # 2.2. Configure syslog\n    cmd_exit_code, cmd_output = configure_syslog(run_command, port,\n                                                 configurator.get_fluentd_syslog_src_config(),\n                                                 configurator.get_rsyslog_config(),\n                                                 configurator.get_syslog_ng_config())\n    if cmd_exit_code != 0:\n        return 4, 'setup_omsagent(): Failed at configuring in_syslog. Exit code={0}, Output={1}'.format(cmd_exit_code,\n                                                                                                        cmd_output)\n    # 2.3. Configure filelog\n    cmd_exit_code, cmd_output = configure_filelog(configurator.get_fluentd_tail_src_config())\n    if cmd_exit_code != 0:\n        return 5, 'setup_omsagent(): Failed at configuring in_tail. Exit code={0}, Output={1}'.format(cmd_exit_code,\n                                                                                                      cmd_output)\n    # 2.4. Configure out_mdsd\n    cmd_exit_code, cmd_output = configure_out_mdsd(configurator.get_fluentd_out_mdsd_config())\n    if cmd_exit_code != 0:\n        return 6, 'setup_omsagent(): Failed at configuring out_mdsd. Exit code={0}, Output={1}'.format(cmd_exit_code,\n                                                                                                       cmd_output)\n\n    # 3. Restart omsagent\n    cmd_exit_code, cmd_output = control_omsagent('restart', run_command)\n    if cmd_exit_code != 0:\n        return 8, 'setup_omsagent(): Failed at restarting omsagent (fluentd). ' \\\n                  'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    # All done...\n    return 0, \"setup_omsagent(): Succeeded\"\n"
  },
  {
    "path": "Diagnostic/__init__.py",
    "content": ""
  },
  {
    "path": "Diagnostic/decrypt_protected_settings.sh",
    "content": "#!/bin/bash\n\n# A shell script utility to decrypt the extension's protected settings for debugging purpose\n# Must be run at /var/lib/waagent/Microsoft.Azure.Diagnostics.LinuxDiagnostic-.../\n# with the settings file path (e.g., config/0.settings) as the only cmdline arg\n\nif [ $# -lt 1 ]; then\n    echo \"Usage: $0 <ext_settings_file_path>\"\n    exit 1\nfi\n\nthumbprint=$(jq -r '.runtimeSettings[].handlerSettings.protectedSettingsCertThumbprint' $1)\njq -r '.runtimeSettings[].handlerSettings.protectedSettings' $1 | base64 --decode | openssl smime -inform DER -decrypt -recip ../$thumbprint.crt -inkey ../$thumbprint.prv | jq .\n"
  },
  {
    "path": "Diagnostic/diagnostic.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"\"Software\"\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following\n# conditions: The above copyright notice and this permission notice shall be included in all copies or substantial\n# portions of the Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport datetime\nimport exceptions\nimport os.path\nimport platform\nimport signal\nimport subprocess\nimport sys\nimport syslog\nimport threading\nimport time\nimport traceback\nimport xml.etree.ElementTree as ET\nimport json\n\n# Just wanted to be able to run 'python diagnostic.py ...' from a local dev box where there's no waagent.\n# Actually waagent import can succeed even on a Linux machine without waagent installed,\n# by setting PYTHONPATH env var to the azure-linux-extensions/Common/WALinuxAgent-2.0.16,\n# but let's just keep this try-except here on them for any potential local imports that may throw.\ntry:\n    # waagent, ext handler\n    from Utils.WAAgentUtil import waagent\n    import Utils.HandlerUtil as Util\n\n    # Old LAD utils\n    import Utils.LadDiagnosticUtil as LadUtil\n    import Utils.XmlUtil as XmlUtil\n\n    # New LAD  utils\n    import DistroSpecific\n    import watcherutil\n    from Utils.lad_ext_settings import LadExtSettings\n    from Utils.misc_helpers import *\n    import lad_config_all as lad_cfg\n    from Utils.imds_util import ImdsLogger\n    import Utils.omsagent_util as oms\n    import telegraf_utils.telegraf_config_handler as telhandler\n    import metrics_ext_utils.metrics_ext_handler as me_handler\n    import metrics_ext_utils.metrics_constants as metrics_constants\n\nexcept Exception as e:\n    print('A local import (e.g., waagent) failed. Exception: {0}\\nStacktrace: {1}'.format(e, traceback.format_exc()))\n    print(\"Can't proceed. 
Exiting with a special exit code 119.\")\n    sys.exit(119)  # This is the only thing we can do, as all logging depends on waagent/hutil.\n\n\n# Globals declaration/initialization (with const values only) for IDE\ng_ext_settings = None  # LAD extension settings object\ng_lad_log_helper = None  # LAD logging helper object\ng_dist_config = None  # Distro config object\ng_ext_dir = ''  # Extension directory (e.g., /var/lib/waagent/Microsoft.OSTCExtensions.LinuxDiagnostic-x.y.zzzz)\ng_mdsd_file_resources_dir = '/var/run/mdsd'\ng_mdsd_role_name = 'lad_mdsd'  # Different mdsd role name for multiple mdsd process instances\ng_mdsd_file_resources_prefix = ''  # Eventually '/var/run/mdsd/lad_mdsd'\ng_lad_pids_filepath = ''  # LAD process IDs (diagnostic.py, mdsd) file path. g_ext_dir + '/lad.pids'\ng_ext_op_type = None  # Extension operation type (e.g., Install, Enable, HeartBeat, ...)\ng_mdsd_bin_path = '/usr/local/lad/bin/mdsd'  # mdsd binary path. Fixed w/ lad-mdsd-*.{deb,rpm} pkgs\ng_diagnostic_py_filepath = ''  # Full path of this script. g_ext_dir + '/diagnostic.py'\n# Only 2 globals not following 'g_...' naming convention, for legacy readability...\nRunGetOutput = None  # External command executor callable\nhutil = None  # Handler util object\nenable_metrics_ext = False #Flag to enable/disable MetricsExtension\nenable_telegraf = False #Flag to enable/disable Telegraf\nme_msi_token_expiry_epoch = None\n\n\n\ndef init_distro_specific_actions():\n    \"\"\"\n    Identify the specific Linux distribution in use. Set the global distConfig to point to the corresponding\n    implementation class. If the distribution isn't supported, set the extension status appropriately and exit.\n    Expects the global hutil to already be initialized.\n    \"\"\"\n    # TODO Exit immediately if distro is unknown\n    global g_dist_config, RunGetOutput\n    dist = platform.dist()\n    name = ''\n    version = ''\n    try:\n        if dist[0] != '':\n            name = dist[0]\n            version = dist[1]\n        else:\n            try:\n                # platform.dist() in python 2.7.15 does not recognize SLES/OpenSUSE 15.\n                with open(\"/etc/os-release\", \"r\") as fp:\n                    for line in fp:\n                        if line.startswith(\"ID=\"):\n                            name = line.split(\"=\")[1]\n                            name = name.split(\"-\")[0]\n                            name = name.replace(\"\\\"\", \"\").replace(\"\\n\", \"\")\n                        elif line.startswith(\"VERSION_ID=\"):\n                            version = line.split(\"=\")[1]\n                            version = version.split(\".\")[0]\n                            version = version.replace(\"\\\"\", \"\").replace(\"\\n\", \"\")\n            except:\n                raise\n\n        hutil.log(\"os version: {0}:{1}\".format(name.lower(), version))\n        g_dist_config = DistroSpecific.get_distro_actions(name.lower(), version, hutil.log)\n        RunGetOutput = g_dist_config.log_run_get_output\n    except exceptions.LookupError as ex:\n        hutil.error(\"os version: {0}:{1} not supported\".format(dist[0], dist[1]))\n        # TODO Exit immediately if distro is unknown. This is currently done in main().\n        g_dist_config = None\n\n\ndef init_extension_settings():\n    \"\"\"Initialize extension's public & private settings. 
hutil must be already initialized prior to calling this.\"\"\"\n    global g_ext_settings\n\n    # Need to read/parse the Json extension settings (context) first.\n    hutil.try_parse_context()\n    hutil.set_verbose_log(False)  # This is default, but this choice will be made explicit and logged.\n\n    g_ext_settings = LadExtSettings(hutil.get_handler_settings())\n\n\ndef init_globals():\n    \"\"\"Initialize all the globals in a function so that we can catch any exceptions that might be raised.\"\"\"\n    global hutil, g_ext_dir, g_mdsd_file_resources_prefix, g_lad_pids_filepath\n    global g_diagnostic_py_filepath, g_lad_log_helper\n\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"LinuxDiagnostic started to handle.\")\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    init_extension_settings()\n    init_distro_specific_actions()\n\n    g_ext_dir = os.getcwd()\n    g_mdsd_file_resources_prefix = os.path.join(g_mdsd_file_resources_dir, g_mdsd_role_name)\n    g_lad_pids_filepath = os.path.join(g_ext_dir, 'lad.pids')\n    g_diagnostic_py_filepath = os.path.join(os.getcwd(), __file__)\n    g_lad_log_helper = LadLogHelper(hutil.log, hutil.error, waagent.AddExtensionEvent, hutil.do_status_report,\n                                    hutil.get_name(), hutil.get_extension_version())\n\n\ndef setup_dependencies_and_mdsd(configurator):\n    \"\"\"\n    Set up dependencies for mdsd, such as following:\n    1) Distro-specific packages (see DistroSpecific.py)\n    2) Set up omsagent (fluentd), syslog (rsyslog or syslog-ng) for mdsd\n    :return: Status code and message\n    \"\"\"\n    install_package_error = \"\"\n    retry = 3\n    while retry > 0:\n        error, msg = g_dist_config.install_required_packages()\n        hutil.log(msg)\n        if error == 0:\n            break\n        else:\n            retry -= 1\n            hutil.log(\"Sleep 60 retry \" + str(retry))\n            install_package_error = msg\n            time.sleep(60)\n    if install_package_error:\n        if len(install_package_error) > 1024:\n            install_package_error = install_package_error[0:512] + install_package_error[-512:-1]\n        hutil.error(install_package_error)\n        return 2, install_package_error\n\n    # Run mdsd prep commands\n    g_dist_config.prepare_for_mdsd_install()\n\n    # Set up omsagent\n    omsagent_setup_exit_code, omsagent_setup_output = oms.setup_omsagent(configurator, RunGetOutput,\n                                                                         hutil.log, hutil.error)\n    if omsagent_setup_exit_code is not 0:\n        return 3, omsagent_setup_output\n\n    # Install lad-mdsd pkg (/usr/local/lad/bin/mdsd). Must be done after omsagent install because of dependencies\n    cmd_exit_code, cmd_output = g_dist_config.install_lad_mdsd()\n    if cmd_exit_code != 0:\n        return 4, 'lad-mdsd pkg install failed. 
Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)\n\n    return 0, 'success'\n\n\ndef install_lad_as_systemd_service():\n    \"\"\"\n    Install LAD as a systemd service on systemd-enabled distros/versions (e.g., Ubuntu 16.04)\n    :return: None\n    \"\"\"\n    RunGetOutput('sed s#{WORKDIR}#' + g_ext_dir + '# ' +\n                 g_ext_dir + '/services/mdsd-lde.service > /lib/systemd/system/mdsd-lde.service')\n    RunGetOutput('systemctl daemon-reload')\n\n\ndef create_core_components_configs():\n    \"\"\"\n    Entry point to creating all configs of LAD's core components (mdsd, omsagent, rsyslog/syslog-ng, ...).\n    This function shouldn't be called on Install/Enable. Only Daemon op needs to call this.\n    :rtype: LadConfigAll\n    :return: A valid LadConfigAll object if config is valid. None otherwise.\n    \"\"\"\n    deployment_id = get_deployment_id_from_hosting_env_cfg(waagent.LibDir, hutil.log, hutil.error)\n\n    # Define wrappers around a couple misc_helpers. These can easily be mocked out in tests. PEP-8 says use\n    # def, don't assign a lambda to a variable. *shrug*\n    def encrypt_string(cert, secret):\n        return encrypt_secret_with_cert(RunGetOutput, hutil.error, cert, secret)\n\n    configurator = lad_cfg.LadConfigAll(g_ext_settings, g_ext_dir, waagent.LibDir, deployment_id,\n                                        read_uuid, encrypt_string, hutil.log, hutil.error)\n    try:\n        config_valid, config_invalid_reason = configurator.generate_all_configs()\n    except Exception as e:\n        config_invalid_reason =\\\n            'Exception while generating configs: {0}. Traceback: {1}'.format(e, traceback.format_exc())\n        hutil.error(config_invalid_reason)\n        config_valid = False\n\n    if not config_valid:\n        g_lad_log_helper.log_and_report_failed_config_generation(\n            g_ext_op_type, config_invalid_reason,\n            g_ext_settings.redacted_handler_settings())\n        return None\n\n    global enable_metrics_ext\n    global enable_telegraf\n    ladconfig = configurator._ladCfg()\n    # verify metrics extension should be enabled\n    sink = configurator._sink_configs_public.get_sink_by_name(\"AzMonSink\")\n    if sink is not None:\n        if sink['name'] == 'AzMonSink':\n            enable_metrics_ext = True\n    # verify telegraf should be enabled (either metrics intervals or performance counters configured)\n    metrics_intervals = LadUtil.getAggregationPeriodsFromLadCfg(ladconfig)\n    perf_counter_config = LadUtil.getDiagnosticsMonitorConfigurationElement(ladconfig, 'performanceCounters')\n    if ((metrics_intervals != []) or (perf_counter_config)):\n        enable_telegraf = True\n\n    return configurator\n\n\ndef check_for_supported_waagent_and_distro_version():\n    \"\"\"\n    Checks & returns if the installed waagent and the Linux distro/version are supported by this LAD.\n    :rtype: bool\n    :return: True iff so.\n    \"\"\"\n    for notsupport in ('WALinuxAgent-2.0.5', 'WALinuxAgent-2.0.4', 'WALinuxAgent-1'):\n        code, str_ret = RunGetOutput(\"grep 'GuestAgentVersion.*\" + notsupport + \"' /usr/sbin/waagent\",\n                                             should_log=False)\n        if code == 0 and str_ret.find(notsupport) > -1:\n            hutil.log(\"cannot run this extension on  \" + notsupport)\n            hutil.do_status_report(g_ext_op_type, \"error\", '1', \"cannot run this extension on  \" + notsupport)\n            return False\n\n    if g_dist_config is None:\n        msg = (\"LAD does not 
support distro/version ({0}); not installed. This extension install/enable operation is \"\n               \"still considered a success as it's an external error.\").format(str(platform.dist()))\n        hutil.log(msg)\n        hutil.do_status_report(g_ext_op_type, \"success\", '0', msg)\n        waagent.AddExtensionEvent(name=hutil.get_name(),\n                                  op=g_ext_op_type,\n                                  isSuccess=True,\n                                  version=hutil.get_extension_version(),\n                                  message=\"Can't be installed on this OS \" + str(platform.dist()))\n        return False\n\n    return True\n\n\ndef main(command):\n    init_globals()\n\n    global g_ext_op_type\n    global me_msi_token_expiry_epoch\n\n    g_ext_op_type = get_extension_operation_type(command)\n    waagent_ext_event_type = wala_event_type_for_telemetry(g_ext_op_type)\n\n    if not check_for_supported_waagent_and_distro_version():\n        return\n\n    try:\n        hutil.log(\"Dispatching command:\" + command)\n\n        if g_ext_op_type is waagent.WALAEventOperation.Disable:\n            if g_dist_config.use_systemd():\n                RunGetOutput('systemctl stop mdsd-lde && systemctl disable mdsd-lde')\n            else:\n                stop_mdsd()\n            oms.tear_down_omsagent_for_lad(RunGetOutput, False)\n\n            #Stop the telegraf and ME services\n            tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=True)\n            if tel_out:\n                hutil.log(tel_msg)\n            else:\n                hutil.error(tel_msg)\n\n            me_out, me_msg = me_handler.stop_metrics_service(is_lad=True)\n            if me_out:\n                hutil.log(me_msg)\n            else:\n                hutil.error(me_msg)\n\n            hutil.do_status_report(g_ext_op_type, \"success\", '0', \"Disable succeeded\")\n\n        elif g_ext_op_type is waagent.WALAEventOperation.Uninstall:\n            if g_dist_config.use_systemd():\n                RunGetOutput('systemctl stop mdsd-lde && systemctl disable mdsd-lde ' +\n                             '&& rm /lib/systemd/system/mdsd-lde.service')\n            else:\n                stop_mdsd()\n            # Must remove lad-mdsd package first because of the dependencies\n            cmd_exit_code, cmd_output = g_dist_config.remove_lad_mdsd()\n            if cmd_exit_code != 0:\n                hutil.error('lad-mdsd remove failed. Still proceeding to uninstall. 
'\n                            'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output))\n            oms.tear_down_omsagent_for_lad(RunGetOutput, True)\n\n            #Delete the telegraf and ME services\n            tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service(is_lad=True)\n            if tel_rm_out:\n                hutil.log(tel_rm_msg)\n            else:\n                hutil.error(tel_rm_msg)\n\n            me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=True)\n            if me_rm_out:\n                hutil.log(me_rm_msg)\n            else:\n                hutil.error(me_rm_msg)\n\n            hutil.do_status_report(g_ext_op_type, \"success\", '0', \"Uninstall succeeded\")\n\n        elif g_ext_op_type is waagent.WALAEventOperation.Install:\n            # Install dependencies (omsagent, which includes omi, scx).\n            configurator = create_core_components_configs()\n            dependencies_err, dependencies_msg = setup_dependencies_and_mdsd(configurator)\n            if dependencies_err != 0:\n                g_lad_log_helper.report_mdsd_dependency_setup_failure(waagent_ext_event_type, dependencies_msg)\n                hutil.do_status_report(g_ext_op_type, \"error\", '-1', \"Install failed\")\n                return\n\n            if g_dist_config.use_systemd():\n                install_lad_as_systemd_service()\n            hutil.do_status_report(g_ext_op_type, \"success\", '0', \"Install succeeded\")\n\n        elif g_ext_op_type is waagent.WALAEventOperation.Enable:\n            if hutil.is_current_config_seq_greater_inused():\n                configurator = create_core_components_configs()\n                dependencies_err, dependencies_msg = setup_dependencies_and_mdsd(configurator)\n                if dependencies_err != 0:\n                    g_lad_log_helper.report_mdsd_dependency_setup_failure(waagent_ext_event_type, dependencies_msg)\n                    hutil.do_status_report(g_ext_op_type, \"error\", '-1', \"Enable failed\")\n                    return\n\n                # Start the Telegraf and ME services on enable after installation is complete\n                start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=True)\n                if start_telegraf_res:\n                    hutil.log(\"Successfully started metrics-sourcer.\")\n                else:\n                    hutil.error(log_messages)\n\n                if enable_metrics_ext:\n                    # Generate/regenerate MSI Token required by ME\n                    generate_token = False\n                    me_token_path = g_ext_dir + \"/metrics_configs/AuthToken-MSI.json\"\n\n                    if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == \"\":\n                        if os.path.isfile(me_token_path):\n                            with open(me_token_path, \"r\") as f:\n                                # Parse the token file as JSON (as the daemon loop in start_mdsd() does);\n                                # reading it as a raw string would make the dict-style lookups below fail\n                                authtoken_content = json.loads(f.read())\n                                if authtoken_content and \"expires_on\" in authtoken_content:\n                                    me_msi_token_expiry_epoch = authtoken_content[\"expires_on\"]\n                                else:\n                                    generate_token = True\n                        else:\n                            generate_token = True\n\n                    if me_msi_token_expiry_epoch:\n                        currentTime = datetime.datetime.now()\n                        token_expiry_time = datetime.datetime.fromtimestamp(float(me_msi_token_expiry_epoch))\n
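                        # 'expires_on' in the token file holds epoch seconds; fromtimestamp() above converts it to a\n                        # local datetime. The check below refreshes the MSI token once it is within 30 minutes of\n                        # expiry; the daemon loop in start_mdsd() applies the same refresh logic.\n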
                        if token_expiry_time - currentTime < datetime.timedelta(minutes=30):\n                            # The MSI Token will expire within 30 minutes. We need to refresh the token\n                            generate_token = True\n\n                    if generate_token:\n                        generate_token = False\n                        msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token()\n                        if msi_token_generated:\n                            hutil.log(\"Successfully refreshed metrics-extension MSI Auth token.\")\n                        else:\n                            hutil.error(log_messages)\n\n                    start_metrics_out, log_messages = me_handler.start_metrics(is_lad=True)\n                    if start_metrics_out:\n                        hutil.log(\"Successfully started metrics-extension.\")\n                    else:\n                        hutil.error(log_messages)\n\n            if g_dist_config.use_systemd():\n                install_lad_as_systemd_service()\n                RunGetOutput('systemctl enable mdsd-lde')\n                mdsd_lde_active = RunGetOutput('systemctl status mdsd-lde')[0] == 0\n                if not mdsd_lde_active or hutil.is_current_config_seq_greater_inused():\n                    RunGetOutput('systemctl restart mdsd-lde')\n            else:\n                # if the daemon process is not running\n                lad_pids = get_lad_pids()\n                hutil.log(\"get pids:\" + str(lad_pids))\n                if len(lad_pids) != 2 or hutil.is_current_config_seq_greater_inused():\n                    stop_mdsd()\n                    start_daemon()\n            hutil.set_inused_config_seq(hutil.get_seq_no())\n            hutil.do_status_report(g_ext_op_type, \"success\", '0', \"Enable succeeded, extension daemon started\")\n            # If the -daemon detects a problem, e.g. bad configuration, it will overwrite this status with a more\n            # informative one. If it succeeds, all is well.\n\n        elif g_ext_op_type == \"Daemon\":\n            configurator = create_core_components_configs()\n            if configurator:\n                start_mdsd(configurator)\n\n        elif g_ext_op_type is waagent.WALAEventOperation.Update:\n            hutil.do_status_report(g_ext_op_type, \"success\", '0', \"Update succeeded\")\n\n    except Exception as e:\n        hutil.error(\"Failed to perform extension operation {0} with error:{1}, {2}\".format(g_ext_op_type, e,\n                                                                                           traceback.format_exc()))\n        hutil.do_status_report(g_ext_op_type, 'error', '0',\n                               'Extension operation {0} failed:{1}'.format(g_ext_op_type, e))\n\n\ndef start_daemon():\n    \"\"\"\n    Start diagnostic.py as a daemon for scheduled tasks and to monitor mdsd daemon. 
If Popen() has a problem, it will\n    raise an exception (often OSError).\n    :return: None\n    \"\"\"\n    args = ['python2', g_diagnostic_py_filepath, \"-daemon\"]\n    log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')\n    hutil.log('start daemon ' + str(args))\n    subprocess.Popen(args, stdout=log, stderr=log)\n\n\ndef start_watcher_thread():\n    \"\"\"\n    Start watcher thread that performs periodic monitoring activities (other than mdsd)\n    :return: None\n    \"\"\"\n    # Create monitor object that encapsulates monitoring activities\n    watcher = watcherutil.Watcher(hutil.error, hutil.log, log_to_console=True)\n    # Create an IMDS data logger and set it to the monitor object\n    imds_logger = ImdsLogger(hutil.get_name(), hutil.get_extension_version(),\n                             waagent.WALAEventOperation.HeartBeat, waagent.AddExtensionEvent)\n    watcher.set_imds_logger(imds_logger)\n    # Start a thread to perform periodic monitoring activity (e.g., /etc/fstab watcher, IMDS data logging)\n    thread_obj = threading.Thread(target=watcher.watch)\n    thread_obj.daemon = True\n    thread_obj.start()\n\n\ndef start_mdsd(configurator):\n    \"\"\"\n    Start mdsd and monitor its activities. Report if it crashes or emits error logs.\n    :param configurator: A valid LadConfigAll object that was obtained by create_core_components_configs().\n                         This will be used for configuring rsyslog/syslog-ng/fluentd/in_syslog/out_mdsd components\n    :return: None\n    \"\"\"\n    # This must be done first, so that extension enable completion doesn't get delayed.\n    write_lad_pids_to_file(g_lad_pids_filepath, os.getpid())\n\n    # Need 'HeartBeat' instead of 'Daemon'\n    waagent_ext_event_type = wala_event_type_for_telemetry(g_ext_op_type)\n\n    # mdsd http proxy setting\n    proxy_config = get_mdsd_proxy_config(waagent.HttpProxyConfigString, g_ext_settings, hutil.log)\n    if proxy_config:\n        # Add MDSD_http_proxy to current environment. Child processes will inherit its value.\n        os.environ['MDSD_http_proxy'] = proxy_config\n\n    copy_env = os.environ.copy()\n    # Add MDSD_CONFIG_DIR as an env variable since new mdsd master branch LAD doesn't create this dir\n    mdsd_config_cache_dir = os.path.join(g_ext_dir, \"config\")\n    copy_env[\"MDSD_CONFIG_DIR\"] = mdsd_config_cache_dir\n\n\n    # We then validate the mdsd config and proceed only when it succeeds.\n    xml_file = os.path.join(g_ext_dir, 'xmlCfg.xml')\n    tmp_env_dict = {}  # Need to get the additionally needed env vars (SSL_CERT_*) for this mdsd run as well...\n    g_dist_config.extend_environment(tmp_env_dict)\n    added_env_str = ' '.join('{0}={1}'.format(k, tmp_env_dict[k]) for k in tmp_env_dict)\n    config_validate_cmd = '{0}{1}{2} -v -c {3} -r {4}'.format(added_env_str, ' ' if added_env_str else '',\n                                                       g_mdsd_bin_path, xml_file, g_ext_dir)\n    config_validate_cmd_status, config_validate_cmd_msg = RunGetOutput(config_validate_cmd)\n    if config_validate_cmd_status != 0:\n        # Invalid config. 
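The mdsd '-v' validation run exited non-zero. 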
Log error and report success.\n        g_lad_log_helper.log_and_report_invalid_mdsd_cfg(g_ext_op_type,\n                                                         config_validate_cmd_msg, read_file_to_string(xml_file))\n        return\n\n    # Start OMI if it's not running.\n    # This shouldn't happen, but this measure is put in place just in case (e.g., Ubuntu 16.04 systemd).\n    # Don't check if starting succeeded, as it'll be done in the loop below anyway.\n    omi_running = RunGetOutput(\"/opt/omi/bin/service_control is-running\", should_log=False)[0] is 1\n    if not omi_running:\n        hutil.log(\"OMI is not running. Restarting it.\")\n        RunGetOutput(\"/opt/omi/bin/service_control restart\")\n\n    log_dir = hutil.get_log_dir()\n    err_file_path = os.path.join(log_dir, 'mdsd.err')\n    info_file_path = os.path.join(log_dir, 'mdsd.info')\n    warn_file_path = os.path.join(log_dir, 'mdsd.warn')\n    qos_file_path = os.path.join(log_dir, 'mdsd.qos')\n    # Need to provide EH events and Rsyslog spool path since the new mdsd master branch LAD doesnt create the directory needed\n    eh_spool_path = os.path.join(log_dir, 'eh')\n\n    update_selinux_settings_for_rsyslogomazuremds(RunGetOutput, g_ext_dir)\n\n    mdsd_stdout_redirect_path = os.path.join(g_ext_dir, \"mdsd.log\")\n    mdsd_stdout_stream = None\n\n    g_dist_config.extend_environment(copy_env)\n\n    # Now prepare actual mdsd cmdline.\n    command = '{0} -A -C -c {1} -R -r {2} -e {3} -w {4} -q {8} -S {7} -o {5}{6}'.format(\n        g_mdsd_bin_path,\n        xml_file,\n        g_mdsd_role_name,\n        err_file_path,\n        warn_file_path,\n        info_file_path,\n        g_ext_settings.get_mdsd_trace_option(),\n        eh_spool_path,\n        qos_file_path).split(\" \")\n\n    try:\n        start_watcher_thread()\n\n        num_quick_consecutive_crashes = 0\n        mdsd_crash_msg = ''\n\n        while num_quick_consecutive_crashes < 3:  # We consider only quick & consecutive crashes for retries\n\n            RunGetOutput('rm -f ' + g_mdsd_file_resources_prefix + '.pidport')  # Must delete any existing port num file\n            mdsd_stdout_stream = open(mdsd_stdout_redirect_path, \"w\")\n            hutil.log(\"Start mdsd \" + str(command))\n            mdsd = subprocess.Popen(command,\n                                    cwd=g_ext_dir,\n                                    stdout=mdsd_stdout_stream,\n                                    stderr=mdsd_stdout_stream,\n                                    env=copy_env)\n\n            write_lad_pids_to_file(g_lad_pids_filepath, os.getpid(), mdsd.pid)\n\n            last_mdsd_start_time = datetime.datetime.now()\n            last_error_time = last_mdsd_start_time\n            omi_installed = True  # Remembers if OMI is installed at each iteration\n            telegraf_restart_retries = 0\n            me_restart_retries = 0\n            max_restart_retries = 10\n            # Continuously monitors mdsd process\n            while True:\n                time.sleep(30)\n                if \" \".join(get_lad_pids()).find(str(mdsd.pid)) < 0 and len(get_lad_pids()) >= 2:\n                    mdsd.kill()\n                    hutil.log(\"Another process is started, now exit\")\n                    return\n                if mdsd.poll() is not None:  # if mdsd has terminated\n                    time.sleep(60)\n                    mdsd_stdout_stream.flush()\n                    break\n\n                # mdsd is now up for at least 30 seconds. 
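This is guaranteed by the time.sleep(30) at the top of this loop. 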
Do some monitoring activities.\n                # 1. Mitigate if memory leak is suspected.\n                mdsd_memory_leak_suspected, mdsd_memory_usage_in_KB = check_suspected_memory_leak(mdsd.pid, hutil.error)\n                if mdsd_memory_leak_suspected:\n                    g_lad_log_helper.log_suspected_memory_leak_and_kill_mdsd(mdsd_memory_usage_in_KB, mdsd,\n                                                                             waagent_ext_event_type)\n                    break\n                # 2. Restart OMI if it crashed (Issue #128)\n                omi_installed = restart_omi_if_crashed(omi_installed, mdsd)\n                # 3. Check if there's any new logs in mdsd.err and report\n                last_error_time = report_new_mdsd_errors(err_file_path, last_error_time)\n                # 4. Check if telegraf is running, if not, then restart\n                if enable_telegraf and not telhandler.is_running(is_lad=True):\n                    if telegraf_restart_retries < max_restart_retries:\n                        telegraf_restart_retries += 1\n                        hutil.log(\"Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}\".format(telegraf_restart_retries))\n                        tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=True)\n                        if tel_out:\n                            hutil.log(tel_msg)\n                        else:\n                            hutil.error(tel_msg)\n                        start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=True)\n                        if start_telegraf_res:\n                            hutil.log(\"Successfully started metrics-sourcer.\")\n                        else:\n                            hutil.error(log_messages)\n                    else:\n                        hutil.error(\"Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log at {1}\".format(max_restart_retries, log_dir))\n                else:\n                    telegraf_restart_retries = 0\n                # 5. Check if ME is running, if not, then restart\n                if enable_metrics_ext:\n                    if not me_handler.is_running(is_lad=True):\n                        if me_restart_retries < max_restart_retries:\n                            me_restart_retries += 1\n                            hutil.log(\"MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}\".format(me_restart_retries))\n                            me_out, me_msg = me_handler.stop_metrics_service(is_lad=True)\n                            if me_out:\n                                hutil.log(me_msg)\n                            else:\n                                hutil.error(me_msg)\n                            start_metrics_out, log_messages = me_handler.start_metrics(is_lad=True)\n                            if start_metrics_out:\n                                hutil.log(\"Successfully started metrics-extension.\")\n                            else:\n                                hutil.error(log_messages)\n                        else:\n                            hutil.error(\"MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs\".format(max_restart_retries))\n                    else:\n                        me_restart_retries = 0\n                    # 6. 
Regenerate the MSI auth token required for ME if it is nearing expiration\n                    # Generate/regenerate MSI Token required by ME\n                    global me_msi_token_expiry_epoch\n                    generate_token = False\n                    me_token_path = g_ext_dir + \"/config/metrics_configs/AuthToken-MSI.json\"\n\n                    if me_msi_token_expiry_epoch is None  or me_msi_token_expiry_epoch == \"\":\n                        if os.path.isfile(me_token_path):\n                            with open(me_token_path, \"r\") as f:\n                                authtoken_content = json.loads(f.read())\n                                if authtoken_content and \"expires_on\" in authtoken_content:\n                                    me_msi_token_expiry_epoch = authtoken_content[\"expires_on\"]\n                                else:\n                                    generate_token = True\n                        else:\n                            generate_token = True\n\n                    if me_msi_token_expiry_epoch:\n                        currentTime = datetime.datetime.now()\n                        token_expiry_time = datetime.datetime.fromtimestamp(float(me_msi_token_expiry_epoch))\n                        if token_expiry_time - currentTime < datetime.timedelta(minutes=30):\n                            # The MSI Token will expire within 30 minutes. We need to refresh the token\n                            generate_token = True\n\n                    if generate_token:\n                        generate_token = False\n                        msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token()\n                        if msi_token_generated:\n                            hutil.log(\"Successfully refreshed metrics-extension MSI Auth token.\")\n                        else:\n                            hutil.error(log_messages)\n\n            # Out of the inner while loop: mdsd terminated.\n            if mdsd_stdout_stream:\n                mdsd_stdout_stream.close()\n                mdsd_stdout_stream = None\n\n            # Check if this is NOT a quick crash -- we consider a crash quick\n            # if it's within 30 minutes from the start time. If it's not quick,\n            # we just continue by restarting mdsd.\n            mdsd_up_time = datetime.datetime.now() - last_mdsd_start_time\n            if mdsd_up_time > datetime.timedelta(minutes=30):\n                mdsd_terminated_msg = \"MDSD terminated after \" + str(mdsd_up_time) + \". \"\\\n                                      + tail(mdsd_stdout_redirect_path) + tail(err_file_path)\n                hutil.log(mdsd_terminated_msg)\n                num_quick_consecutive_crashes = 0\n                continue\n\n            # It's a quick crash. 
Log error and add an extension event.\n            num_quick_consecutive_crashes += 1\n\n            mdsd_crash_msg = \"MDSD crash(uptime=\" + str(mdsd_up_time) + \"):\" + tail(mdsd_stdout_redirect_path) + tail(err_file_path)\n            hutil.error(\"MDSD crashed:\" + mdsd_crash_msg)\n\n        # mdsd all 3 allowed quick/consecutive crashes exhausted\n        hutil.do_status_report(waagent_ext_event_type, \"error\", '1', \"mdsd stopped: \" + mdsd_crash_msg)\n        # Need to tear down omsagent setup for LAD before returning/exiting if it was set up earlier\n        oms.tear_down_omsagent_for_lad(RunGetOutput, False)\n        try:\n            waagent.AddExtensionEvent(name=hutil.get_name(),\n                                      op=waagent_ext_event_type,\n                                      isSuccess=False,\n                                      version=hutil.get_extension_version(),\n                                      message=mdsd_crash_msg)\n        except Exception:\n            pass\n\n    except Exception as e:\n        if mdsd_stdout_stream:\n            hutil.error(\"Error :\" + tail(mdsd_stdout_redirect_path))\n        errmsg = \"Failed to launch mdsd with error: {0}, traceback: {1}\".format(e, traceback.format_exc())\n        hutil.error(errmsg)\n        hutil.do_status_report(waagent_ext_event_type, 'error', '1', errmsg)\n        waagent.AddExtensionEvent(name=hutil.get_name(),\n                                  op=waagent_ext_event_type,\n                                  isSuccess=False,\n                                  version=hutil.get_extension_version(),\n                                  message=errmsg)\n    finally:\n        if mdsd_stdout_stream:\n            mdsd_stdout_stream.close()\n\n\ndef report_new_mdsd_errors(err_file_path, last_error_time):\n    \"\"\"\n    Monitors if there's any new stuff in mdsd.err and report it if any through the agent/ext status report mechanism.\n    :param err_file_path: Path of the mdsd.err file\n    :param last_error_time: Time when last error was reported.\n    :return: Time when the last error was reported. Same as the argument if there's no error reported in this call.\n             A new time (error file ctime) if a new error is reported.\n    \"\"\"\n    if not os.path.exists(err_file_path):\n        return last_error_time\n    err_file_ctime = datetime.datetime.strptime(time.ctime(int(os.path.getctime(err_file_path))), \"%a %b %d %H:%M:%S %Y\")\n    if last_error_time >= err_file_ctime:\n        return last_error_time\n    # No new error above. 
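The err file's ctime is not newer than the last reported time. 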
A new error below.\n    last_error_time = err_file_ctime\n    last_error = tail(err_file_path)\n    if len(last_error) > 0 and (datetime.datetime.now() - last_error_time) < datetime.timedelta(minutes=30):\n        # Only recent error logs (within 30 minutes) are reported.\n        hutil.log(\"Error in MDSD:\" + last_error)\n        hutil.do_status_report(g_ext_op_type, \"success\", '1',\n                               \"message in mdsd.err:\" + str(last_error_time) + \":\" + last_error)\n    return last_error_time\n\n\ndef stop_mdsd():\n    \"\"\"\n    Stop mdsd process\n    :return: 2-tuple of an exit code (always 0) and a termination status message\n    \"\"\"\n    pids = get_lad_pids()\n    if not pids:\n        return 0, \"Already stopped\"\n\n    kill_cmd = \"kill \" + \" \".join(pids)\n    hutil.log(kill_cmd)\n    RunGetOutput(kill_cmd)\n\n    terminated = False\n    num_checked = 0\n    while not terminated and num_checked < 10:\n        time.sleep(2)\n        num_checked += 1\n        pids = get_lad_pids()\n        if not pids:\n            hutil.log(\"stop_mdsd(): All processes successfully terminated\")\n            terminated = True\n        else:\n            hutil.log(\"stop_mdsd() terminate check #{0}: Processes not terminated yet, rechecking in 2 seconds\".format(\n                num_checked))\n\n    if not terminated:\n        kill_cmd = \"kill -9 \" + \" \".join(get_lad_pids())\n        hutil.log(\"stop_mdsd(): Processes not terminated in 20 seconds. Sending SIGKILL (\" + kill_cmd + \")\")\n        RunGetOutput(kill_cmd)\n\n    RunGetOutput(\"rm \" + g_lad_pids_filepath)\n\n    return 0, \"Terminated\" if terminated else \"SIGKILL'ed\"\n\n\ndef get_lad_pids():\n    \"\"\"\n    Get LAD PIDs from the previously written file\n    :return: List of 2 PIDs. One for diagnostic.py, the other for mdsd\n    \"\"\"\n    lad_pids = []\n    if not os.path.exists(g_lad_pids_filepath):\n        return lad_pids\n\n    with open(g_lad_pids_filepath, \"r\") as f:\n        for pid in f.readlines():\n            is_still_alive = RunGetOutput(\"cat /proc/\" + pid.strip() + \"/cmdline\", should_log=False)[1]\n            if is_still_alive.find('/waagent/') > 0:\n                lad_pids.append(pid.strip())\n            else:\n                hutil.log(\"Skipping stale LAD pid; recorded process is no longer alive: \" + is_still_alive.strip())\n    return lad_pids\n\n\n# Issue #128 LAD should restart OMI if it crashes\ndef restart_omi_if_crashed(omi_installed, mdsd):\n    \"\"\"\n    Restart OMI if it crashed. Called from the main monitoring loop.\n    :param omi_installed: bool indicating whether OMI was installed at the previous iteration.\n    :param mdsd: Popen object for the mdsd process, because it might need to be signaled.\n    :return: bool indicating whether OMI was installed at this iteration (from this call)\n    \"\"\"\n    omicli_path = \"/opt/omi/bin/omicli\"\n    omicli_noop_query_cmd = omicli_path + \" noop\"\n    omi_was_installed = omi_installed  # Remember the OMI install status from the last iteration\n    omi_installed = os.path.isfile(omicli_path)\n\n    if omi_was_installed and not omi_installed:\n        hutil.log(\"OMI is uninstalled. This must have been intentional and externally done. \"\n                  \"Will no longer check if OMI is up and running.\")\n\n    omi_reinstalled = not omi_was_installed and omi_installed\n    if omi_reinstalled:\n        hutil.log(\"OMI is reinstalled. 
Will resume checking if OMI is up and running.\")\n\n    should_restart_omi = False\n    if omi_installed:\n        cmd_exit_status, cmd_output = RunGetOutput(cmd=omicli_noop_query_cmd, should_log=False)\n        should_restart_omi = cmd_exit_status != 0\n        if should_restart_omi:\n            hutil.error(\"OMI noop query failed. Output: \" + cmd_output + \". OMI crash suspected. \"\n                        \"Restarting OMI and sending SIGHUP to mdsd after 10 seconds.\")\n            omi_restart_msg = RunGetOutput(\"/opt/omi/bin/service_control restart\")[1]\n            hutil.log(\"OMI restart result: \" + omi_restart_msg)\n            time.sleep(10)\n\n            # Query OMI once again to make sure restart fixed the issue.\n            # If not, attempt to re-install OMI as last resort.\n            cmd_exit_status, cmd_output = RunGetOutput(cmd=omicli_noop_query_cmd, should_log=False)\n            should_reinstall_omi = cmd_exit_status != 0\n            if should_reinstall_omi:\n                hutil.error(\"OMI noop query failed even after OMI was restarted. Attempting to re-install the components.\")\n                configurator = create_core_components_configs()\n                dependencies_err, dependencies_msg = setup_dependencies_and_mdsd(configurator)\n                if dependencies_err != 0:\n                    hutil.error(\"Re-installing the components failed with error code: \" + str(dependencies_err) + \", error message: \" + dependencies_msg)\n                    return omi_installed\n                else:\n                    omi_reinstalled = True\n\n    # mdsd needs to be signaled if OMI was restarted or reinstalled, because mdsd used to give up connecting to OMI\n    # if it failed the first time, and never retried until signaled. mdsd has since been fixed to retry, but retrying\n    # is still limited (it stops retrying beyond 30 minutes or so) and backs off exponentially,\n    # so it's still better to signal anyway.\n    should_signal_mdsd = should_restart_omi or omi_reinstalled\n    if should_signal_mdsd:\n        omi_up_and_running = RunGetOutput(omicli_noop_query_cmd)[0] == 0\n        if omi_up_and_running:\n            mdsd.send_signal(signal.SIGHUP)\n            hutil.log(\"SIGHUP sent to mdsd\")\n        else:  # OMI restarted but not staying up...\n            log_msg = \"OMI restarted but not staying up. 
Will be restarted in the next iteration.\"\n            hutil.error(log_msg)\n            # Also log this issue on syslog as well\n            syslog.openlog('diagnostic.py', syslog.LOG_PID,\n                           syslog.LOG_DAEMON)  # syslog.openlog(ident, logoption, facility) -- not taking kw args in Python 2.6\n            syslog.syslog(syslog.LOG_ALERT,\n                          log_msg)  # syslog.syslog(priority, message) -- not taking kw args\n            syslog.closelog()\n\n    return omi_installed\n\n\nif __name__ == '__main__':\n    if len(sys.argv) <= 1:\n        print('No command line argument was specified.\\nYou must be executing this program manually for testing.\\n'\n              'In that case, one of \"install\", \"enable\", \"disable\", \"uninstall\", or \"update\" should be given.')\n    else:\n        try:\n            main(sys.argv[1])\n        except Exception as e:\n            ext_version = ET.parse('manifest.xml').find('{http://schemas.microsoft.com/windowsazure}Version').text\n            msg = \"Unknown exception thrown from diagnostic.py.\\n\" \\\n                  \"Error: {0}\\nStackTrace: {1}\".format(e, traceback.format_exc())\n            wala_event_type = wala_event_type_for_telemetry(get_extension_operation_type(sys.argv[1]))\n            if len(sys.argv) == 2:\n                # Add a telemetry only if this is executed through waagent (in which\n                # we are guaranteed to have just one cmdline arg './diagnostic -xxx').\n                waagent.AddExtensionEvent(name=\"Microsoft.Azure.Diagnostic.LinuxDiagnostic\",\n                                          op=wala_event_type,\n                                          isSuccess=False,\n                                          version=ext_version,\n                                          message=msg)\n            else:\n                # Trick to print backtrace in case we execute './diagnostic.py -xxx yyy' from a terminal for testing.\n                # By just adding one more cmdline arg with any content, the above if condition becomes false,\\\n                # thus allowing us to run code here, printing the exception message with the stack trace.\n                print(msg)\n            # Need to exit with an error code, so that this situation can be detected by waagent and also\n            # reported to customer through agent/extension status blob.\n            hutil.do_exit(42, wala_event_type, 'Error', '42', msg)  # What's 42? Ask Abhi.\n"
  },
  {
    "path": "Diagnostic/lad_config_all.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n#  documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation\n#  the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n#  permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n#  Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n#  WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n#  OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n#  OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\nimport traceback\nimport xml.etree.ElementTree as ET\n\nimport Providers.Builtin as BuiltIn\nimport Utils.ProviderUtil as ProvUtil\nimport Utils.LadDiagnosticUtil as LadUtil\nimport Utils.XmlUtil as XmlUtil\nimport Utils.mdsd_xml_templates as mxt\nimport telegraf_utils.telegraf_config_handler as telhandler\nimport metrics_ext_utils.metrics_constants as metrics_constants\nimport metrics_ext_utils.metrics_ext_handler as me_handler\nfrom Utils.lad_exceptions import LadLoggingConfigException, LadPerfCfgConfigException\nfrom Utils.lad_logging_config import LadLoggingConfig, copy_source_mdsdevent_eh_url_elems\nfrom Utils.misc_helpers import get_storage_endpoints_with_account, escape_nonalphanumerics\n\n\nclass LadConfigAll:\n    \"\"\"\n    A class to generate configs for all 3 core components of LAD: mdsd, omsagent (fluentd), and syslog\n    (rsyslog or syslog-ng) based on LAD's JSON extension settings.\n    The mdsd XML config file generated will be /var/lib/waagent/Microsoft. ...-x.y.zzzz/xmlCfg.xml (hard-coded).\n    Other config files whose contents are generated by this class are as follows:\n    - /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf : fluentd's syslog source config\n    - /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf : fluentd's tail source config (fileLogs)\n    - /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf : fluentd's out_mdsd out plugin config\n    - /etc/rsyslog.conf or /etc/rsyslog.d/95-omsagent.conf: rsyslog config for LAD's syslog settings\n       The content should be appended to the corresponding file, not overwritten. After that, the file should be\n       processed so that the '%SYSLOG_PORT%' pattern is replaced with the assigned TCP port number.\n    - /etc/syslog-ng.conf: syslog-ng config for LAD's syslog settings. 
The content should be appended, not overwritten.\n    \"\"\"\n    _default_perf_cfgs = [\n        {\"query\": \"SELECT PercentAvailableMemory, AvailableMemory, UsedMemory, PercentUsedSwap \"\n                  \"FROM SCX_MemoryStatisticalInformation\",\n         \"table\": \"LinuxMemory\"},\n        {\"query\": \"SELECT PercentProcessorTime, PercentIOWaitTime, PercentIdleTime \"\n                  \"FROM SCX_ProcessorStatisticalInformation WHERE Name='_TOTAL'\",\n         \"table\": \"LinuxCpu\"},\n        {\"query\": \"SELECT AverageWriteTime,AverageReadTime,ReadBytesPerSecond,WriteBytesPerSecond \"\n                  \"FROM  SCX_DiskDriveStatisticalInformation WHERE Name='_TOTAL'\",\n         \"table\": \"LinuxDisk\"}\n    ]\n\n    def __init__(self, ext_settings, ext_dir, waagent_dir, deployment_id,\n                 fetch_uuid, encrypt_string, logger_log, logger_error):\n        \"\"\"\n        Constructor.\n        :param ext_settings: A LadExtSettings (in Utils/lad_ext_settings.py) obj wrapping the Json extension settings.\n        :param ext_dir: Extension directory (e.g., /var/lib/waagent/Microsoft.OSTCExtensions.LinuxDiagnostic-2.3.xxxx)\n        :param waagent_dir: WAAgent directory (e.g., /var/lib/waagent)\n        :param deployment_id: Deployment ID string (or None) that should be obtained & passed by the caller\n                              from waagent's HostingEnvironmentCfg.xml.\n        :param fetch_uuid: A function which fetches the UUID for the VM\n        :param encrypt_string: A function which encrypts a string, given a cert_path\n        :param logger_log: Normal logging function (e.g., hutil.log) that takes only one param for the logged msg.\n        :param logger_error: Error logging function (e.g., hutil.error) that takes only one param for the logged msg.\n        \"\"\"\n        self._ext_settings = ext_settings\n        self._ext_dir = ext_dir\n        self._waagent_dir = waagent_dir\n        self._deployment_id = deployment_id\n        self._fetch_uuid = fetch_uuid\n        self._encrypt_secret = encrypt_string\n        self._logger_log = logger_log\n        self._logger_error = logger_error\n        self._telegraf_me_url = metrics_constants.lad_metrics_extension_influx_udp_url\n        self._telegraf_mdsd_url = metrics_constants.telegraf_influx_url\n\n        # Generated logging configs place holders\n        self._fluentd_syslog_src_config = None\n        self._fluentd_tail_src_config = None\n        self._fluentd_out_mdsd_config = None\n        self._rsyslog_config = None\n        self._syslog_ng_config = None\n        self._telegraf_config = None\n        self._telegraf_namespaces = None\n\n        self._mdsd_config_xml_tree = ET.ElementTree(ET.fromstring(mxt.entire_xml_cfg_tmpl))\n        self._sink_configs = LadUtil.SinkConfiguration()\n        self._sink_configs.insert_from_config(self._ext_settings.read_protected_config('sinksConfig'))\n\n        # Reading the AzMonSink info from the public config. \n        self._sink_configs_public = LadUtil.SinkConfiguration()\n        self._sink_configs_public.insert_from_config(self._ext_settings.read_public_config('sinksConfig'))\n        # If we decide to also read sinksConfig from ladCfg, do it first, so that private settings override\n\n        # Get encryption settings\n        handlerSettings = ext_settings.get_handler_settings()\n\n        if handlerSettings['protectedSettings'] is None:\n            errorMsg = \"Settings did not contain protectedSettings. 
For information on protected settings, \" \\\n                          \"visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings.\"\n            self._logger_error(errorMsg)\n            raise LadLoggingConfigException(errorMsg)\n\n        if handlerSettings['protectedSettingsCertThumbprint'] is None:\n            errorMsg = \"Settings did not contain protectedSettingsCertThumbprint. For information on protected settings, \" \\\n                          \"visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings.\"\n            self._logger_error(errorMsg)\n            raise LadLoggingConfigException(errorMsg)\n\n        thumbprint = handlerSettings['protectedSettingsCertThumbprint']\n\n        self._cert_path = os.path.join(waagent_dir, thumbprint + '.crt')\n        self._pkey_path = os.path.join(waagent_dir, thumbprint + '.prv')\n\n    def _ladCfg(self):\n        return self._ext_settings.read_public_config('ladCfg')\n\n    @staticmethod\n    def _wad_table_name(interval):\n        \"\"\"\n        Build the name and storetype of a metrics table based on the aggregation interval and presence/absence of sinks\n        :param str interval: String representation of aggregation interval\n        :return: table name\n        :rtype: str\n        \"\"\"\n        return 'WADMetrics{0}P10DV2S'.format(interval)\n\n    def _add_element_from_string(self, path, xml_string, add_only_once=True):\n        \"\"\"\n        Add an XML fragment to the mdsd config document in accordance with path\n        :param str path: Where to add the fragment\n        :param str xml_string: A string containing the XML element to add\n        :param bool add_only_once: Indicates whether to perform the addition only to the first match of the path.\n        \"\"\"\n        XmlUtil.addElement(xml=self._mdsd_config_xml_tree, path=path, el=ET.fromstring(xml_string),\n                           addOnlyOnce=add_only_once)\n\n    def _add_element_from_element(self, path, xml_elem, add_only_once=True):\n        \"\"\"\n        Add an XML fragment to the mdsd config document in accordance with path\n        :param str path: Where to add the fragment\n        :param ElementTree xml_elem: An ElementTree object XML fragment that should be added to the path.\n        :param bool add_only_once: Indicates whether to perform the addition only to the first match of the path.\n        \"\"\"\n        XmlUtil.addElement(xml=self._mdsd_config_xml_tree, path=path, el=xml_elem, addOnlyOnce=add_only_once)\n\n    def _add_derived_event(self, interval, source, event_name, store_type, add_lad_query=False):\n        \"\"\"\n        Add a <DerivedEvent> element to the configuration\n        :param str interval: Interval at which this DerivedEvent should be run\n        :param str source: Local table from which this DerivedEvent should pull\n        :param str event_name: Destination table to which this DerivedEvent should push\n        :param str store_type: The storage type of the destination table, e.g. 
Local, Central, JsonBlob\n        :param bool add_lad_query: True if a <LadQuery> subelement should be added to this <DerivedEvent> element\n        \"\"\"\n        derived_event = mxt.derived_event.format(interval=interval, source=source, target=event_name, type=store_type)\n        element = ET.fromstring(derived_event)\n        if add_lad_query:\n            XmlUtil.addElement(element, \".\", ET.fromstring(mxt.lad_query))\n        self._add_element_from_element('Events/DerivedEvents', element)\n\n    def _add_obo_field(self, name, value):\n        \"\"\"\n        Add an <OboDirectPartitionField> element to the <Management> element.\n        :param name: Name of the field\n        :param value: Value for the field\n        \"\"\"\n        self._add_element_from_string('Management', mxt.obo_field.format(name=name, value=value))\n\n    def _update_metric_collection_settings(self, ladCfg, namespaces):\n        \"\"\"\n        Update mdsd_config_xml_tree for Azure Portal metric collection. This method builds the necessary aggregation queries\n        that grind the ingested data and push it to the WADmetric table.\n        :param ladCfg: ladCfg object from extension config\n        :param namespaces: list of telegraf plugins sources obtained after parsing lad metrics config\n        :return: None\n        \"\"\"\n\n        # Aggregation is done by <LADQuery> within a <DerivedEvent>. If there are no alternate sinks, the DerivedQuery\n        # can send output directly to the WAD metrics table. If there *are* alternate sinks, have the LADQuery send\n        # output to a new local table, then arrange for additional derived queries to pull from that.\n\n        intervals = LadUtil.getAggregationPeriodsFromLadCfg(ladCfg)\n        sinks = LadUtil.getFeatureWideSinksFromLadCfg(ladCfg, 'performanceCounters')\n        for plugin in namespaces:\n            lad_specific_storage_plugin = \"storage-\" + plugin\n            for aggregation_interval in intervals:\n                if sinks:\n                    local_table_name = ProvUtil.MakeUniqueEventName('aggregationLocal')\n                    self._add_derived_event(aggregation_interval, lad_specific_storage_plugin,\n                                            local_table_name,\n                                            'Local', add_lad_query=True)\n                    self._handle_alternate_sinks(aggregation_interval, sinks, local_table_name)\n                else:\n                    self._add_derived_event(aggregation_interval, lad_specific_storage_plugin,\n                                            LadConfigAll._wad_table_name(aggregation_interval),\n                                            'Central', add_lad_query=True)\n\n    def _handle_alternate_sinks(self, interval, sinks, source):\n        \"\"\"\n        Update the XML config to accommodate alternate data sinks. 
Start by pumping the data from the local source to\n        the actual wad table; then run through the sinks and add annotations or additional DerivedEvents as needed.\n        :param str interval: Aggregation interval\n        :param [str] sinks: List of alternate destinations\n        :param str source: Name of local table from which data is to be pumped\n        :return:\n        \"\"\"\n        self._add_derived_event(interval, source, LadConfigAll._wad_table_name(interval), 'Central')\n        for name in sinks:\n            sink = self._sink_configs.get_sink_by_name(name)\n            if sink is None:\n                self._logger_log(\"Ignoring sink '{0}' for which no definition was found\".format(name))\n            elif sink['type'] == 'EventHub':\n                if 'sasURL' in sink:\n                    self._add_streaming_annotation(source, sink['sasURL'])\n                else:\n                    self._logger_error(\"Ignoring EventHub sink '{0}': no 'sasURL' was supplied\".format(name))\n            elif sink['type'] == 'JsonBlob':\n                self._add_derived_event(interval, source, name, 'JsonBlob')\n            else:\n                self._logger_log(\"Ignoring sink '{0}': unknown type '{1}'\".format(name, sink['type']))\n\n\n    def _add_streaming_annotation(self, sink_name, sas_url):\n        \"\"\"\n        Helper to add an EventStreamingAnnotation element for the given sink_name and sas_url\n        :param str sink_name: Name of the EventHub sink name for the SAS URL\n        :param str sas_url: Raw SAS URL string for the EventHub sink\n        \"\"\"\n        self._add_element_from_string('EventStreamingAnnotations',\n                                      mxt.per_eh_url_tmpl.format(eh_name=sink_name,\n                                                                 key_path=self._pkey_path,\n                                                                 enc_eh_url=self._encrypt_secret_with_cert(sas_url)))\n\n\n    def _encrypt_secret_with_cert(self, secret):\n        \"\"\"\n        update_account_settings() helper.\n        :param secret: Secret to encrypt\n        :return: Encrypted secret string. 
None if the openssl command execution fails.\n        \"\"\"\n        return self._encrypt_secret(self._cert_path, secret)\n\n    def _update_account_settings(self, account, token, endpoints):\n        \"\"\"\n        Update the MDSD configuration Account element with Azure table storage properties.\n        A SAS token must be provided; storage account keys are not supported in LAD 3.0.\n        :param account: Storage account to which LAD should write data\n        :param token: SAS token to access the storage account\n        :param endpoints: Identifies the Azure storage endpoints (public or specific sovereign cloud) where the storage account is hosted\n        \"\"\"\n        assert token, \"Token must be given.\"\n        assert self._mdsd_config_xml_tree is not None\n\n        token = self._encrypt_secret_with_cert(token)\n        assert token, \"Could not encrypt token\"\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Accounts/SharedAccessSignature',\n                            \"account\", account, ['isDefault', 'true'])\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Accounts/SharedAccessSignature',\n                            \"key\", token, ['isDefault', 'true'])\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Accounts/SharedAccessSignature',\n                            \"decryptKeyPath\", self._pkey_path, ['isDefault', 'true'])\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Accounts/SharedAccessSignature',\n                            \"tableEndpoint\", endpoints[0], ['isDefault', 'true'])\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Accounts/SharedAccessSignature',\n                            \"blobEndpoint\", endpoints[1], ['isDefault', 'true'])\n        XmlUtil.removeElement(self._mdsd_config_xml_tree, 'Accounts', 'Account')\n\n    def _set_xml_attr(self, key, value, xml_path, selector=[]):\n        \"\"\"\n        Set an XML attribute on the element specified with xml_path.\n        :param key: The attribute name to set on the XML element.\n        :param value: The default value to be set, if there's no public config for that attribute.\n        :param xml_path: The path of the XML element(s) to which the attribute is applied.\n        :param selector: Selector for finding the actual XML element (see XmlUtil.setXmlValue)\n        :return: None. The change is directly applied to the mdsd_config_xml_tree XML member object.\n        \"\"\"\n        assert self._mdsd_config_xml_tree is not None\n\n        v = self._ext_settings.read_public_config(key)\n        if not v:\n            v = value\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, xml_path, key, v, selector)\n\n    def _set_event_volume(self, lad_cfg):\n        \"\"\"\n        Set the event volume in the mdsd config. Check if a desired event volume is specified,\n        first in ladCfg and then in the public config. If it is in neither, default to Medium.\n        :param lad_cfg: 'ladCfg' JSON object in which to look up the event volume setting.\n        :return: None. 
The mdsd config XML tree's eventVolume attribute is directly updated.\n        \"\"\"\n        assert self._mdsd_config_xml_tree is not None\n\n        event_volume = LadUtil.getEventVolumeFromLadCfg(lad_cfg)\n        if event_volume:\n            self._logger_log(\"Event volume found in ladCfg: \" + event_volume)\n        else:\n            event_volume = self._ext_settings.read_public_config(\"eventVolume\")\n            if event_volume:\n                self._logger_log(\"Event volume found in public config: \" + event_volume)\n            else:\n                event_volume = \"Medium\"\n                self._logger_log(\"Event volume not found in config. Using default value: \" + event_volume)\n        XmlUtil.setXmlValue(self._mdsd_config_xml_tree, \"Management\", \"eventVolume\", event_volume)\n\n    ######################################################################\n    # This is the main API called by the user. All other methods are\n    # actually helpers for this one, and are thus made private by convention.\n    ######################################################################\n    def generate_all_configs(self):\n        \"\"\"\n        Generates configs for all components required by LAD.\n        Generates the XML cfg file for mdsd, from the JSON config settings (public & private).\n        Also generates rsyslog/syslog-ng configs corresponding to the 'syslogEvents' or 'syslogCfg' setting.\n        Also generates fluentd's syslog/tail src configs and out_mdsd configs.\n        The rsyslog/syslog-ng and fluentd configs are not yet saved to files. They are available through\n        the corresponding getter methods of this class (get_fluentd_*_config(), get_*syslog*_config()).\n\n        Returns (True, '') if the config was valid and a proper xmlCfg.xml was generated.\n        Returns (False, <error message>) if the config was invalid.\n        \"\"\"\n\n        # 1. Add DeploymentId (if available) to identity columns\n        if self._deployment_id:\n            XmlUtil.setXmlValue(self._mdsd_config_xml_tree, \"Management/Identity/IdentityComponent\", \"\",\n                                self._deployment_id, [\"name\", \"DeploymentId\"])\n\n        # 2. Generate telegraf, MetricsExtension, omsagent (fluentd) configs, rsyslog/syslog-ng config, and update the corresponding mdsd config XML\n        try:\n            lad_cfg = self._ladCfg()\n            if not lad_cfg:\n                return False, 'Unable to find ladCfg element. 
Failed to generate configs for fluentd, syslog, and mdsd ' \\\n                          '(see extension error logs for more details)'\n\n            syslogEvents_setting = self._ext_settings.get_syslogEvents_setting()\n            fileLogs_setting = self._ext_settings.get_fileLogs_setting()\n            lad_logging_config_helper = LadLoggingConfig(syslogEvents_setting, fileLogs_setting, self._sink_configs,\n                                                         self._pkey_path, self._cert_path, self._encrypt_secret)\n            mdsd_syslog_config = lad_logging_config_helper.get_mdsd_syslog_config(self._ext_settings.read_protected_config('disableStorageAccount') == True)\n            mdsd_filelog_config = lad_logging_config_helper.get_mdsd_filelog_config()\n            copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree, mdsd_syslog_config)\n            copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree, mdsd_filelog_config)\n            self._fluentd_syslog_src_config = lad_logging_config_helper.get_fluentd_syslog_src_config()\n            self._fluentd_tail_src_config = lad_logging_config_helper.get_fluentd_filelog_src_config()\n            self._fluentd_out_mdsd_config = lad_logging_config_helper.get_fluentd_out_mdsd_config()\n            self._rsyslog_config = lad_logging_config_helper.get_rsyslog_config()\n            self._syslog_ng_config = lad_logging_config_helper.get_syslog_ng_config()\n            parsed_perf_settings = lad_logging_config_helper.parse_lad_perf_settings(lad_cfg)\n            if len(parsed_perf_settings) > 0:\n                self._telegraf_config, self._telegraf_namespaces = telhandler.handle_config(parsed_perf_settings, self._telegraf_me_url, self._telegraf_mdsd_url, True)\n\n                #Handle the EH, JsonBlob and AzMonSink logic\n                self._update_metric_collection_settings(lad_cfg, self._telegraf_namespaces)\n                mdsd_telegraf_config = lad_logging_config_helper.get_mdsd_telegraf_config(self._telegraf_namespaces)\n                copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree, mdsd_telegraf_config)\n\n            resource_id = self._ext_settings.get_resource_id()\n            if resource_id:\n                # Set JsonBlob sink-related elements\n                uuid_for_instance_id = self._fetch_uuid()\n                self._add_obo_field(name='resourceId', value=resource_id)\n                self._add_obo_field(name='agentIdentityHash', value=uuid_for_instance_id)\n\n                XmlUtil.setXmlValue(self._mdsd_config_xml_tree, 'Events/DerivedEvents/DerivedEvent/LADQuery',\n                                    'partitionKey', escape_nonalphanumerics(resource_id))\n                lad_query_instance_id = \"\"\n                if resource_id.find(\"providers/Microsoft.Compute/virtualMachineScaleSets\") >= 0:\n                    lad_query_instance_id = uuid_for_instance_id\n                self._set_xml_attr(\"instanceID\", lad_query_instance_id, \"Events/DerivedEvents/DerivedEvent/LADQuery\")\n            else:\n                self._logger_log('Unable to find resource id in the config. Failed to generate configs for Metrics in mdsd ' \\\n                        '(see extension error logs for more details)')\n\n            #Only enable Metrics if AzMonSink is in the config\n            azmonsink = self._sink_configs_public.get_sink_by_name(\"AzMonSink\")\n            if azmonsink is None:\n                self._logger_log(\"Did not find AzMonSink in public config. 
Will not set up custom metrics through ME.\")\n            else:\n                self._logger_log(\"Found AzMonSink in public config. Setting up custom metrics through ME.\")\n                me_handler.setup_me(True)\n\n        except Exception as e:\n            self._logger_error(\"Failed to create omsagent (fluentd), rsyslog/syslog-ng configs, telegraf config or to update \"\n                               \"corresponding mdsd config XML. Error: {0}\\nStacktrace: {1}\"\n                               .format(e, traceback.format_exc()))\n            return False, 'Failed to generate configs for fluentd, syslog, and mdsd; see extension.log for more details.'\n\n        # 3. Before starting to update the storage account settings, log extension's entire settings\n        #    with secrets redacted, for diagnostic purpose.\n        self._ext_settings.log_ext_settings_with_secrets_redacted(self._logger_log, self._logger_error)\n\n        # 4. Actually update the storage account settings on mdsd config XML tree (based on extension's\n        #    protectedSettings).\n        account = self._ext_settings.read_protected_config('storageAccountName').strip()\n        if not account:\n            return False, \"Configuration Error: Must specify storageAccountName in protected settings. For information on protected settings, \" \\\n                          \"visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings.\"\n        if self._ext_settings.read_protected_config('storageAccountKey'):\n            return False, \"Configuration Error: The storageAccountKey protected setting is deprecated in LAD 3.0 and cannot be used. \" \\\n                          \"Instead, use the storageAccountSasToken setting. For documentation of this setting and instructions for generating \" \\\n                          \"a SAS token, visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings.\"\n        token = self._ext_settings.read_protected_config('storageAccountSasToken').strip()\n        if not token or token == '?':\n            return False, \"Configuration Error: Must specify storageAccountSasToken in the protected settings. For documentation of this setting and instructions \" \\\n                          \"for generating a SAS token, visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings.\"\n        if '?' == token[0]:\n            token = token[1:]\n        endpoints = get_storage_endpoints_with_account(account,\n                                                     self._ext_settings.read_protected_config('storageAccountEndPoint'))\n        self._update_account_settings(account, token, endpoints)\n\n        # 5. Update mdsd config XML's eventVolume attribute based on the logic specified in the helper.\n        self._set_event_volume(lad_cfg)\n\n        # 6. 
Finally, generate the mdsd config XML file from the constructed XML tree object.\n        self._mdsd_config_xml_tree.write(os.path.join(self._ext_dir, 'xmlCfg.xml'))\n\n        return True, \"\"\n\n    @staticmethod\n    def __throw_if_output_is_none(output):\n        \"\"\"\n        Helper that raises if the requested output has not been generated yet (i.e., it is still None).\n        :return: None\n        \"\"\"\n        if output is None:\n            raise LadLoggingConfigException('LadConfigAll.get_*_config() should be called after '\n                                            'LadConfigAll.generate_all_configs() is called')\n\n    def get_fluentd_syslog_src_config(self):\n        \"\"\"\n        Returns the generated Fluentd syslog src config. This getter (and all that follow) should be called\n        after self.generate_all_configs() is called.\n        The return value should be written to /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf\n        (overwriting it), after replacing '%SYSLOG_PORT%' with the assigned TCP port number.\n        :rtype: str\n        :return: Fluentd syslog src config string\n        \"\"\"\n        LadConfigAll.__throw_if_output_is_none(self._fluentd_syslog_src_config)\n        return self._fluentd_syslog_src_config\n\n    def get_fluentd_tail_src_config(self):\n        \"\"\"\n        Returns the generated Fluentd tail src config. This getter (and all that follow) should be called\n        after self.generate_all_configs() is called.\n        The return value should be written to /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf\n        (overwriting it).\n        :rtype: str\n        :return: Fluentd tail src config string\n        \"\"\"\n        LadConfigAll.__throw_if_output_is_none(self._fluentd_tail_src_config)\n        return self._fluentd_tail_src_config\n\n    def get_fluentd_out_mdsd_config(self):\n        \"\"\"\n        Returns the generated Fluentd out_mdsd config. This getter (and all that follow) should be called\n        after self.generate_all_configs() is called.\n        The return value should be written to /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf\n        (overwriting it).\n        :rtype: str\n        :return: Fluentd out_mdsd config string\n        \"\"\"\n        LadConfigAll.__throw_if_output_is_none(self._fluentd_out_mdsd_config)\n        return self._fluentd_out_mdsd_config\n\n    def get_rsyslog_config(self):\n        \"\"\"\n        Returns the generated rsyslog config. This getter (and all that follow) should be called\n        after self.generate_all_configs() is called.\n        The return value should be appended to /etc/rsyslog.d/95-omsagent.conf if the rsyslog version is new (that is, if\n        /etc/rsyslog.d/ exists). It should be appended to /etc/rsyslog.conf if the rsyslog version is old (no /etc/rsyslog.d/).\n        The appended file (either /etc/rsyslog.d/95-omsagent.conf or /etc/rsyslog.conf) should be processed so that\n        the '%SYSLOG_PORT%' pattern in the file is replaced with the assigned TCP port number.\n        :rtype: str\n        :return: rsyslog config string\n        \"\"\"\n        LadConfigAll.__throw_if_output_is_none(self._rsyslog_config)\n        return self._rsyslog_config\n\n    def get_syslog_ng_config(self):\n        \"\"\"\n        Returns the generated syslog-ng config. This getter (and all that follow) should be called\n        after self.generate_all_configs() is called.\n        The return value should be appended to /etc/syslog-ng.conf.\n        The appended file (/etc/syslog-ng.conf) should be processed so that\n        the '%SYSLOG_PORT%' pattern in the file is replaced with the assigned TCP port number.\n        :rtype: str\n        :return: syslog-ng config string\n        \"\"\"\n        LadConfigAll.__throw_if_output_is_none(self._syslog_ng_config)\n        return self._syslog_ng_config\n\n"
  },
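As an aside on how this class is driven: the constructor and `generate_all_configs()` above are the whole public surface, so a caller's flow is construct, generate, then pull each config string through the getters. The sketch below is illustrative only: `StubSettings` is a hypothetical stand-in for the real `LadExtSettings` wrapper (its stubbed return values would need fleshing out for a real run), the lambdas replace the real UUID/encryption helpers, and the module and extension-directory names are assumed.

```python
# Hypothetical driver sketch (Python 3); assumes the Diagnostic/ source tree is on sys.path.
from lad_config_all import LadConfigAll  # module name assumed from the class above

class StubSettings(object):
    """Minimal stand-in exposing only the methods LadConfigAll actually calls."""
    def read_protected_config(self, key):
        return None  # real settings supply storageAccountName/SasToken, sinksConfig, ...
    def read_public_config(self, key):
        return None  # real settings supply ladCfg, sinksConfig, eventVolume, ...
    def get_handler_settings(self):
        # Both keys must be non-None, or __init__ raises LadLoggingConfigException.
        return {'protectedSettings': {}, 'protectedSettingsCertThumbprint': 'THUMBPRINT'}

cfg = LadConfigAll(StubSettings(),
                   ext_dir='/var/lib/waagent/Microsoft.Azure.Diagnostics.LinuxDiagnostic-x.y.zzzz',
                   waagent_dir='/var/lib/waagent', deployment_id=None,
                   fetch_uuid=lambda: '00000000-0000-0000-0000-000000000000',
                   encrypt_string=lambda cert_path, secret: secret,  # real helper encrypts via cert
                   logger_log=print, logger_error=print)

ok, msg = cfg.generate_all_configs()  # writes <ext_dir>/xmlCfg.xml when the settings are real
if ok:
    # '%SYSLOG_PORT%' must be substituted before the syslog config is installed.
    syslog_conf = cfg.get_fluentd_syslog_src_config().replace('%SYSLOG_PORT%', '25224')
```

With the stub above, `generate_all_configs()` returns `(False, <error message>)` as soon as it fails to find `ladCfg`; only a settings object backed by real public/protected settings produces the full set of configs.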
  {
    "path": "Diagnostic/lad_mdsd.te",
    "content": "# SELinux policy for mdsd on LAD, obtained by \"grep mdsd /var/log/audit/audit.log | audit2allow -m lad_mdsd.te\"\n# Note it combines different types (unconfined_t and initrc_t) to support both Redhat policy and CentOS policy\n\nmodule lad_mdsd 1.0;\n\nrequire {\n\ttype unconfined_t;\n\ttype initrc_t;\n\ttype syslogd_t;\n\ttype var_run_t;\n\tclass sock_file write;\n\tclass unix_stream_socket connectto;\n}\n\n#============= syslogd_t ==============\nallow syslogd_t unconfined_t:unix_stream_socket connectto;\nallow syslogd_t initrc_t:unix_stream_socket connectto;\nallow syslogd_t var_run_t:sock_file write;\n"
  },
  {
    "path": "Diagnostic/license.txt",
    "content": "Linux Azure Diagnostic Extension v.2.3.9\nCopyright (c) Microsoft Corporation\nAll rights reserved. \nMIT License\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "Diagnostic/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.Azure.Diagnostics</ProviderNameSpace>\n  <Type>LinuxDiagnostic</Type>\n  <Version>4.1.12</Version>\n  <Label>Microsoft Azure Diagnostic Extension for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Azure Diagnostic Extension for Linux Virtual Machines</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
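The manifest's Version element (4.1.12 here) is where the shipped extension version is declared. It also declares a default XML namespace, so a plain ElementTree lookup must qualify the tag. A minimal stand-alone sketch, with the file path assumed:

```python
# Stand-alone sketch: read the extension version out of manifest.xml (path assumed).
import xml.etree.ElementTree as ET

ns = {'ext': 'http://schemas.microsoft.com/windowsazure'}  # default namespace from the file
root = ET.parse('Diagnostic/manifest.xml').getroot()       # <ExtensionImage>
print(root.find('ext:Version', ns).text)                   # -> 4.1.12
```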
  {
    "path": "Diagnostic/mdsd/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.6)\nproject(mdsd)\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} \"${CMAKE_SOURCE_DIR}/cmake/Modules/\")\n\n# Platform (not compiler) specific settings\nif(UNIX) # This includes Linux\n    message(\"Build for Unix/Linux OS\")\nelse()\n  \tmessage(\"-- Unsupported Build Platform.\")\nendif()\n\n# Compiler (not platform) specific settings\nif(\"${CMAKE_CXX_COMPILER_ID}\" MATCHES \"Clang\")\n    message(\"-- Setting clang options\")\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -stdlib=libc++\")\n    set(LINKSTDLIB \"c++\")\n    set(LIBSUFFIX \"-clang\")\n    set(WARNINGS \"${WARNINGS} -Wno-deprecated-register\")\nelseif(\"${CMAKE_CXX_COMPILER_ID}\" MATCHES \"GNU\")\n    message(\"-- Setting gcc options\")\n    set(WARNINGS \"${WARNINGS} -Wno-unused-local-typedefs\")\nelse()\n  \tmessage(\"-- Unknown compiler, success is doubtful.\")\nendif()\n\n# To turn off the option from cmdline, run: cmake -DBUILD_TESTS=OFF ...\noption(BUILD_TESTS \"Build tests.\" ON)\n# To add code coverage build options\noption(BUILD_COV \"Build with code coverage.\" OFF)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++11\")\n\n# Common flags for both C and C++\nset(COMM_FLAGS \"${COMM_FLAGS} -fstack-protector-all\")\nset(COMM_FLAGS \"${COMM_FLAGS} -fPIC\")\nset(COMM_FLAGS \"${COMM_FLAGS} -D_FORTIFY_SOURCE=2\")\nset(COMM_FLAGS \"${COMM_FLAGS} -ffunction-sections\")\n\nif(BUILD_COV)\nset(COMM_FLAGS \"${COMM_FLAGS} -fprofile-arcs\")\nset(COMM_FLAGS \"${COMM_FLAGS} -ftest-coverage\")\nendif()\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${COMM_FLAGS}\")\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${COMM_FLAGS}\")\n\nset(WARNINGS \"${WARNINGS} -Wall\")\nset(WARNINGS \"${WARNINGS} -Wextra\")\n\nset(WARNINGS \"${WARNINGS} -Wno-unknown-pragmas\")\nset(WARNINGS \"${WARNINGS} -Wno-unused-parameter\")\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${WARNINGS}\")\n\nset(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -ggdb\")\nset(CMAKE_C_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -ggdb\")\n\nset(LINKER_FLAGS \"-Wl,-z,relro -Wl,-z,now\")\nset(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} ${LINKER_FLAGS}\")\nset(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${LINKER_FLAGS}\")\n\n# Build static library only\noption(BUILD_SHARED_LIBS \"Build shared Libraries.\" OFF)\n\nset(OMI_INCLUDE_DIRS\n    /usr/include/omi\n    /usr/include/omi/common\n    /usr/include/omi/output/include\n    /usr/include/omi/micxx\n)\nset(OMI_LIB_PATH \"/opt/omi/lib\")\n\nset(CASABLANCA_INCLUDE_DIRS \"/usr/include/cpprest\")\nset(CASABLANCA_LIBRARIES \"/usr/lib/x86_64-linux-gnu/libcpprest${LIBSUFFIX}.a\")\n\nset(STORAGE_INCLUDE_DIRS \"/usr/include/azurestorage\")\nset(STORAGE_LIBRARIES \"/usr/lib/x86_64-linux-gnu/libazurestorage${LIBSUFFIX}.a\")\n\nset(MDSD_LIB_NAME mdsd-lib${LIBSUFFIX})\nset(LOG_LIB_NAME mdsdlog${LIBSUFFIX})\nset(UTIL_LIB_NAME mdsdutil${LIBSUFFIX})\nset(CMD_LIB_NAME mdscommands${LIBSUFFIX})\nset(INPUT_LIB_NAME mdsdinput${LIBSUFFIX})\nset(MDSDCFG_LIB_NAME mdsdcfg${LIBSUFFIX})\nset(MDSREST_LIB_NAME mdsrest${LIBSUFFIX})\n\n# Set rpath for all executables including mdsd, tests, etc\nSET(CMAKE_SKIP_BUILD_RPATH  FALSE)\nSET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)\nSET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n\nadd_subdirectory(mdsdlog)\nadd_subdirectory(mdsdutil)\nadd_subdirectory(mdscommands)\nadd_subdirectory(mdsdinput)\nadd_subdirectory(mdsdcfg)\nadd_subdirectory(mdsrest)\nadd_subdirectory(mdsd)\n"
  },
  {
    "path": "Diagnostic/mdsd/Dockerfile",
    "content": "FROM ubuntu:trusty\n\nRUN apt-get update && apt-get install -y software-properties-common\n\nRUN apt-get update && \\\n    apt-get install -y sudo apt-utils openssh-server wget unzip git build-essential libtool && \\\n    apt-get upgrade -y && apt-get dist-upgrade -y\n\nEXPOSE 22\n\nRUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 50 && \\\n    update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50\n\nRUN apt-get update && \\\n    apt-get install -y psmisc libxml++2.6-dev uuid-dev python-software-properties zlib1g-dev \\\n                       libssl1.0.0 libssl-dev cmake rpm liblzma-dev libjson-c-dev libjson-c2\n\nRUN apt-get update && ver=1.55 && \\\n    apt-get install -y libboost$ver-dev libboost-system$ver-dev libboost-thread$ver-dev \\\n                       libboost-filesystem$ver-dev libboost-random$ver-dev libboost-locale$ver-dev \\\n\t\t       libboost-regex$ver-dev libboost-iostreams$ver-dev libboost-log$ver-dev\n\nRUN apt-get update && ver=1.55.0 && \\\n    apt-get install -y libboost-system$ver libboost-thread$ver libboost-filesystem$ver \\\n                       libboost-random$ver libboost-locale$ver libboost-regex$ver \\\n\t\t       libboost-iostreams$ver libboost-log$ver\n\nADD azure.list /etc/apt/sources.list.d/azure.list\nRUN apt-key adv --keyserver packages.microsoft.com --recv-keys B02C46DF417A0893 && \\\n    apt-get install apt-transport-https\n\nRUN apt-get update && \\\n    apt-get install -y libcpprest-dev libazurestorage-dev libomi-dev libcpprest \\\n                       libazurestorage omi libbond-dev\n"
  },
  {
    "path": "Diagnostic/mdsd/LICENSE.txt",
    "content": "------------------------------------------ START OF LICENSE -----------------------------------------\n\nLinux mdsd Agent\n\nCopyright (c) Microsoft Corporation\n\nAll rights reserved. \n\nMIT License\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"\"Software\"\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\nLIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n----------------------------------------------- END OF LICENSE ------------------------------------------\n"
  },
  {
    "path": "Diagnostic/mdsd/README.md",
    "content": "# mdsd agent\n\nThe mdsd agent  is the workhorse binary for the Linux Diagnostic Extension. The LAD extension constructs an mdsd configuration file based on the LAD configuration.\n\n## Dependencies\n\nThe Dockerfile defines an environment sufficient to build the mdsd binary. Most dependencies are satisfied by the Ubuntu \"trusty\" repositories. The exceptions are for open-source components released by Microsoft. These components are available in source form from github. For convenience, Microsoft has made installable .deb packages available in a public \"azurecore\" repository, which the Dockerfile references. These components are:\n\n- [CPPrest, a.k.a. \"Casablanca\"](https://github.com/Microsoft/cpprestsdk)\n- [Azure Storage SDK](https://github.com/Azure/azure-storage-cpp)\n- [Microsoft bond](https://github.com/Microsoft/bond)\n- [Open Management Infrastructure (OMI)](https://github.com/Microsoft/omi)\n\n## Building the program\n\nRun `buildcmake.sh` with the appropriate options. This will build all the necessary Makefiles, then build the program, then construct .deb and .rpm packages containing the built binary. For maximum portability across distros, the mdsd binary is built to use static libraries whenever possible. Build artifacts are dropped under `builddir` (which is symlinked to the actual directory hierarchy, which will differ based on the choice of debug vs optimized build). The release packages appear under the `lad-mdsd` directory.\n\n## Future direction\n\nOver time, the capabilities of this monolithic binary are likely be broken out into fluentd plug-ins. This will significantly reduce the amount of code involved and will enable  more flexible growth of the LAD extension.\n"
  },
  {
    "path": "Diagnostic/mdsd/SampleConfig-LAD-SAS.xml",
    "content": "<MonitoringManagement eventVersion=\"2\" namespace=\"\" timestamp=\"2014-12-01T20:00:00.000\" version=\"1.0\">\n <Accounts>\n <SharedAccessSignature moniker=\"ladtestmoniker\" account=\"ladtest\" isDefault=\"true\" key=\"valid account SAS with ss including bt, srt sco, sp rwlacu. Expiry should be infinite.\" />\n </Accounts>\n  <Management defaultRetentionInDays=\"90\" eventVolume=\"Medium\">\n    <Identity>\n      <IdentityComponent name=\"DeploymentId\">c9c8552dc3a1421da8ecc0c284082a39</IdentityComponent>\n      <IdentityComponent name=\"Host\" useComputerName=\"true\" />\n    </Identity>\n    <AgentResourceUsage diskQuotaInMB=\"50000\" />\n  </Management>\n\n    <Schemas>\n    <Schema name=\"syslog\">\n      <Column mdstype=\"mt:wstr\" name=\"Ignore\" type=\"str\" />\n      <Column mdstype=\"mt:wstr\" name=\"Facility\" type=\"str\" />\n      <Column mdstype=\"mt:int32\" name=\"Severity\" type=\"str\" />\n      <Column mdstype=\"mt:utc\" name=\"EventTime\" type=\"str-rfc3339\" />\n      <Column mdstype=\"mt:wstr\" name=\"SendingHost\" type=\"str\" />\n      <Column mdstype=\"mt:wstr\" name=\"Msg\" type=\"str\" />\n    </Schema>\n\n    <Schema name=\"ladfile\">\n      <Column mdstype=\"mt:wstr\" name=\"FileTag\" type=\"str\" />\n      <Column mdstype=\"mt:wstr\" name=\"Msg\" type=\"str\" />\n    </Schema>\n\n  </Schemas>\n\n\n  <Sources>\n    <Source name=\"syslog\" schema=\"syslog\" />\n  </Sources>\n\n <Events>\n    <MdsdEvents>\n      <MdsdEventSource source=\"syslog\">\n        <RouteEvent dontUsePerNDayTable=\"true\" eventName=\"Linuxsyslog\" priority=\"High\" />\n      </MdsdEventSource>\n    </MdsdEvents>\n\n<OMI>\n<OMIQuery cqlQuery=\"SELECT PercentAvailableMemory FROM SCX_MemoryStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"DiagnosticTestMemory\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"60\" /><OMIQuery cqlQuery=\"SELECT PercentProcessorTime, PercentIOWaitTime, PercentIdleTime FROM SCX_ProcessorStatisticalInformation WHERE Name='_TOTAL'\" dontUsePerNDayTable=\"true\" eventName=\"DiagnosticTestCpu\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"60\" /></OMI>\n\n\n<OMI>\n    <OMIQuery cqlQuery=\"SELECT AvailableMemory, PercentAvailableMemory, UsedMemory, PercentUsedMemory, PercentUsedByCache, PagesPerSec, PagesReadPerSec, PagesWrittenPerSec, AvailableSwap, PercentAvailableSwap, UsedSwap, PercentUsedSwap FROM SCX_MemoryStatisticalInformation \" eventName=\"memoryStats\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n        <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"AvailableMemory, PercentAvailableMemory, UsedMemory,PercentUsedMemory, PercentUsedByCache, PagesPerSec, PagesReadPerSec, PagesWrittenPerSec, AvailableSwap, PercentAvailableSwap, UsedSwap, PercentUsedSwap\">\n      <MapName name=\"AvailableMemory\" scaleUp=\"1048576\">\\Memory\\AvailableMemory</MapName>\n<MapName name=\"PercentAvailableMemory\">\\Memory\\PercentAvailableMemory</MapName>\n<MapName name=\"UsedMemory\" scaleUp=\"1048576\">\\Memory\\UsedMemory</MapName>\n<MapName name=\"PercentUsedMemory\">\\Memory\\PercentUsedMemory</MapName>\n<MapName name=\"PercentUsedByCache\">\\Memory\\PercentUsedByCache</MapName>\n<MapName name=\"PagesPerSec\">\\Memory\\PagesPerSec</MapName>\n<MapName name=\"PagesReadPerSec\">\\Memory\\PagesReadPerSec</MapName>\n<MapName name=\"PagesWrittenPerSec\">\\Memory\\PagesWrittenPerSec</MapName>\n<MapName name=\"AvailableSwap\" 
scaleUp=\"1048576\">\\Memory\\AvailableSwap</MapName>\n<MapName name=\"PercentAvailableSwap\">\\Memory\\PercentAvailableSwap</MapName>\n<MapName name=\"UsedSwap\" scaleUp=\"1048576\">\\Memory\\UsedSwap</MapName>\n<MapName name=\"PercentUsedSwap\">\\Memory\\PercentUsedSwap</MapName>\n\n    </Unpivot> </OMIQuery>\n\n     <OMIQuery cqlQuery=\"SELECT PercentIdleTime,PercentUserTime,PercentNiceTime,PercentPrivilegedTime,PercentInterruptTime,PercentDPCTime,PercentProcessorTime,PercentIOWaitTime from SCX_ProcessorStatisticalInformation where Name='_Total'\" eventName=\"cpuStats\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n       <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\" PercentUserTime,PercentNiceTime,PercentPrivilegedTime,PercentInterruptTime,PercentDPCTime,PercentProcessorTime,PercentIOWaitTime\">\n       <MapName name=\"PercentIdleTime\">\\Processor\\PercentIdleTime</MapName>\n<MapName name=\"PercentUserTime\">\\Processor\\PercentUserTime</MapName>\n<MapName name=\"PercentNiceTime\">\\Processor\\PercentNiceTime</MapName>\n<MapName name=\"PercentPrivilegedTime\">\\Processor\\PercentPrivilegedTime</MapName>\n<MapName name=\"PercentInterruptTime\">\\Processor\\PercentInterruptTime</MapName>\n<MapName name=\"PercentDPCTime\">\\Processor\\PercentDPCTime</MapName>\n<MapName name=\"PercentProcessorTime\">\\Processor\\PercentProcessorTime</MapName>\n<MapName name=\"PercentIOWaitTime\">\\Processor\\PercentIOWaitTime</MapName>\n\n         </Unpivot>\n    </OMIQuery>\n\n  <OMIQuery cqlQuery=\"SELECT BytesPerSecond ,ReadBytesPerSecond,WriteBytesPerSecond,TransfersPerSecond,ReadsPerSecond,WritesPerSecond,AverageReadTime,AverageWriteTime,AverageTransferTime,AverageDiskQueueLength  from SCX_DiskDriveStatisticalInformation  where Name='_Total' \" eventName=\"diskStats\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"  BytesPerSecond ,ReadBytesPerSecond,WriteBytesPerSecond,TransfersPerSecond,ReadsPerSecond,WritesPerSecond,AverageReadTime,AverageWriteTime,AverageTransferTime,AverageDiskQueueLength\">\n  <MapName name=\"BytesPerSecond\">\\PhysicalDisk\\BytesPerSecond</MapName>\n<MapName name=\"ReadBytesPerSecond\">\\PhysicalDisk\\ReadBytesPerSecond</MapName>\n<MapName name=\"WriteBytesPerSecond\">\\PhysicalDisk\\WriteBytesPerSecond</MapName>\n<MapName name=\"TransfersPerSecond\">\\PhysicalDisk\\TransfersPerSecond</MapName>\n<MapName name=\"ReadsPerSecond\">\\PhysicalDisk\\ReadsPerSecond</MapName>\n<MapName name=\"WritesPerSecond\">\\PhysicalDisk\\WritesPerSecond</MapName>\n<MapName name=\"AverageReadTime\">\\PhysicalDisk\\AverageReadTime</MapName>\n<MapName name=\"AverageWriteTime\">\\PhysicalDisk\\AverageWriteTime</MapName>\n<MapName name=\"AverageTransferTime\">\\PhysicalDisk\\AverageTransferTime</MapName>\n<MapName name=\"AverageDiskQueueLength\">\\PhysicalDisk\\AverageDiskQueueLength</MapName>\n\n   </Unpivot>\n    </OMIQuery>\n  <OMIQuery cqlQuery=\" SELECT BytesTransmitted,BytesReceived,PacketsTransmitted,PacketsReceived,BytesTotal,TotalRxErrors,TotalTxErrors,TotalCollisions from SCX_EthernetPortStatistics  \" eventName=\"netStats\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n<Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"  BytesTransmitted,BytesReceived,PacketsTransmitted,PacketsReceived,BytesTotal,TotalRxErrors,TotalTxErrors,TotalCollisions from SCX_EthernetPortStatistics\">\n<MapName 
name=\"BytesTransmitted\">\\NetworkInterface\\BytesTransmitted</MapName>\n<MapName name=\"BytesReceived\">\\NetworkInterface\\BytesReceived</MapName>\n<MapName name=\"PacketsTransmitted\">\\NetworkInterface\\PacketsTransmitted</MapName>\n<MapName name=\"PacketsReceived\">\\NetworkInterface\\PacketsReceived</MapName>\n<MapName name=\"BytesTotal\">\\NetworkInterface\\BytesTotal</MapName>\n<MapName name=\"TotalRxErrors\">\\NetworkInterface\\TotalRxErrors</MapName>\n<MapName name=\"TotalTxErrors\">\\NetworkInterface\\TotalTxErrors</MapName>\n<MapName name=\"TotalCollisions\">\\NetworkInterface\\TotalCollisions</MapName>\n\n</Unpivot>\n    </OMIQuery>\n</OMI>\n    <DerivedEvents>\n      <DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"memoryStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n\n      <DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"cpuStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n <DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"diskStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n <DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"netStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n <DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"memoryStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n\n      <DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"cpuStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n <DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"diskStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n      </DerivedEvent>\n <DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"netStats\">\n        <LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest\" />\n       </DerivedEvent>\n\n</DerivedEvents>\n</Events>\n</MonitoringManagement>\n"
  },
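The <SharedAccessSignature> attributes at the top of this sample are the ones LadConfigAll._update_account_settings() fills in at runtime through XmlUtil.setXmlValue. A stand-alone sketch of the same edit with plain ElementTree follows; the account and key values are placeholders, and unlike the real extension this skips the certificate encryption and decryptKeyPath handling.

```python
# Stand-alone sketch (stdlib only): patch the default SAS account in the sample config.
import xml.etree.ElementTree as ET

tree = ET.parse('SampleConfig-LAD-SAS.xml')
sas = tree.find('Accounts/SharedAccessSignature')  # single default account in this sample
sas.set('account', 'mystorageacct')                # placeholder storage account name
sas.set('key', 'ACCOUNT_SAS_PLACEHOLDER')          # the real config stores an encrypted SAS
tree.write('xmlCfg.xml')
```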
  {
    "path": "Diagnostic/mdsd/azure.list",
    "content": "deb [arch=amd64] https://packages.microsoft.com/repos/azurecore/ trusty main\n"
  },
  {
    "path": "Diagnostic/mdsd/buildcmake.sh",
    "content": "#!/bin/bash\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n# This will build mdsd and its libraries\n# Usage: see Usage()\n#\n\nTotalErrors=0\n\nBuildType=\nCCompiler=gcc\nCXXCompiler=g++\nBuildName=dev\nBUILDDIR=builddir\nMakeFileOnly=0\nParallelism=\"-j4\"\n\n# If CodeCoverage=1, build with code coverage options.\n# NOTE: only gcc is supported and it must be debug build.\nCodeCoverage=OFF\n\nUsage()\n{\n    echo \"Usage: $0 <-a> | <-d|-o> <-c|-g> [-b buildname] [-mC] [-p parallelism] [-s] [-t]\"\n    echo \"    -b: use buildname. Default: timestamp.\"\n    echo \"    -C: capture code coverage.\"\n    echo \"    -d: build debug build.\"\n    echo \"    -m: create makefiles only. After done, run 'make help' for options.\"\n    echo \"    -o: build optimized(release) build.\"\n    echo \"    -p: specify number of parallel compile operations (default 4).\"\n}\n\nif [ \"$#\" == \"0\" ]; then\n    Usage\n    exit 1\nfi\n\nargs=`getopt b:Cdhmop: $*`\nif [ $? != 0 ]; then\n    Usage\n    exit 1\nfi\nset -- $args\n\nfor i; do\n    case \"$i\" in\n        -b)\n            BuildName=$2\n            shift ; shift ;;\n        -C)\n            CodeCoverage=ON\n            shift ;;\n        -d)\n            if [ -z \"${BuildType}\" ]; then\n                BuildType=d\n            else\n                echo \"Error: build type is already set to be ${BuildType}.\"\n                exit 1\n            fi\n            shift ;;\n        -h)\n            Usage\n            exit 0\n            shift ;;\n        -m)\n            MakeFileOnly=1\n            shift ;;\n        -o)\n            if [ -z \"${BuildType}\" ]; then\n                BuildType=o\n            else\n                echo \"Error: build type is already set to be ${BuildType}.\"\n                exit 1\n            fi\n            shift ;;\n        -p)\n            declare -i numJobs  # This variable is an integer, guaranteed by the shell\n            numJobs=$2\n            if [ $numJobs -gt 1 ]; then\n                Parallelism=\"-j$numJobs\"\n                echo \"Setting parallelism to $Parallelism\"\n            else\n                Parallelism=\"\"\n                echo \"Disabling parallel compilation\"\n            fi\n            shift; shift ;;\n        --) shift; break ;;\n    esac\ndone\n\nif [ -z \"${BuildType}\" ]; then\n    echo \"Error: missing build type. -d or -o is required.\"\n    exit 1\nfi\n\nif [ \"${CodeCoverage}\" == \"ON\" ]; then\n    if [ \"${BuildType}\" != \"d\" ]; then\n        echo \"Error: only debug build is supported for code coverage.\"\n        exit 1\n    fi\nfi\n\nBuildWithCMake()\n{\n    echo\n    echo Start to build source code. BuildType=${BuildType} ...\n    BinDropDir=${BUILDDIR}.${BuildType}.${CCompiler}\n    rm -rf ${BUILDDIR} ${BinDropDir}\n    mkdir ${BinDropDir}\n    ln -s ${BinDropDir} ${BUILDDIR}\n\n    pushd ${BinDropDir}\n\n    DefBuildNumber=\n    if [ ! -z \"${BuildName}\" ]; then\n        DefBuildNumber=-DBUILD_NUMBER=${BuildName}\n    fi\n    echo \"BuildName: '${DefBuildNumber}'\"\n\n    CMakeBuildType=\"Release\"\n    if [ ${BuildType} == \"d\" ]; then\n        CMakeBuildType=\"Debug\"\n    fi\n\n    cmake -DCMAKE_C_COMPILER=${CCompiler} -DCMAKE_CXX_COMPILER=${CXXCompiler} \\\n          -DCMAKE_BUILD_TYPE=${CMakeBuildType} ${DefBuildNumber} \\\n          -DBUILD_COV=${CodeCoverage} ../\n\n    CheckCmdError \"cmake\"\n\n    if [ ${MakeFileOnly} != 0 ]; then\n        echo\n        echo Makfiles are created. 
To make, cd ${BUILDDIR}, run make \\<target\\>.\n        echo\n        make help\n        exit ${TotalErrors}\n    fi\n\n    make ${Parallelism}\n    CheckCmdError \"make ${Parallelism}\"\n\n    make install\n    CheckCmdError \"make install\"\n\n    if [ ${CCompiler} == \"gcc\" ]; then\n        # Make deb/rpm packages for LAD mdsd\n        make -C ../lad-mdsd/deb LABEL=${BuildName}\n        CheckCmdError  \"lad-mdsd/deb\"\n        make -C ../lad-mdsd/rpm LABEL=${BuildName}\n        CheckCmdError  \"lad-mdsd/rpm\"\n    fi\n    tar czf release.tar.gz release\n    popd\n}\n\n# Check whether previous command has error or not.\n# Usage: CheckCmdError \"description\"\nCheckCmdError()\n{\n    if [ $? != 0 ]; then\n        let TotalErrors+=1\n        echo Error: build $1 failed\n        exit ${TotalErrors}\n    else\n        echo Finished building $1 successfully\n    fi\n}\n\n# Usage: ParseGlibcVer <dirname> <filename>(optional)\nParseGlibcVer()\n{\n    # Maximum GLIBC version supported by oldest supported distro\n    glibcver=2.15\n    ParserScript=./parseglibc.py\n    dirname=$1\n    filename=$2  # optional, can be NULL\n    echo\n    if [ -n \"${filename}\" ]; then\n        echo python ${ParserScript} -f ${dirname}/${filename} -v ${glibcver}\n        python ${ParserScript} -f ${dirname}/${filename} -v ${glibcver}\n    else\n        echo python ${ParserScript} -d ${dirname} -v ${glibcver}\n        python ${ParserScript} -d ${dirname} -v ${glibcver}\n    fi\n\n    if [ $? != 0 ]; then\n        let TotalErrors+=1\n        echo Error: ParseGlibcVer failed: maximum supported GLIBC version is ${glibcver}.\n        exit ${TotalErrors}\n    fi\n}\n\n# Download/build/install the appropriate version of openssl.\n#    This is needed because the lib{ssl,crypto}.a that's available through the Ubuntu repo\n#    is causing some link errors at the last stage. We need to use /usr/local/ssl as the\n#    top-level OpenSSL directory for the libraries, to make them work on all distros\n#    (especially SUSE 11, which is already done that way).\nBuildOpenSsl()\n{\n    opensslDir=openssl-1.0.2* # Grab the only (which must be latest) OpenSSL 1.0.2 release\n    tgzFile=$opensslDir.tar.gz\n    wget ftp://ftp.openssl.org/source/$tgzFile || exit 1\n    InstallOpenSSL=1\n    if [ -e /usr/local/lib/libcrypto.a -a -e /usr/local/lib/libssl.a ]; then\n        OpenSSLVersion=$(strings /usr/local/lib/libssl.a | egrep \"^OpenSSL \" | awk '{ print $2 }')\n        DownloadedTGZName=$(ls $tgzFile)\n        if [ \"$DownloadedTGZName\" == \"openssl-$OpenSSLVersion.tar.gz\" ]; then # Already latest\n            InstallOpenSSL=0\n        fi\n    fi\n    if [ \"$InstallOpenSSL\" == \"1\" ]; then\n        tar xfz $tgzFile\n        cd $opensslDir\n         # Need to make the lib*.a linkable to .so as well (for AI SDK lib*.so) by adding -fPIC.\n        export CC=\"gcc -fPIC\"\n        ./config --prefix=/usr/local --openssldir=/usr/lib/ssl zlib\n        make\n        CheckCmdError \"openssl make\"\n        sudo make install_sw\n        CheckCmdError \"openssl make install_sw\"\n        cd ..\n    fi\n}\n\n\necho Start build at `date`. BuildType=${BuildType} CC=${CCompiler} ...\n\nBuildOpenSsl\n\nBuildWithCMake\n\n# Remaining steps should be run only on a non-static build except ParseGlibcVer on bin build.\n\nParseGlibcVer ./${BUILDDIR}/release/bin\nParseGlibcVer ./${BUILDDIR}/release/lib\n\necho\necho Finished all builds at `date`. error = ${TotalErrors}\nexit ${TotalErrors}\n"
  },
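parseglibc.py, which ParseGlibcVer invokes in the script above, is not part of this listing; the comments only say it fails the build if any artifact requires a GLIBC symbol version above 2.15. A hypothetical approximation of that check is sketched below (a symbol scan via objdump -T; the real script's logic and options may differ):

```python
# Hypothetical stand-in for the parseglibc.py check: fail if a binary references
# any GLIBC_x.y symbol version newer than the configured maximum (2.15 above).
import re
import subprocess
import sys

def max_glibc_version(path):
    out = subprocess.check_output(['objdump', '-T', path]).decode('utf-8', 'replace')
    versions = {(int(a), int(b)) for a, b in re.findall(r'GLIBC_(\d+)\.(\d+)', out)}
    return max(versions) if versions else (0, 0)

limit = (2, 15)  # maximum GLIBC version supported by the oldest supported distro
found = max_glibc_version(sys.argv[1])
if found > limit:
    sys.exit('requires GLIBC %d.%d, which exceeds %d.%d' % (found + limit))
```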
  {
    "path": "Diagnostic/mdsd/lad-mdsd/Makefile.in.version",
    "content": "VERSION_NUM=1.6.100\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/README.txt",
    "content": "This directory contains files to create the Debian package and the RPM package for\nthe mdsd static binary executable that'll be bundled in LAD 3.0. LAD 3.0 depends on\nomsagent, scx, omi packages (that are installed through\nthe omsagent shell bundle), and we shouldn't let these packages be removed when\nthe OMS Agent for Linux extension is uninstalled (the OMS Agent extension also uses\nthe omsagent shell bundle). The Debian/RPM packages include just the mdsd binary\nat /usr/local/lad/bin, and specify the dependencies.\n\nTo run the Makefile on Ubuntu, the rpm package must be installed first:\n\n    $ sudo apt-get install rpm\n\nThen simply run 'make' at this directory, and collect the **/lad-mdsd-*.deb and the\n**/lad-mdsd-*.rpm files.\n\nNOTE: Version number conventions are different on dpkg and rpm, so that's why now\nVERSION_NUM is separately defined in Makefile.in.version, and actual\nversion strings are composed for different deb/rpm packaging directories.\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/changelog",
    "content": "PACKAGE (1.4.101) stable; urgency=low\n *  Bug fix: Emit schema md5 hashes at the end of Event Hub\n    Notification event bodies.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Jun 21 16:30:00 UTC 2017\n\nPACKAGE (1.4.100) stable; urgency=low\n *  Release mdsd binary with libraries static-linked as much as\n    possible. Gcc-built azure-mdsd deb pkg has more libraries\n    statically linked than clang-built azure-mdsd-clang deb pkg.\n\n * Fixed mdsd http proxy bug.\n\n -- Azure Linux <azlinux@microsoft.com>  Thur Jun 15 16:30:00 UTC 2017\n\nPACKAGE (1.3.101) stable; urgency=low\n *  Mdsd daemon pidfile is changed from /var/run/mdsd.pid to\n    <mdsd-role-name>.pid, default is /var/run/mdsd/default.pid.\n\n -- Azure Linux <azlinux@microsoft.com>  Thu Apr 27 17:40:00 UTC 2017\n\nPACKAGE (1.3.100) stable; urgency=low\n *  New feature: support new store type CentralJson. Data are uploaded\n    to Azure storage as JSON blob.\n *  New feature: support EventHub publishing with embedded SAS keys.\n    Data are uploaded to Azure EventHub service directly.\n *  New feature: environment variables MDSD_CONFIG_DIR, MDSD_RUN_DIR,\n    and MDSD_LOG_DIR.\n *  Bug fix: print clear error when mdsd pidport file was already locked.\n *  Bug fix: suppress transient rsyslog-mdsd OM connect() error log.\n *  Bug fix: fix mdsd SysV script reload bug.\n\n -- Azure Linux <azlinux@microsoft.com>  Mon Apr 10 22:20:00 UTC 2017\n\nPACKAGE (1.2.109) stable; urgency=low\n *  Bug fix: parse double number properly for dynamic json data.\n *  Bug fix: handle metadata conflicts for dynamic schema data.\n *  Bug fix: add UNIX socket filepath length validation.\n *  Bug fix: add EventHub max data size validation.\n\n -- Azure Linux <azlinux@microsoft.com>  Thur Feb 16 22:20:00 UTC 2017\n\nPACKAGE (1.2.108) stable; urgency=low\n *  Bug fix: retry OMI task up to 30-minute if start-up fails.\n *  Bug fix: fix EventHub reliability issue when EventHub blob is not found.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Jan 11 23:20:00 UTC 2017\n\nPACKAGE (1.2.107) stable; urgency=low\n *  Don't load EventHub SAS keys when mdsd.xml doesn't have related storetype.\n *  Bug fix: fix ETW event SchemaID issue.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Dec 6 00:40:00 UTC 2016\n\nPACKAGE (1.2.106) stable; urgency=low\n *  Bug fix: add EventHub blob download failure retry.\n *  When SAS key loads fails, retry in 1-minute instead of 6-hours.\n *  Refactor Centralbond sink and request code.\n *  Modified bond and djson protocols so that mdsd sets PreciseTimeStamp value.\n *  Account SAS support for LAD shared storage key.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Nov 11 23:40:00 UTC 2016\n\nPACKAGE (1.2.105) stable; urgency=low\n\n *  Bug fix: remove /tmp dependency for autokey downloading and parsing.\n *  Bug fix: rm ucf in mdsd debian pkg to avoid unwanted prompt.\n *  Improvement: FileSink open()s file lazilly, close on flush().\n *  Test improvement: add ingest stress tests; rm unwanted credentials.\n\n -- Azure Linux <azlinux@microsoft.com>  Thu Oct 20 23:40:00 UTC 2016\n\nPACKAGE (1.2.104) stable; urgency=low\n\n *  Bug fix for LAD2AI config validation crash.\n *  Bug fix for \"too many open files\" error during EventHub file parsing.\n *  Bug fix for a memory corruption in StreamListener() when ProcessLoop() has error.\n *  Performance improvement: defer destroying entries until after unlocking LocalSink.\n *  Restart mdsd per N-hour in cron job.\n *  Change openssl to latest release in mdsd-static.\n\n -- 
Azure Linux <azlinux@microsoft.com>  Tue Oct 4 23:40:00 UTC 2016\n\nPACKAGE (1.2.103) stable; urgency=low\n *  Enable finer-grained tracing of ingest and bond.\n *  Add sanity checks to old-style JSON ingest.\n *  rsyslog module: enforce a maximum event size of 1MB; bigger events are dropped.\n *  Bug fix: EventHub async fire-and-forget model may use an object beyond its lifetime.\n *  Bug fix: EventHub async task waits on the wrong task.\n *  Bug fix: EventHub SAS key should be set by AutoKey reload timer.\n *  Bug fix in rsyslog module: handle partial send(); handle concurrent send().\n *  Bug fix in rsyslog module: handle ack msg from mdsd in a separate thread to avoid livelock.\n *  Bug fix in rsyslog module: throttle event resend to a peak of 20MBps.\n *  Bug fix: Skip sending an ACK for an ingested event if sending it would block.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Sep 23 17:00:00 UTC 2016\n\nPACKAGE (1.2.102) stable; urgency=low\n\n *  Performance improvement when uploading EventHub messages.\n *  Bug fix: restore identity columns after LADQuery stage.\n\n -- Azure Linux <azlinux@microsoft.com>  Tue Aug 30 17:00:00 UTC 2016\n\nPACKAGE (1.2.101) stable; urgency=low\n\n *  Add unix socket support for the rsyslog module\n\n -- Azure Linux <azlinux@microsoft.com>  Tue Aug 2 19:00:00 UTC 2016\n\nPACKAGE (1.2.100) stable; urgency=low\n\n *  Increase version number.\n\n -- Azure Linux <azlinux@microsoft.com>  Tue Aug 2 12:00:00 UTC 2016\n\nPACKAGE (1.1.106) stable; urgency=low\n\n *   Added support for input over unix domain sockets.\n *   Added support for two new input encodings/protocols (bond & json) that allow dynamic schema definition.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Jul 14 12:00:00 UTC 2016\n\nPACKAGE (1.1.105) stable; urgency=low\n\n * Add identity columns on CentralBond as well\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Jul 8 23:50:00 UTC 2016\n\nPACKAGE (1.1.104) stable; urgency=low\n\n *   Revert cJSON library source code due to regression (missing messages due\n     to JSON parsing errors)\n\n -- Azure Linux <azlinux@microsoft.com>  Tue Jun 29 23:50:00 UTC 2016\n\nPACKAGE (1.1.103) stable; urgency=low\n\n *   Make CentralBond type send schemas to SchemasTable.\n\n -- Azure Linux <azlinux@microsoft.com>  Tue Jun 28 23:50:00 UTC 2016\n\nPACKAGE (1.1.102) stable; urgency=low\n\n *   Updated cJSON source code to fix a memory corruption bug\n *   Abort the main loop when accept() fails, to avoid a spin loop.\n *   Avoid cascaded SIGABRT handler calls that could cause deadlock on malloc/free\n *   LocalSink lock scope improvement\n\n -- Azure Linux <azlinux@microsoft.com>  Mon Jun 20 23:50:00 UTC 2016\n\nPACKAGE (1.1.101) stable; urgency=low\n\n *   Add signal handler for SIGPIPE.\n *   Fix error handling when mdsd echoes back to the event sender and fails.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Jun 15 23:50:00 UTC 2016\n\nPACKAGE (1.1.100) stable; urgency=low\n\n *   Supports remote update of the agent's primary XML config file from Geneva based on namespace/tenant/role/roleinstance\n *   Supports log rotation via SIGUSR2\n *   Supports mapped storage monikers\n *   Enables use of a random JSON-listener port if the requested port is unavailable\n *   Reports actual listening port via a “pid and port” file\n\n -- Azure Linux <azlinux@microsoft.com>  Mon Jun 14 20:00:00 UTC 2016\n\nPACKAGE (1.0.100) unstable; urgency=low\n\n *   CentralBond support\n *   EventHub support for some Geneva pipeline services (dgrep, kusto, cosmos/coldpath)\n *   Statically link required 
libraries when possible\n\n -- Azure Linux <azlinux@microsoft.com>  Mon May 16 19:52:43 UTC 2016\n\nPACKAGE (0.9.5) unstable; urgency=low\n\n *   Proxy support\n *   CPPREST 2.8, Storage C++ SDK 2.3 upgrades (for proxy support)\n\n -- Azure Linux <azlinux@microsoft.com>  Mon Mar  7 11:26:35 PST 2016\n\nPACKAGE (0.9.4) unstable; urgency=low\n\n *   Fix JSON parsing error when the last character in the buffer is a backslash\n *   Improve reporting of XML parse errors and warnings\n *   Enable stack trace on crash earlier in startup\n *   AppInsights: Add metadata for metrics and traces\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Mar 4 19:24:45 UTC 2016\n\nPACKAGE (0.9.3) unstable; urgency=low\n\n *   Integration with hotfixed OMI/SCX\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Jan 29 23:14:28 UTC 2016\n\nPACKAGE (0.9.2) unstable; urgency=low\n\n *   Add -C option to enable dropping a core file on a fatal signal\n *   Improve logging by adding timestamps to all logs.\n\n -- Azure Linux <azlinux@microsoft.com>  Thu Jan 14 18:00:00 UTC 2016\n\nPACKAGE (0.9.1) unstable; urgency=low\n\n *   Write MDS metadata table entry correctly for MDS tables with long names (see 0.8.1)\n *   Look up typeconverters by string instead of ustring\n *   Show known type converters when an unsupported type conversion is requested\n *   Improved error message when parsing JSON events against a schema from the config file.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Dec 02 23:00:00 UTC 2015\n\nPACKAGE (0.9.0) unstable; urgency=low\n\n *   Add support for AISDK library with graceful failure if not present\n\n -- Azure Linux <azlinux@microsoft.com>  Mon Nov 23 12:04:40 UTC 2015\n \nPACKAGE (0.8.3) unstable; urgency=low\n\n *   Fix the inverse-timestamp in shoebox rowkeys\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Oct 21 21:04:40 UTC 2015\n\nPACKAGE (0.8.2) unstable; urgency=low\n\n *   Resolve a compatibility conflict with omazuremds.so\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Sep 17 00:44:00 UTC 2015\n\nPACKAGE (0.8.1) unstable; urgency=low\n\n *   When <Import>ing a config file, ignore the attributes of <MonitoringManagement>\n     elements contained therein.\n *   Create correct SchemasTable entries for tables whose full names (with prefix\n     and all suffixes) exceed 63 characters in length.\n *   Eliminate some compiler warnings.\n *   Funnel all use of write() to a single WriteWithNewline() function that checks\n     the return status.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Sep 9 23:19:00 UTC 2015\n\nPACKAGE (0.8.0) unstable; urgency=low\n\n *   Add support for the full \"shoebox\" rowkey schema via the instanceID attribute on\n     the <LADQuery> element.\n *   Minor corrections to config-file parse error messages.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Aug 28 00:37:45 UTC 2015\n\nPACKAGE (0.7.10) unstable; urgency=low\n\n *   Replace sprintf with snprintf\n *   Remove execvp from daemon execution; the daemon is no longer started via a second\n     command line invocation.\n *   Add secure compilation options (PIC/PIE, stack protection, immediate binding)\n     to mdsd and autokey.\n \n -- Azure Linux <azlinux@microsoft.com>  Tue Aug 18 11:10:01 UTC 2015\n\nPACKAGE (0.7.9) unstable; urgency=low\n\n *   Enforce actual XTable limits (column size, total row size).\n *   Fixed a rare case that could result in events being uploaded twice.\n *   Expunge expired tags on an open ingest connection even if no new events are\n     arriving on that connection.\n *   Add dupeWindowSeconds to <AgentResourceUsage> 
element; specifies the time\n     window during which duplicate events must be detected. Min 60; max 3600.\n *   Fixed a rare problem in which receipt of a partial JSON event from a sender\n     corrupts the reassembly buffer during buffer expansion.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri Jun 26 23:18:01 UTC 2015\n\nPACKAGE (0.7.8) unstable; urgency=low\n\n *   Close the event-ingest connection if sync is lost while trying to find JSON. Event\n     senders are required to detect the closed connection and resend any event that\n     was not acknowledged (i.e. for which they did not see the TAG echoed back on the\n     connection).\n    \n -- Azure Linux <azlinux@microsoft.com>  Fri Jun 19 23:18:01 UTC 2015\n\nPACKAGE (0.7.7) unstable; urgency=low\n\n *   Fix a regression in the generation of the MDS Table Search schema.\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Jun 10 23:52:26 UTC 2015\n\nPACKAGE (0.7.6) unstable; urgency=low\n\n *   Flush unneeded event data from local tables held in memory.\n *   Fix a crash (SIGSEGV) on process exit (seen only when using the -v option).\n\n -- Azure Linux <azlinux@microsoft.com>  Fri May 18 01:55:45 UTC 2015\n \nPACKAGE (0.7.5) unstable; urgency=low\n\n *   Add scaleUp and scaleDown attributes to <MapName> to scale specific values retrieved\n     from OMI and unpivoted. <MapName> transforms more than just the column name and is\n     thus somewhat misnamed, but changing it is a breaking schema change.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri May 08 01:55:45 UTC 2015\n\nPACKAGE (0.7.4) unstable; urgency=low\n\n *   Build against libazurestorage 1.0.0. Stop suppressing certain warnings during build.\n\n -- Azure Linux <azlinux@microsoft.com>  Fri May 01 01:55:45 UTC 2015\n \nPACKAGE (0.7.3) unstable; urgency=low\n\n *   Don't emit an error message when creating a missing table.\n\n -- Azure Linux <azlinux@microsoft.com>  Sat Apr 25 01:55:45 UTC 2015\n \nPACKAGE (0.7.2) unstable; urgency=low\n\n *   Force mt_int32 values to remain 32 bits and not be scaled up by the storage API.\n *   Store TIMESTAMP as a true DateTime (as implemented in PPLX utility::datetime and as\n     expected by the storage SDK).\n *   RowKey for the LAD Query has metric name and timestamp separated by only two\n     underscores, not the three that MDS uses when combining strings.  Also, the hex\n     expansions of non-alphanumerics are expected to use all uppercase hex digits.\n\n -- Azure Linux <azlinux@microsoft.com>  Sat Apr 25 01:55:45 UTC 2015\n\nPACKAGE (0.7.0) unstable; urgency=low\n\n *   Store events to unpersisted local tables (storeType=\"local\").\n *   <Unpivot> element within <OMIQuery> will unpivot specified columns into separate rows.\n *   <MapName> within <Unpivot> will rename specific unpivoted datum names (e.g. change\n     \"AvailableMemory\" to \"MEMORY\\Available\").\n *   <DerivedEvents> and <LADQuery> enable querying of data from a local table to produce\n     a set of aggregates. The query is fixed to meet the needs of Linux Azure Diagnostics\n     and includes customized partition and row keys.\n *   Create missing tables if full storage account credentials are supplied.\n *   Enable mocking of MDS by storing events in a disk file (storeType=\"file\").\n\n -- Azure Linux <azlinux@microsoft.com>  Wed Apr 22 01:55:45 UTC 2015\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/copyright",
    "content": "Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/\nSource: https://msazure.visualstudio.com/One/_git/Compute-Runtime-Tux/\n\nFiles: *\nCopyright: 2015 Microsoft Corporation\nLicense: Microsoft Internal Use ONLY\n\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/deb/Makefile",
    "content": "include ../Makefile.in.version\n\nVERSION=${VERSION_NUM}\nPACKAGE=lad-mdsd\nLABEL?=~dev\n\nARCH?=amd64\nVER=$(VERSION)-$(LABEL)\n\nFAKEROOT=./data-root\nDOCDIR=$(FAKEROOT)/usr/share/doc/$(PACKAGE)\nSHAREDIR=$(FAKEROOT)/usr/share/$(PACKAGE)\nMDSD_BIN_DIR=$(FAKEROOT)/usr/local/lad/bin\n\nMDSD_BUILT_BIN=../../builddir/release/bin/mdsd\n\nDEB=$(PACKAGE)-$(VER).$(ARCH).deb\n\npackage: $(DEB)\n\nsigned-package: _gpgorigin $(DEB)\n\tar r $(DEB) $<\n\n_gpgorigin: $(DEB)\n\t-rm -f $@\n\tar p $(DEB) debian-binary control.tar.gz data.tar.gz | gpg -abs -o _gpgorigin\n\n$(DEB): tarballs debian-binary\n\t-rm -f $@\n\tar rc $@ debian-binary control.tar.gz data.tar.gz\n\n$(DOCDIR):\n\tmkdir -p $@\n\n$(DOCDIR)/changelog.Debian.gz: ../changelog $(DOCDIR)\n\tcat $< | gzip -9 > $@\n\n$(DOCDIR)/copyright: ../copyright $(DOCDIR)\n\tcp $< $@\n\ndebian-binary:\n\techo 2.0 > debian-binary\n\ntarballs: data.tar.gz control.tar.gz\n\ncontrol.tar.gz: md5sums control\n\t-rm -rf control-root\n\t-mkdir -p control-root\n\tcp control md5sums control-root\n\tchmod 644 control-root/*\n\tsed -i '/^Version:/c Version: $(VER)' control-root/control\n\tsed -i '/^Package:/c Package: $(PACKAGE)' control-root/control\n\tsed -i '/^Architecture:/c Architecture: $(ARCH)' control-root/control\n\tcd control-root && tar -czf ../$@ --owner=root --group=root .\n\nmd5sums: install-deps\n\t(cd $(FAKEROOT) && md5sum `find -type f`) > $@\n\tchmod 0644 $@\n\ndata.tar.gz: install-deps \\\n\t\t$(DOCDIR)/changelog.Debian.gz \\\n\t\t$(DOCDIR)/copyright \\\n\t\t$(LINTIANOVERRIDES)\n\tfind $(FAKEROOT) -type d | xargs chmod 0755\n\tfind $(FAKEROOT) -type d | xargs chmod ug-s\n\tfind $(FAKEROOT)/usr/share/doc -type f | xargs chmod 0644\n\tcd $(FAKEROOT) && tar -czf ../$@ --owner=root --group=root --mode=go-w *\n\n.PHONY: clean install-clean install-deps\n\nclean: install-clean\n\t-rm -rf control-root\n\t-rm -f debian-binary *.tar.gz _gpgorigin md5sums\n\t-rm -f $(PACKAGE)*.deb\n\ninstall-clean:\n\t-rm -rf $(FAKEROOT)\n\ninstall-deps: install-clean\n\tmkdir -p $(MDSD_BIN_DIR)\n\tinstall -m 755 $(MDSD_BUILT_BIN) $(MDSD_BIN_DIR)/mdsd\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/deb/control",
    "content": "Package: PACKAGE\nVersion: VERSION\nSection: admin\nPriority: optional\nArchitecture: ARCH\nDepends: libc6, scx (>=1.6.2.169), omi, omsagent\nMaintainer: Azure Linux Team <azlinux@microsoft.com>\nDescription: MDS monitoring agent daemon for Linux Azure Diagnostic extension\n MDS monitoring daemon for Linux Azure Diagnostic extension\n"
  },
  {
    "path": "Diagnostic/mdsd/lad-mdsd/rpm/Makefile",
    "content": "include ../Makefile.in.version\n\nVERSION=${VERSION_NUM}\nPACKAGE=lad-mdsd\nLABEL?=dev\n\nDATAROOT=./data-root\nFAKEROOT=$(DATAROOT)/$(PACKAGE)-$(VERSION)\nDOCDIR=$(FAKEROOT)/usr/share/doc/$(PACKAGE)\nSHAREDIR=$(FAKEROOT)/usr/share/$(PACKAGE)\nMDSD_BIN_DIR=$(FAKEROOT)/usr/local/lad/bin\n\nMDSD_BUILT_BIN=../../builddir/release/bin/mdsd\n\nRPM=RPMS/x86_64/$(PACKAGE)-$(VERSION)-$(LABEL).x86_64.rpm\n\nTARBALL=$(PACKAGE)-$(VERSION).tgz\n\nRPM: $(TARBALL)\n\trpmbuild -v -bb --clean --define \"_topdir $(realpath .)\" SPECS/lad-mdsd.spec\n\n$(TARBALL): rpm_prepare install-deps $(DOCDIR)/ChangeLog\n\tfind $(FAKEROOT) -type d | xargs chmod 0755\n\tfind $(FAKEROOT) -type d | xargs chmod ug-s\n\tcd $(DATAROOT) && tar -czf ../SOURCES/$@ *\n\n$(DOCDIR):\n\tmkdir -p $@\n\n$(DOCDIR)/ChangeLog: ../changelog $(DOCDIR)\n\tcp $< $@\n\nrpm_prepare: clean\n\tmkdir -p SOURCES SPECS BUILD BUILDROOT RPMS SRPMS\n\tcp lad-mdsd.spec SPECS\n\tsed -i '/^Name:/c Name: $(PACKAGE)' SPECS/lad-mdsd.spec\n\tsed -i '/^Version:/c Version: $(VERSION)' SPECS/lad-mdsd.spec\n\tsed -i '/^Release:/c Release: $(LABEL)' SPECS/lad-mdsd.spec\n\n.PHONY: clean install-clean install-deps\n\nclean: install-clean\n\t-rm -rf SOURCES SPECS BUILD BUILDROOT RPMS SRPMS\n\ninstall-clean:\n\t-rm -rf $(DATAROOT)\n\ninstall-deps: install-clean\n\tmkdir -p $(MDSD_BIN_DIR)\n\tinstall -m 755 $(MDSD_BUILT_BIN) $(MDSD_BIN_DIR)/mdsd\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/BinaryWriter.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __BINARYWRITER__HH__\n#define __BINARYWRITER__HH__\n\n#include <string>\n#include <cstring>\n#include <vector>\n#include <type_traits>\n#include \"MdsException.hh\"\n\nnamespace mdsd { namespace details\n{\n\ntypedef uint8_t byte;\n\n// A helper class which allows write data in binary format to the bytes buffer.\nclass BinaryWriter\n{\n    template <class T, bool isFundamental>\n    class BinaryWriterFunctions\n    {\n    public:\n        static void Write(BinaryWriter& writer, T value);\n        static void Write(BinaryWriter& writer, size_t position, T value);\n    };\n\n    template <class T>\n    class BinaryWriterFunctions<T, true>\n    {\n    public:\n        static void Write(BinaryWriter& writer, T value)\n        {\n            writer.Write(reinterpret_cast<const byte *>(&value), sizeof(T));\n        }\n\n        static void Write(BinaryWriter& writer, size_t position, T value)\n        {\n            writer.Write(position, reinterpret_cast<const byte *>(&value), sizeof(T));\n        }\n    };\n\n    template <class T>\n    class BinaryWriterFunctions<T, false>\n    {\n        static void Write(BinaryWriter& writer, T value);\n        static void Write(BinaryWriter& writer, size_t position, T value);\n    };\n\npublic:\n\n    // Initializes a BinaryWriter object specifying the buffer to be used.\n    BinaryWriter(std::vector<byte>& buffer) : m_buffer(buffer) {}\n\n    // Gets the current size of the buffer.\n    std::size_t GetBufferSize() const { return m_buffer.size(); }\n\n    // Writes binary data to the specified position of the buffer, extending it if required.\n    void Write(size_t position, const byte* source, size_t sourceSize)\n    {\n        if (!source) {\n            throw MDSEXCEPTION(\"Unexpected NULL for source pointer.\");\n        }\n        if (position + sourceSize > m_buffer.size())\n        {\n            m_buffer.resize(position + sourceSize);\n        }\n\n        memcpy(m_buffer.data() + position, source, sourceSize);\n    }\n\n    // Writes binary data to the end of the buffer, extending it.\n    void Write(const byte* source, size_t sourceSize)\n    {\n        if (!source) {\n            throw MDSEXCEPTION(\"Unexpected NULL for source pointer.\");\n        }\n        Write(m_buffer.size(), source, sourceSize);\n    }\n\n    // Writes value of the primitive type to the end of the buffer in binary format.\n    template <class T>\n    void Write(T value)\n    {\n        BinaryWriterFunctions<T, std::is_fundamental<T>::value>::Write(*this, value);\n    }\n\n    // Writes value of the primitive type to the specified position of the buffer in binary format.\n    template <class T>\n    void Write(size_t position, T value)\n    {\n        BinaryWriterFunctions<T, std::is_fundamental<T>::value>::Write(*this, position, value);\n    }\n\n    // Writes string value to the end of the buffer.\n    void Write(const std::string & value)\n    {\n        Write(reinterpret_cast<const byte *>(value.c_str()), value.size());\n    }\n\n    // Writes an integer value to the end of the buffer in base-128 format.\n    void WriteInt32AsBase128(int value)\n    {\n        WriteInt64AsBase128(value);\n    }\n\n    // Writes an int64 value to the end of the buffer in base-128 format.\n    void WriteInt64AsBase128(int64_t value)\n    {\n        bool negative = value < 0;\n        long t = static_cast<long>(negative ? 
-value : value);\n        bool first = true;\n        do\n        {\n            byte b;\n            if (first)\n            {\n                b = (byte)(t & 0x3f);\n                t >>= 6;\n                if (negative)\n                {\n                    b = (byte)(b | 0x40);\n                }\n\n                first = false;\n            }\n            else\n            {\n                b = (byte)(t & 0x7f);\n                t >>= 7;\n            }\n\n            if (t > 0)\n            {\n                b |= 0x80;\n            }\n\n            Write(&b, sizeof(b));\n        } while (t > 0);\n    }\n\n    // Writes an unsigned integer value to the end of the buffer in base-128 format.\n    void WriteUInt32AsBase128(unsigned int value)\n    {\n        WriteUInt64AsBase128(value);\n    }\n\n    // Writes an unsigned long value to the end of the buffer in base-128 format.\n    void WriteUInt64AsBase128(uint64_t value)\n    {\n        uint64_t t = value;\n\n        do\n        {\n            byte b = (byte)(t & 0x7f);\n            t >>= 7;\n            if (t > 0)\n            {\n                b |= 0x80;\n            }\n\n            Write(&b, sizeof(b));\n        } while (t > 0);\n    }\n\n    // Clears the buffer.\n    void Reset() { m_buffer.clear(); }\n\nprivate:\n    \n    std::vector<byte>& m_buffer;\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __BINARYWRITER__HH__\n"
  },
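The base-128 layout written by WriteUInt64AsBase128 above is a varint-style encoding: seven payload bits per byte, least-significant group first, with 0x80 set on every byte except the last (signed values put six bits plus a 0x40 sign flag in the first byte). A minimal decoder sketch for the unsigned case; ReadUInt64AsBase128 is a hypothetical helper written here for illustration, not part of these sources:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical inverse of BinaryWriter::WriteUInt64AsBase128: consume 7-bit
// groups (least-significant first) until a byte without the 0x80 continuation bit.
uint64_t ReadUInt64AsBase128(const std::vector<uint8_t>& buf, size_t& pos)
{
    uint64_t value = 0;
    int shift = 0;
    while (pos < buf.size()) {
        uint8_t b = buf[pos++];
        value |= static_cast<uint64_t>(b & 0x7f) << shift;
        if ((b & 0x80) == 0) {
            break;              // last byte of this value
        }
        shift += 7;
    }
    return value;
}
```

For example, encoding 300 produces the bytes 0xAC 0x02, which the loop above reassembles as 44 + (2 << 7) = 300.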
  {
    "path": "Diagnostic/mdsd/mdscommands/BodyOnlyXmlParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <iostream>\n#include <fstream>\n#include <sstream>\n#include <algorithm>\n#include <cctype>\n\n#include \"BodyOnlyXmlParser.hh\"\n#include \"MdsException.hh\"\n\nusing namespace mdsd::details;\n\nvoid\nBodyOnlyXmlParser::ParseFile(std::string xmlFilePath)\n{\n    m_xmlFilePath = std::move(xmlFilePath);\n\n    std::ifstream infile{m_xmlFilePath};\n    if (!infile) {\n        std::ostringstream strm;\n        strm << \"Failed to open file '\" << m_xmlFilePath << \"'.\";\n        throw MDSEXCEPTION(strm.str());\n    }\n\n    std::string line;\n    while(std::getline(infile, line)) {\n        ParseChunk(line);\n    }\n\n    if (!infile.eof()) {\n        std::ostringstream strm;\n        strm << \"Failed to parse file '\" << m_xmlFilePath << \"': \";\n        if (infile.bad()) {\n            strm << \"Corrupted stream.\";\n        }\n        else if (infile.fail()) {\n            strm << \"IO operation failed.\";\n        }\n        else {\n            strm << \"std::getline() returned 0 for unknown reason.\";\n        }\n        throw MDSEXCEPTION(strm.str());\n    }\n}\n\nvoid\nBodyOnlyXmlParser::OnCharacters(const std::string& chars)\n{\n    bool isEmptyOrWhiteSpace = std::all_of(chars.cbegin(), chars.cend(), ::isspace);\n    if (!isEmptyOrWhiteSpace) {\n        m_body.append(chars);\n    }\n}\n"
  },
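A minimal usage sketch for the parser above, assuming the SaxParserBase plumbing (ParseChunk and the callbacks) behaves as wired up in these sources; "commands.xml" is an illustrative input path:

```cpp
#include <iostream>
#include "BodyOnlyXmlParser.hh"

int main()
{
    mdsd::details::BodyOnlyXmlParser parser;
    parser.ParseFile("commands.xml");      // throws MDSEXCEPTION on open/read failure
    std::string body = parser.MoveBody();  // element text and CDATA only; attributes are dropped
    std::cout << body << std::endl;
    return 0;
}
```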
  {
    "path": "Diagnostic/mdsd/mdscommands/BodyOnlyXmlParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __BODYONLYXMLPARSER__HH__\n#define __BODYONLYXMLPARSER__HH__\n\n#include <iostream>\n#include <string>\n#include \"SaxParserBase.hh\"\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// This is a simple XML parser. It will parse the XML body section only.\n/// The XML attributes are not parsed.\n/// </summary>\nclass BodyOnlyXmlParser : public SaxParserBase\n{\npublic:\n    BodyOnlyXmlParser() = default;\n    ~BodyOnlyXmlParser() = default;\n\n    /// <summary> Parse given xml file </summary>\n    virtual void ParseFile(std::string xmlFilePath);\n\n    std::string&& MoveBody() { return std::move(m_body); }\n    virtual std::string GetFilePath() const { return m_xmlFilePath; }\n\nprivate:\n    void OnStartElement(const std::string& name, const AttributeMap& attributes) override { m_body.clear(); }\n    void OnEndElement(const std::string& name) override {}\n    void OnCharacters(const std::string& chars) override;\n    void OnCDataBlock(const std::string& text) override { m_body.append(text); }\n\nprivate:\n    std::string m_xmlFilePath;\n    std::string m_body;\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __BODYONLYXMLPARSER__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CMakeLists.txt",
    "content": "set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -pthread\")\n\ninclude_directories(\n    ${CASABLANCA_INCLUDE_DIRS}\n    ${STORAGE_INCLUDE_DIRS}\n    /usr/include/libxml2\n    ${CMAKE_SOURCE_DIR}/mdsd\n    ${CMAKE_SOURCE_DIR}/mdsdlog\n    ${CMAKE_SOURCE_DIR}/mdsdutil\n)\n\nset(SOURCES\n    BodyOnlyXmlParser.cc\n    CmdListXmlParser.cc\n    CmdXmlCommon.cc\n    CmdXmlElement.cc\n    CmdXmlParser.cc\n    ConfigUpdateCmd.cc\n    DirectoryIter.cc\n    EventData.cc\n    EventEntry.cc\n    EventHubCmd.cc\n    EventHubPublisher.cc\n    EventHubType.cc\n    EventHubUploader.cc\n    EventHubUploaderId.cc\n    EventHubUploaderMgr.cc\n    EventPersistMgr.cc\n    MdsBlobReader.cc\n    MdsException.cc\n    PersistFiles.cc\n    PublisherStatus.cc\n    ${CMAKE_SOURCE_DIR}/mdsd/SaxParserBase.cc\n)\n\n# Disable warning from CPPREST\nset_source_files_properties(PersistFiles.cc PROPERTIES COMPILE_FLAGS -Wno-sign-compare)\n\n# Disable warnings from azure storage API.\nset_source_files_properties(\n    MdsBlobReader.cc\n    EventHubCmd.cc\n    PROPERTIES\n    COMPILE_FLAGS \"-Wno-unused-value -Wno-reorder\"\n)\n\nadd_library(${CMD_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS ${CMD_LIB_NAME}\n    ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdListXmlParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n#include <algorithm>\n#include <cctype>\n\n#include \"CmdListXmlParser.hh\"\n#include \"MdsException.hh\"\n#include \"CmdXmlElement.hh\"\n\nusing namespace mdsd::details;\n\nvoid\nCmdListXmlParser::OnEndElement(const std::string& name)\n{\n    switch(Name2ElementType(name)) {\n        case ElementType::Verb:\n            m_verb = MoveBody();\n            break;\n        case ElementType::Parameter:\n            m_paramList.emplace_back(MoveBody());\n            break;\n        case ElementType::Command:        \n            if (std::all_of(m_verb.cbegin(), m_verb.cend(), ::isspace)) {\n                std::ostringstream strm;\n                strm << \"Invalid data in XML file '\" << GetFilePath() \n                     << \"': 'Verb' cannot be empty or whitespace.\";\n                throw MDSEXCEPTION(strm.str());\n            }\n\n            if (0 == m_paramList.size()) {\n                std::ostringstream strm;\n                strm << \"Invalid data in XML file '\" << GetFilePath() \n                     << \"': no Parameter value is found.\";\n                throw MDSEXCEPTION(strm.str());\n            }\n\n            m_cmdParamMap[m_verb].emplace_back(m_paramList);\n            m_paramList.clear();\n            break;\n        default:\n            break;\n    }\n}\n"
  },
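For illustration, a sketch of feeding this parser a single UpdateConfig command; the XML shape mirrors the example quoted in ConfigUpdateCmd.cc, and Parse() is assumed to be the SaxParserBase entry point used there:

```cpp
#include "CmdListXmlParser.hh"

void ParseCommandsExample()
{
    mdsd::details::CmdListXmlParser parser;
    parser.Parse(
        "<Command version='1.0'>"
        "  <Verb>UpdateConfig</Verb>"
        "  <Parameters>"
        "    <Parameter>TRUE</Parameter>"
        "    <Parameter>65db3091d1b6ba83c7dba7a9a1a984ce</Parameter>"
        "    <Parameter>ConfigArchive/65db3091d1b6ba83c7dba7a9a1a984ce/TuxTestVer7v0.xml</Parameter>"
        "  </Parameters>"
        "</Command>");

    // Map key: verb name; map value: one parameter list per <Command> seen.
    auto cmds = parser.GetCmdParams();
    const auto& updateParams = cmds["UpdateConfig"].front();  // {"TRUE", md5, xstore path}
    (void)updateParams;
}
```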
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdListXmlParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __CMDLISTXMLPARSER__HH__\n#define __CMDLISTXMLPARSER__HH__\n\n#include <vector>\n#include <unordered_map>\n\n#include \"BodyOnlyXmlParser.hh\"\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// Commands XML parser. It will parse <Commands>...</Commands>.\n/// For reference, check commands.xsd.\n/// </summary>\nclass CmdListXmlParser : public BodyOnlyXmlParser\n{\npublic:\n    /// map key: Verb name. map value: list of parameter-list.\n    using CmdParamsType = std::unordered_map<std::string, std::vector<std::vector<std::string>>>;\n\n    CmdListXmlParser() = default;\n\n    ~CmdListXmlParser() = default;\n\n    CmdParamsType GetCmdParams() const { return m_cmdParamMap; }\n\nprivate:\n    void OnEndElement(const std::string& name) override;\n\nprivate:\n    CmdParamsType m_cmdParamMap;          // store all verb names and all parameters.\n    std::string m_verb;                   // store current verb name in the parser.\n    std::vector<std::string> m_paramList; // store current parameter list in the parser.\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __CMDLISTXMLPARSER__HH__\n\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlCommon.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n\n#include \"CmdXmlCommon.hh\"\n#include \"MdsException.hh\"\n\nnamespace mdsd\n{\n\nstd::string CmdXmlCommon::s_rootContainerName = \"mam\";\n\nnamespace details\n{\n\nvoid\nValidateCmdBlobParamsList(\n    const std::vector<std::vector<std::string>>& paramsList,\n    const std::string & verbName,\n    size_t totalParams\n    )\n{\n    if (0 == paramsList.size()) {\n        std::ostringstream strm;\n        strm << \"No Command Parameter is found for Verb '\" << verbName << \"'.\";\n        throw MDSEXCEPTION(strm.str());\n    }\n\n    for (const auto & v : paramsList) {\n        if (totalParams != v.size()) {\n            std::ostringstream strm;\n            strm << \"Invalid number of Command (verb=\" << verbName << \") parameters: expected=\"\n            << totalParams << \"; actual=\" << v.size() << \".\";\n            throw MDSEXCEPTION(strm.str());\n        }\n    }\n}\n\n} // namespace details\n} // namespace mdsd\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlCommon.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __CMDXMLCOMMON_HH__\n#define __CMDXMLCOMMON_HH__\n\n#include <string>\n#include <vector>\n\nnamespace mdsd\n{\n\nclass CmdXmlCommon {\npublic:\n\tstatic std::string GetRootContainerName() { return s_rootContainerName; }\n\tstatic void SetRootContainerName(std::string name) { s_rootContainerName = std::move(name); }\n\nprivate:\n\tstatic std::string s_rootContainerName;\n};\n\n\nnamespace details {\n\nvoid ValidateCmdBlobParamsList(\n    const std::vector<std::vector<std::string>>& paramsList,\n    const std::string & verbName,\n    size_t totalParams\n    );\n\n\n} // namespace details\n\n} // namespace mdsd\n\n#endif // __CMDXMLCOMMON_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlElement.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CmdXmlElement.hh\"\n#include <unordered_map>\n\nusing namespace mdsd::details;\n\nstatic std::unordered_map<std::string, ElementType> & GetCmdElementTypeMap()\n{\n    static auto xmltable = new std::unordered_map<std::string, ElementType>(\n    {\n        { \"Verb\", ElementType::Verb },\n        { \"Parameter\", ElementType::Parameter },\n        { \"Command\", ElementType::Command }\n    });\n    return *xmltable;\n}\n\n\nElementType\nmdsd::details::Name2ElementType(const std::string& name)\n{\n    auto xmltable = GetCmdElementTypeMap();\n    auto iter = xmltable.find(name);\n    if (iter != xmltable.end()) {\n        return iter->second;\n    }\n    return ElementType::Unknown;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlElement.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __CMDXMLELEMENT__HH__\n#define __CMDXMLELEMENT__HH__\n\n#include <string>\n\nnamespace mdsd { namespace details\n{\n\nenum class ElementType\n{\n    Unknown,\n    Verb,\n    Parameter,\n    Command\n};\n\nElementType Name2ElementType(const std::string& name);\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __CMDXMLELEMENT__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CmdXmlParser.hh\"\n#include \"CmdXmlElement.hh\"\n\nusing namespace mdsd::details;\n\nvoid\nCmdXmlParser::OnEndElement(const std::string& name)\n{\n    switch(Name2ElementType(name)) {\n        case ElementType::Verb:\n            m_verb = MoveBody();\n            break;\n        case ElementType::Parameter:\n            m_paramList.emplace_back(MoveBody());\n            break;\n        default:\n            break;\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/CmdXmlParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __CMDXMLPARSER__HH__\n#define __CMDXMLPARSER__HH__\n\n#include <vector>\n#include \"BodyOnlyXmlParser.hh\"\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// MDS Command XML parser. It will parse one <Command>...</Command>\n/// For reference, check commands.xsd.\n/// </summary>\nclass CmdXmlParser : public BodyOnlyXmlParser\n{\npublic:\n    CmdXmlParser() = default;\n    ~CmdXmlParser() = default;\n\n    std::string GetVerb() const { return m_verb; }\n\n    std::vector<std::string> GetParamList() const { return m_paramList; }\n\nprivate:\n    void OnEndElement(const std::string& name) override;\n\nprivate:\n    std::string m_verb;  // The value of 'Verb'\n    std::vector<std::string> m_paramList; // a list of the parameters defined for the Verb.\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __CMDXMLPARSER__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/ConfigUpdateCmd.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n#include <fstream>\n#include <algorithm>\n\n#include <cpprest/pplx/pplxtasks.h>\n\n#include \"ConfigUpdateCmd.hh\"\n#include \"MdsBlobReader.hh\"\n#include \"CmdListXmlParser.hh\"\n#include \"CmdXmlCommon.hh\"\n#include \"MdsException.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"Crypto.hh\"\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\nuint64_t ConfigUpdateCmd::s_lastTimestamp = 0;\nCrypto::MD5Hash ConfigUpdateCmd::s_lastMd5Sum;\nstd::string ConfigUpdateCmd::s_cmdFileName = \"MACommandCu.xml\";\n\nConfigUpdateCmd::ConfigUpdateCmd(\n        const std::string& rootContainerSas,\n        const std::string& eventNameSpace,\n        const std::string& tenantName,\n        const std::string& roleName,\n        const std::string& instanceName)\n    : m_rootContainerSas(rootContainerSas)\n    , m_configXmlPersistentFlag(true) // Just to avoid IDE/compiler warning\n{\n    Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::ConfigUpdateCmd\");\n\n    if (rootContainerSas.empty()) {\n        throw MDSEXCEPTION(\"ConfigUpdate blob root container cannot be empty.\");\n    }\n    if (eventNameSpace.empty()) {\n        throw MDSEXCEPTION(\"ConfigUpdate MDS namespace cannot be empty.\");\n    }\n    // Check the validity of tenantName, roleName & instanceName.\n    // 1. if tenantName is empty, then both roleName & instanceName must be empty\n    if (tenantName.empty() && !(roleName.empty() && instanceName.empty())) {\n        throw MDSEXCEPTION(\"Non-empty role name or instance name when tenant name is empty.\");\n    }\n    // 2. if roleName is empty, then instance name must be empty\n    if (roleName.empty() && !instanceName.empty()) {\n        throw MDSEXCEPTION(\"Non-empty instanceName given when roleName is empty.\");\n    }\n\n    // Construct the list of all possible cmd xml paths in xstore.\n    // E.g., \"TuxTest/myTestTenant/role1/instance1/MACommandCu.xml\",\n    //       \"TuxTest/myTestTenant/role1/MACommandCu.xml\",\n    //       \"TuxTest/myTestTenant/MACommandCu.xml\" and\n    //       \"TuxTest/MACommandCu.xml\"\n    std::string upToNameSpace  = eventNameSpace + \"/\";\n    std::string upToTenantName = upToNameSpace + tenantName + \"/\";\n    std::string upToRoleName   = upToTenantName + roleName + \"/\";\n    m_cmdXmlPathsXstore.reserve(4); // Maximum 4 paths to try\n    if (!instanceName.empty()) {\n        m_cmdXmlPathsXstore.push_back(upToRoleName + instanceName + \"/\" + s_cmdFileName);\n    }\n    if (!roleName.empty()) {\n        m_cmdXmlPathsXstore.push_back(upToRoleName + s_cmdFileName);\n    }\n    if (!tenantName.empty()) {\n        m_cmdXmlPathsXstore.push_back(upToTenantName + s_cmdFileName);\n    }\n    // Namespace/MACommandCu.xml should be always added\n    m_cmdXmlPathsXstore.push_back(upToNameSpace + s_cmdFileName);\n\n    TRACEINFO(trace,\n            \"ConfigUpdateCmd::ConfigUpdateCmd(), namespace = \\\"\"\n            << eventNameSpace << \"\\\", tenantName = \\\"\" << tenantName\n            << \"\\\", roleName = \\\"\" << roleName << \"\\\", instanceName = \\\"\"\n            << instanceName << \"\\\", resulting cmd xml path in xstore (longest one only) = \\\"\"\n            << m_cmdXmlPathsXstore.front() << '\"');\n}\n\n// Helper for parsing config update cmd xml\nstatic bool\nParseConfigUpdateCmdXml(\n        std::string&& xmlDoc,\n        bool& configXmlPersistentFlag,\n        Crypto::MD5Hash& 
configXmlMD5Sum,\n        std::string& configXmlPathXstore)\n{\n    Trace trace(Trace::MdsCmd, \"ParseConfigUpdateCmdXml\");\n\n    if (xmlDoc.empty()) {\n        trace.NOTE(\"No ConfigUpdate cmd XML data to parse. Abort parser.\");\n        return false;\n    }\n\n    configXmlPersistentFlag = false;\n    configXmlPathXstore.clear();\n\n    CmdListXmlParser parser;\n    parser.Parse(xmlDoc);\n\n    auto paramTable = parser.GetCmdParams();\n    if (0 == paramTable.size()) {\n        throw MDSEXCEPTION(\"No Command Parameter is found in ConfigUpdate cmd XML.\");\n    }\n\n    // UpdateConfig cmd xml example:\n    //\n    // <Command version='1.0'>\n    //   <Verb>UpdateConfig</Verb>\n    //   <Parameters>\n    //     <Parameter>TRUE</Parameter>\n    //     <Parameter>65db3091d1b6ba83c7dba7a9a1a984ce</Parameter>\n    //     <Parameter>ConfigArchive/65db3091d1b6ba83c7dba7a9a1a984ce/TuxTestVer7v0.xml</Parameter>\n    //   </Parameters>\n    // </Command>\n    const std::string CfgUpdateCmdVerb = \"UpdateConfig\";\n    const auto NPARAMS = 3;\n    const auto PersistentFlagIndex = 0;\n    const auto ConfigXmlMD5SumIndex = 1;\n    const auto ConfigXmlXstorePathIndex = 2;\n\n    auto cfgUpdateParamsList = paramTable[CfgUpdateCmdVerb];\n    ValidateCmdBlobParamsList(cfgUpdateParamsList, CfgUpdateCmdVerb, NPARAMS);\n\n    // Now extract the parameters.\n    // But first check whether more than one UpdateConfig command is in the cmd xml.\n    // In that case, log a warning and use the last one.\n    if (cfgUpdateParamsList.size() > 1)\n    {\n        std::ostringstream msg;\n        msg << \"More than one UpdateConfig command was given in the cmd XML\"\n            << \" (there were \" << cfgUpdateParamsList.size()\n            << \"). Only the last one will be used.\";\n        Logger::LogWarn(msg);\n    }\n    // Non-const reference so that the path parameter below can actually be moved from.\n    auto& params = cfgUpdateParamsList.back();\n    configXmlPersistentFlag = params[PersistentFlagIndex] == \"TRUE\";\n    configXmlMD5Sum = Crypto::MD5Hash::from_hash(params[ConfigXmlMD5SumIndex]);\n    configXmlPathXstore = std::move(params[ConfigXmlXstorePathIndex]);\n\n    TRACEINFO(trace,\n            \"MDS config update cmd xml blob parsed. 
persist flag = \"\n            << configXmlPersistentFlag << \", config xml md5sum = \"\n            << configXmlMD5Sum.to_string() << \", config xml xstore path = \"\n            << configXmlPathXstore);\n    return true;\n}\n\npplx::task<bool>\nConfigUpdateCmd::StartAsyncDownloadOfNewConfig()\n{\n    Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::StartAsyncDownloadOfNewConfig\");\n\n    // Helper struct type to hold a cml blob path and its LMT\n    struct LmtLookupDataT\n    {\n        const std::string*  m_cmdXmlPath;\n        uint64_t            m_lmt;\n\n        LmtLookupDataT(const std::string& cmdXmlPath, uint64_t lmt)\n            : m_cmdXmlPath(&cmdXmlPath)\n            , m_lmt(lmt)\n        {}\n\n        // Just for containers\n        LmtLookupDataT() : m_cmdXmlPath(nullptr), m_lmt(0) {}\n\n        bool operator<(const LmtLookupDataT& rhs) const\n        {\n            return m_lmt < rhs.m_lmt;\n        }\n    };\n\n    std::vector<pplx::task<LmtLookupDataT>> lmtTasks; // Parallel LMT lookup tasks\n\n    // Async/parallel LMT retrieval\n    for (size_t i = 0; i < m_cmdXmlPathsXstore.size(); i++)\n    {\n        lmtTasks.push_back(pplx::task<LmtLookupDataT>([=]()\n        {\n            MdsBlobReader blobReader(m_rootContainerSas, m_cmdXmlPathsXstore[i]);\n\n            // Get the blob's LMT along with the blob's path (asynchronously)\n            auto asyncLmtLookupTask = blobReader.GetLastModifiedTimeStampAsync(\n                                            MdsBlobReader::DoNothingBlobNotFoundExHandler);\n                                            // We don't want to log non-existing blob here, as that could be frequent and persistent\n            return asyncLmtLookupTask.then([=](uint64_t lmt)\n            {\n                return LmtLookupDataT(m_cmdXmlPathsXstore[i], lmt);\n            });\n        }));\n    }\n\n    // Specify what to do when all parallel tasks are completed\n    return pplx::when_all(lmtTasks.begin(), lmtTasks.end()).then([=](std::vector<LmtLookupDataT> lmtResults) -> pplx::task<bool>\n    {\n        Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::StartAsyncDownloadOfNewConfig when_all().then() lambda\");\n\n        // Find latest LMT path\n        auto maxLmtResult = std::max_element(lmtResults.begin(), lmtResults.end());\n        auto latestLmt = maxLmtResult->m_lmt;\n        auto latestLmtCmdXmlPath = *maxLmtResult->m_cmdXmlPath;\n\n        TRACEINFO(trace, \"Latest LMT from all candidate cmd blob paths (# paths: \" << m_cmdXmlPathsXstore.size()\n                << \", longest path: \" << m_cmdXmlPathsXstore.front()\n                << \", latest LMT path: \" << latestLmtCmdXmlPath\n                << \") = \" << latestLmt << \" (0 means no cmd blob found), \"\n                << \", s_lastTimestamp = \" << s_lastTimestamp);\n\n        return GetCmdXmlAsync(latestLmt, latestLmtCmdXmlPath);\n    }).then([](bool result)\n    {\n        return result;\n    });\n}\n\npplx::task<bool>\nConfigUpdateCmd::GetCmdXmlAsync(uint64_t blobLmt, std::string cmdXmlPathXstore)\n{\n    Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::GetCmdXmlAsync\");\n\n    pplx::task<bool> returnFalseTask([]() { return false; });\n\n    if (blobLmt == 0) // No cmd blob found. Nothing to do.\n    {\n        TRACEINFO(trace, \"No cmd blob was passed (blobLmt = 0). Nothing to do.\");\n        return returnFalseTask;\n    }\n\n    if (blobLmt <= s_lastTimestamp) // No new cmd blob found. 
Nothing to do.\n    {\n        TRACEINFO(trace, \"No new cmd blob was passed (passed blobLmt = \"\n                << blobLmt << \", s_lastTimestamp = \" << s_lastTimestamp << ')');\n        return returnFalseTask;\n    }\n\n    // Get/check the cmd blob's content\n    MdsBlobReader cmdXmlBlobReader(m_rootContainerSas, cmdXmlPathXstore);\n    auto asyncCmdXmlReadTask = cmdXmlBlobReader.ReadBlobToStringAsync();\n    return asyncCmdXmlReadTask.then([blobLmt,this](std::string cmdXmlString) -> pplx::task<bool>\n    {\n        return ProcessCmdXmlAsync(blobLmt, std::move(cmdXmlString));\n    });\n}\n\npplx::task<bool>\nConfigUpdateCmd::ProcessCmdXmlAsync(uint64_t blobLmt, std::string cmdXmlString)\n{\n    Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::ProcessCmdXmlAsync\");\n\n    TRACEINFO(trace, \"Cmd XML Blob content=\\\"\" << cmdXmlString << '\"');\n\n    pplx::task<bool> returnFalseTask([]() { return false; });\n\n    if (cmdXmlString.empty()) // Cmd blob content is empty. Nothing to do.\n    {\n        return returnFalseTask;\n    }\n\n    bool configXmlPersistentFlag = false;\n    Crypto::MD5Hash configXmlMD5Sum;\n    std::string configXmlPathXstore;\n    std::string genevaIssueMsg = \"[Geneva has generated an invalid configuration update command--See the description outside the bracket. Please report this via the 'Contact Us' button on the Geneva Monitoring portal] \";\n    try\n    {\n        if (!ParseConfigUpdateCmdXml(std::move(cmdXmlString), configXmlPersistentFlag,\n                configXmlMD5Sum, configXmlPathXstore)) {\n            return returnFalseTask;\n        }\n    }\n    catch (const MdsException& e)\n    {\n        std::ostringstream msg;\n        msg << genevaIssueMsg << \"ConfigUpdate cmd XML parse failed (no UpdateConfig verb or invalid XML format): \"\n            << e.what();\n        Logger::LogError(msg);\n        return returnFalseTask;\n    }\n\n    // Validate the retrieved ConfigUpdate cmd params\n    if (configXmlPathXstore.empty())\n    {\n        Logger::LogError(genevaIssueMsg + \"ConfigUpdate cmd's config xml xstore path param cannot be empty.\");\n        return returnFalseTask;\n    }\n\n    TRACEINFO(trace, \"Cmd XML parsed successfully. ConfigXml xstore path = \"\n                    << configXmlPathXstore << \", MD5 sum = \" << configXmlMD5Sum.to_string()\n                    << \", persistent flag = \" << configXmlPersistentFlag);\n\n    // Check if the md5 is the same as the last downloaded one, and return if so.\n    if (configXmlMD5Sum == s_lastMd5Sum)\n    {\n        TRACEINFO(trace, \"MD5 sum given in the cmd XML\"\n                << \" is equal to the last downloaded one. 
Skipping this one.\");\n        return returnFalseTask;\n    }\n\n    // Now, download config XML from Xstore (asynchronously)\n\n    MdsBlobReader blobReader(m_rootContainerSas, configXmlPathXstore);\n    auto cfgXmlAsyncReadTask = blobReader.ReadBlobToStringAsync();\n    return cfgXmlAsyncReadTask.then([=](std::string configXml) -> pplx::task<bool>\n    {\n        return GetCfgXmlAsync(std::move(configXml), configXmlMD5Sum,\n                configXmlPathXstore, configXmlPersistentFlag, blobLmt);\n    });\n}\n\npplx::task<bool>\nConfigUpdateCmd::GetCfgXmlAsync(\n        std::string && configXml,\n        const Crypto::MD5Hash & configXmlMD5Sum,\n        const std::string & configXmlPathXstore,\n        bool configXmlPersistentFlag,\n        uint64_t cmdBlobLmt)\n{\n    Trace trace(Trace::MdsCmd, \"ConfigUpdateCmd::GetCfgXmlAsync\");\n\n    TRACEINFO(trace, \"Downloaded mdsd cfg xml: \\\"\" << configXml << '\"');\n\n    pplx::task<bool> returnFalseTask([]() { return false; });\n\n    if (configXml.empty())\n    {\n        Logger::LogError(\"Downloaded mdsd cfg xml is empty!\");\n        return returnFalseTask;\n    }\n\n    // Check if md5 sum matches the passed md5sum param\n    auto computedMD5Sum = Crypto::MD5HashString(configXml);\n    if (configXmlMD5Sum != computedMD5Sum)\n    {\n        std::ostringstream msg;\n        msg << \"MD5 sum mismatch! Calculated = \" << computedMD5Sum.to_string()\n            << \", Given in cmd XML = \" << configXmlMD5Sum.to_string();\n        Logger::LogError(msg);\n        return returnFalseTask;\n    }\n\n    // Now update the relevant member variables\n    m_configXmlPathXstore = configXmlPathXstore;\n    m_configXmlString = std::move(configXml);\n    m_configXmlMD5Sum = std::move(computedMD5Sum);\n    m_configXmlPersistentFlag = configXmlPersistentFlag;\n    s_lastMd5Sum = computedMD5Sum;\n    s_lastTimestamp = cmdBlobLmt;\n\n    return pplx::task<bool>([](){ return true; });\n}\n"
  },
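A sketch of how a polling caller might drive this pipeline end to end, assuming it is acceptable to block on the pplx task with .get() in that context; the constructor arguments are illustrative values taken from the comments above:

```cpp
#include <string>
#include "ConfigUpdateCmd.hh"

void PollOnceForNewConfig(const std::string& rootContainerSas)
{
    mdsd::ConfigUpdateCmd cmd(rootContainerSas,
                              "TuxTest",        // event namespace
                              "myTestTenant",   // tenant (optional)
                              "role1",          // role (optional)
                              "instance1");     // instance (optional)

    // Looks up the LMT of every candidate MACommandCu.xml path, downloads the
    // newest cmd blob, then the config blob, and verifies its MD5 sum.
    if (cmd.StartAsyncDownloadOfNewConfig().get()) {
        std::string configXml = cmd.GetConfigXmlString();
        // hand configXml to the config loader here
        (void)configXml;
    }
}
```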
  {
    "path": "Diagnostic/mdsd/mdscommands/ConfigUpdateCmd.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __CONFIGUPDATECMD_HH__\n#define __CONFIGUPDATECMD_HH__\n\n#include <string>\n#include <vector>\n#include \"Crypto.hh\"\n\nnamespace mdsd\n{\n\n/// <summary>\n/// This class implements functions to handle ConfigUpdate command xml files.\n/// This includes download xml file, parse xml file, and get data from xml.\n/// </summary>\nclass ConfigUpdateCmd\n{\npublic:\n    /// <summary>\n    /// Create the object that'll handle a ConfigUpdate command xml file.\n    /// <param name=\"rootContainerSas\">The sas key for the root container\n    /// where the command xml file locates. </param>\n    /// <param name=\"eventNameSpace\">Event namespace (e.g., TuxTest). Can't be empty.</param>\n    /// <param name=\"tenantName\">Tenant name. Optional</param>\n    /// <param name=\"roleName\">Role name. Optional</param>\n    /// <param name=\"instanceName\">Instance name. Optional</param>\n    /// </summary>\n    ConfigUpdateCmd(\n            const std::string& rootContainerSas,\n            const std::string& eventNameSpace,\n            const std::string& tenantName,\n            const std::string& roleName,\n            const std::string& instanceName);\n\n    ~ConfigUpdateCmd() {}\n\n    ConfigUpdateCmd(const ConfigUpdateCmd & other) = default;\n    ConfigUpdateCmd(ConfigUpdateCmd&& other) = default;\n    ConfigUpdateCmd& operator=(const ConfigUpdateCmd& other) = default;\n    ConfigUpdateCmd& operator=(ConfigUpdateCmd&& other) = default;\n\n    /// <summary>\n    /// Initiate an async download of a new config. Returns a task whose result\n    /// is true iff a new config was successfully downloaded (and corresponding\n    /// member variables are correctly updated).\n    /// </summary>\n    pplx::task<bool> StartAsyncDownloadOfNewConfig();\n\n    /// <summary>\n    /// Get the config XML string downloaded from XStore\n    /// </summary>\n    std::string GetConfigXmlString() const { return m_configXmlString; }\n\n    /// <summary>\n    /// Get the config XML string's MD5 sum\n    /// </summary>\n    Crypto::MD5Hash GetConfigXmlMD5Sum() const { return m_configXmlMD5Sum; }\n\n    /// <summary>\n    /// Initialize with existing MD5Hash (e.g. 
from the mdsd command line config).\n    /// </summary>\n    static void Initialize(const Crypto::MD5Hash& md5) { s_lastMd5Sum = md5; }\n\nprivate:\n    std::string m_rootContainerSas;\n    std::string m_configXmlString;          // Member variable where downloaded mdsd config xml will be stored\n\n    std::vector<std::string> m_cmdXmlPathsXstore;   // List of all XStore paths to search for a cmd xml blob.\n                                            // e.g., \"TuxTest/myTestTenant/role1/instance1/MACommandCu.xml\",\n                                            //       \"TuxTest/myTestTenant/role1/MACommandCu.xml\",\n                                            //       \"TuxTest/myTestTenant/MACommandCu.xml\"\n\n    // Function to asynchronously start downloading a cmd xml blob given as the param.\n    // The task then continues to the ProcessCmdXmlAsync task if a cmd xml is downloaded correctly.\n    // Returns the continuation task whose completion will give us the result of cmd blob downloading/processing.\n    pplx::task<bool> GetCmdXmlAsync(uint64_t blobLmt, std::string cmdXmlPathXstore);\n\n    // Async cmd XML processing task\n    // The task then continues to the GetCfgXmlAsync task if a cmd xml is parsed correctly.\n    pplx::task<bool> ProcessCmdXmlAsync(uint64_t blobLmt, std::string cmdXmlString);\n\n    // Async cfg XML downloading task\n    pplx::task<bool> GetCfgXmlAsync(\n            std::string && configXml,\n            const Crypto::MD5Hash & configXmlMD5Sum,\n            const std::string & configXmlPathXstore,\n            bool configXmlPersistentFlag,\n            uint64_t blobLmt);\n\n    // Extracted UpdateConfig cmd params\n    std::string m_configXmlPathXstore;      // e.g., \"ConfigArchive/65db3091d1b6ba83c7dba7a9a1a984ce/TuxTestVer7v0.xml\"\n    Crypto::MD5Hash m_configXmlMD5Sum;      // e.g., \"65db3091d1b6ba83c7dba7a9a1a984ce\"\n    bool m_configXmlPersistentFlag;         // May not be needed at all for us, but just saving it anyway\n\n    // Things to remember for update logic\n    // Updated with timestamp of the last successful XML cfg blob to compare with the new XML cfg blob\n    static uint64_t s_lastTimestamp;\n    // Updated with MD5 hash of the last successful mdsd config blob's MD5 sum to compare with the new XML cfg blob    \n    static Crypto::MD5Hash s_lastMd5Sum;\n    // Fixed constants\n    static std::string s_cmdFileName;       // Currently \"MACommandCu.xml\"\n};\n\n} // namespace mdsd\n\n#endif // __CONFIGUPDATECMD_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/DirectoryIter.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <system_error>\n#include <sstream>\n#include <cstring>\n\nextern \"C\" {\n#include <sys/types.h>\n#include <dirent.h>\n}\n\n#include \"DirectoryIter.hh\"\n#include \"MdsException.hh\"\n#include \"MdsCmdLogger.hh\"\n\nusing namespace mdsd::details;\n\nDirectoryIter::DirectoryIter():\n    m_dirp(nullptr),\n    m_result(nullptr)\n{\n    memset(&m_ent, 0, sizeof(m_ent));\n}\n\nDirectoryIter::DirectoryIter(\n    const std::string & dirname):\n    m_dirname(dirname),\n    m_dirp(nullptr),\n    m_result(nullptr)\n{\n    m_dirp = opendir(dirname.c_str());\n\n    if (!m_dirp) {\n        std::error_code ec(errno, std::system_category());\n        std::ostringstream strm;\n        strm << \"Failed to open directory '\" << dirname << \"'; Reason: \" << ec.message();\n        throw MDSEXCEPTION(strm.str());\n    }\n\n    MoveToNextValid();\n}\n\nDirectoryIter::~DirectoryIter()\n{\n    if (m_dirp) {\n        closedir(m_dirp);\n    }\n}\n\nvoid\nDirectoryIter::MoveToNext()\n{\n    if (!m_dirp) {\n        return;\n    }\n\n    auto rtn = readdir_r(m_dirp, &m_ent, &m_result);\n    if (rtn) {\n        std::ostringstream strm;\n        strm << \"Error: in directory iteration, readdir_r() failed with error code=\" << rtn;\n        MdsCmdLogError(strm);\n    }\n    if (!m_result) {\n        memset(&m_ent, 0, sizeof(m_ent));\n        closedir(m_dirp);\n        m_dirp = nullptr;\n        m_result = nullptr;\n    }\n}\n\nvoid\nDirectoryIter::MoveToNextValid()\n{\n    while(true) {\n        MoveToNext();\n        if (!m_dirp) {\n            break;\n        }\n\n        std::string curdir{m_ent.d_name};\n        if (\".\" != curdir && \"..\" != curdir) {\n            break;\n        }\n    }\n}\n\nDirectoryIter&\nDirectoryIter::operator++()\n{\n    MoveToNextValid();\n    return *this;\n}\n\nstd::string\nDirectoryIter::operator*() const {\n    if (m_ent.d_name[0]) {\n        return m_dirname + \"/\" + m_ent.d_name;\n    }\n    else {\n        return std::string();\n    }\n}\n\nbool\nmdsd::details::operator==(\n    const DirectoryIter& x,\n    const DirectoryIter& y\n    )\n{\n    return (x.m_dirp == y.m_dirp &&\n            x.m_result == y.m_result &&\n            strncmp(x.m_ent.d_name, y.m_ent.d_name, sizeof(x.m_ent.d_name)) == 0);\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/DirectoryIter.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __DIRECTORYITER__HH__\n#define __DIRECTORYITER__HH__\n\n#include <string>\nextern \"C\" {\n#include <dirent.h>\n}\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// Iterator each entry in the directory, including sub-directories.\n/// It ignores \".\" and \"..\".\n/// </summary>\nclass DirectoryIter\n{\npublic:\n    /// <sumamry>A directory iterator pointing to nothing </summary>\n    DirectoryIter();\n    /// <summary>A directory iterator for given dir</summary>\n    DirectoryIter(const std::string & dirname);\n    ~DirectoryIter();\n\n    /// There is no safe way to copy 'DIR*'. Make class movable, not copyable.\n    DirectoryIter(const DirectoryIter& other) = delete;\n    DirectoryIter(DirectoryIter&& other) = default;\n    DirectoryIter& operator=(const DirectoryIter& other) = delete;\n    DirectoryIter& operator=(DirectoryIter&& other) = default;\n\n    /// <summary> Pre-increment operator. Move to next entry in the directory.</summary>\n    DirectoryIter& operator++();\n\n    /// <summary> Return current item name (filename or dir name) </summary>\n    std::string operator*() const;\n\n    /// <summary> Return whether 2 iter points to the same thing </summary>\n    friend bool operator==(const DirectoryIter& x, const DirectoryIter& y);\n\n    /// <summary> Return whether 2 iter points to different things </summary>\n    friend bool operator!=(const DirectoryIter& x, const DirectoryIter& y)\n    {\n        return !(x==y);\n    }\n\nprivate:\n    void MoveToNext();\n    void MoveToNextValid();\n\nprivate:\n    std::string m_dirname;\n    DIR* m_dirp;\n    struct dirent m_ent;\n    struct dirent * m_result;\n};\n\nbool operator==(const DirectoryIter& x, const DirectoryIter& y);\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __DIRECTORYITER__HH__\n"
  },
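A minimal usage sketch: a default-constructed DirectoryIter (null DIR*, zeroed dirent) compares equal to an exhausted one under the operator== above, so it serves as the end sentinel; "/var/run/mdsd" is an illustrative path:

```cpp
#include <iostream>
#include "DirectoryIter.hh"

void PrintDirectoryEntries()
{
    using mdsd::details::DirectoryIter;

    DirectoryIter end;  // points to nothing; acts as the end sentinel
    // The constructor throws MDSEXCEPTION if the directory cannot be opened.
    for (DirectoryIter it("/var/run/mdsd"); it != end; ++it) {
        std::cout << *it << std::endl;  // prints "<dirname>/<entry>"
    }
}
```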
  {
    "path": "Diagnostic/mdsd/mdscommands/EventData.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <bond/core/bond.h>\n#include \"EventData.hh\"\n#include \"MdsException.hh\"\n\nusing namespace mdsd;\n\nstatic std::string\nGetStringFromOutput(\n    const bond::OutputBuffer & output\n    )\n{\n    std::vector<bond::blob> blist;\n    output.GetBuffers(blist);\n\n    size_t totalLen = 0;\n    for (const auto & b : blist) {\n        totalLen += b.length();\n    }\n\n    std::string resultStr;\n    resultStr.reserve(totalLen);\n    for (const auto & b : blist) {\n        resultStr.append(b.content(), b.length());\n    }\n    return resultStr;\n}\n\n\nstd::string\nEventDataT::Serialize() const\n{\n    if (m_data.empty()) {\n        throw MDSEXCEPTION(\"EventData serialization failed: data cannot be empty.\");\n    }\n\n    bond::OutputBuffer output;\n    bond::SimpleBinaryWriter<bond::OutputBuffer> writer(output);\n    writer.Write(m_data);\n\n    writer.Write(static_cast<size_t>(m_table.size()));\n    for (const auto & it : m_table) {\n        writer.Write(it.first);\n        writer.Write(it.second);\n    }\n\n    return GetStringFromOutput(output);\n}\n\nEventDataT\nEventDataT::Deserialize(\n    const std::string & datastr\n    )\n{\n    return Deserialize(datastr.c_str(), datastr.size());\n}\n\nEventDataT\nEventDataT::Deserialize(\n    const char* buf,\n    size_t bufSize\n    )\n{\n    if (!buf) {\n        throw MDSEXCEPTION(\"EventData deserialization failed: input buf cannot be NULL.\");\n    }\n\n    EventDataT dataObj;\n\n    try {\n        bond::blob b;\n        b.assign(buf, bufSize);\n\n        bond::SimpleBinaryReader<bond::InputBuffer> reader(b);\n        std::string datastr;\n        reader.Read(datastr);\n        dataObj.SetData(std::move(datastr));\n\n        size_t tblSize = 0;\n        reader.Read(tblSize);\n\n        for (size_t i = 0; i < tblSize; i++) {\n            std::string k, v;\n            reader.Read(k);\n            reader.Read(v);\n            dataObj.AddProperty(std::move(k), std::move(v));\n        }\n    }\n    catch(std::exception& ex) {\n        throw MDSEXCEPTION(std::string(\"EventData deserialization failed: \") + ex.what());\n    }\n\n    return dataObj;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventData.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTDATA_HH__\n#define __EVENTDATA_HH__\n\n#include <unordered_map>\n#include <string>\n\nnamespace mdsd {\n\n/// The EventDataT has 2 parts: a key-value pair table of properties and\n/// actual data string.\nclass EventDataT {\npublic:\n    using EventPropertyT = std::unordered_map<std::string, std::string>;\n\n    EventDataT() = default;\n    ~EventDataT() = default;\n\n    bool empty() const { return m_data.empty() && m_table.empty(); }\n\n    std::string GetData() const { return m_data; }\n    void SetData(const std::string & data) { m_data = data; }\n    void SetData(std::string && data) { m_data = std::move(data); }\n\n    // Specialization for all integral types\n    template <typename T>\n    typename std::enable_if<std::is_integral<T>::value, void>::type\n    AddProperty(std::string name, T value) {\n        m_table[std::move(name)] = std::to_string(value);\n    }\n\n    void AddProperty(std::string name, std::string value) {\n        m_table[std::move(name)] = std::move(value);\n    }\n\n    // <summary>\n    /// Get properties object which is [key,value] table.\n    /// </summary>\n    const EventPropertyT & Properties() const {\n        return m_table;\n    }\n\n    std::string Serialize() const;\n    static EventDataT Deserialize(const std::string & datastr);\n\n    /// <summary>\n    /// Deserialize a char array and return EventData object.\n    /// The memory of the char array must be valid in this function.\n    /// </summary>\n    static EventDataT Deserialize(const char* buf, size_t bufSize);\n\n    /// <summary>\n    /// The max size of EventHub data to support.\n    /// </summary>\n    static size_t GetMaxSize() { return 256*1024; }\n\nprivate:\n    EventPropertyT m_table; // {key,value} property table\n    std::string m_data; // actual message data\n};\n\n} // namespace mdsd\n\n#endif // __EVENTDATA_HH__\n"
  },
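  {
    "path": "Diagnostic/mdsd/mdscommands/examples/EventDataRoundTripExample.cc",
    "content": "// Illustrative sketch only -- not part of the original source tree. It shows\n// the intended EventDataT usage: set the payload, attach properties, then\n// round-trip through Serialize()/Deserialize(). The file name and main() are\n// assumptions; it compiles against EventData.hh/EventData.cc (and bond).\n\n#include <cassert>\n#include <iostream>\n#include \"EventData.hh\"\n\nint main()\n{\n    mdsd::EventDataT data;\n    data.SetData(\"hello event hub\");\n    data.AddProperty(\"Level\", 4);       // integral overload, stored as \"4\"\n    data.AddProperty(\"Source\", \"mdsd\"); // string overload\n\n    // Serialize() throws MDSEXCEPTION when the data string is empty.\n    std::string wire = data.Serialize();\n\n    auto restored = mdsd::EventDataT::Deserialize(wire);\n    assert(restored.GetData() == \"hello event hub\");\n    assert(restored.Properties().at(\"Level\") == \"4\");\n    std::cout << \"round-trip OK, wire size=\" << wire.size() << std::endl;\n    return 0;\n}\n"
  },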
  {
    "path": "Diagnostic/mdsd/mdscommands/EventEntry.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventEntry.hh\"\n\nstd::atomic<uint64_t> mdsd::details::EventEntry::s_counter{0};"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventEntry.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef __EVENTENTRY_HH__\n#define __EVENTENTRY_HH__\n\n#include <string>\n#include <ctime>\n#include <atomic>\n#include \"EventData.hh\"\n\nnamespace mdsd { namespace details\n{\n/// <summary>\n/// EventEntry class include data sent to EventHub for each upload,\n/// plus some metadata about the event data.\n/// <summary>\nclass EventEntry\n{\npublic:\n    EventEntry(const EventDataT & data) :\n        m_rawData(data)\n    {\n        s_counter++;\n        m_id = s_counter;\n    }\n\n    EventEntry(EventDataT && data) :\n        m_rawData(std::move(data))\n    {\n        s_counter++;\n        m_id = s_counter;\n    }\n\n    ~EventEntry() {}\n\n    EventEntry(const EventEntry& other) = default;\n    EventEntry(EventEntry&& other) = default;\n    EventEntry& operator=(const EventEntry& other) = default;\n    EventEntry& operator=(EventEntry&& other) = default;\n\n    /// <summary>Do exponential backoff for next retry </summary>\n    void BackOff()\n    {\n        auto delta = m_nextSendTimet - m_firstSendTimet;\n        if (0 == delta) {\n            m_nextSendTimet++;\n        }\n        else {\n            m_nextSendTimet = m_firstSendTimet + delta*2 + 1;\n        }\n    }\n\n    bool IsNeverSent() const { return (0 == m_firstSendTimet); }\n\n    void SetSendTime()\n    {\n        auto now = GetNow();\n        m_firstSendTimet = now;\n        m_nextSendTimet = now;\n    }\n\n    /// <summary>\n    /// Get number of seconds since the data was first uploaded.\n    /// Return -1 if the data is never uploaded before.\n    /// </summary>\n    int32_t GetAgeInSeconds() const\n    {\n        if (0 == m_firstSendTimet) {\n            return -1;\n        }\n        return (GetNow() - m_firstSendTimet);\n    }\n\n    EventDataT GetData() const { return m_rawData; }\n\n    /// <summary> Get some ID for the event, for tracing purpose only.\n    /// no need to be unique. </summary>\n    uint64_t GetId() const { return m_id; }\n\n    /// <summary> Is it now the time to re-upload the data? </summary>\n    bool IsTimeToRetry() const { return (GetNow() >= m_nextSendTimet); }\n\n    bool IsInPersistence() const { return m_inPersistence; }\n    void SetPersistence() { m_inPersistence = true; }\n\nprivate:\n    time_t GetNow() const { return time(nullptr); }\n\nprivate:\n    // The minimum time to upload when getting a next chance.\n    // If the current time is less than this value, data won't be uploaded.\n    time_t m_nextSendTimet = 0;\n\n    time_t m_firstSendTimet = 0;   // The first time to upload the data.\n    EventDataT m_rawData;         // The raw data uploaded to Event Hub.\n    static std::atomic<uint64_t> s_counter;\n    uint64_t m_id = 0;             // A ID for the entry. For tracing purpose only.\n    bool m_inPersistence = false;  // Is the item added to persistence manager?\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __EVENTENTRY_HH__\n"
  },
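  {
    "path": "Diagnostic/mdsd/mdscommands/examples/EventEntryBackoffExample.cc",
    "content": "// Illustrative sketch only -- not part of the original source tree. It walks\n// through the EventEntry retry bookkeeping: SetSendTime() stamps the first\n// attempt, and each BackOff() pushes the next retry out exponentially\n// (delta -> 2*delta + 1 seconds). The file name and main() are assumptions;\n// it links against EventEntry.cc for s_counter.\n\n#include <iostream>\n#include \"EventEntry.hh\"\n#include \"EventData.hh\"\n\nint main()\n{\n    mdsd::EventDataT data;\n    data.SetData(\"payload\");\n\n    mdsd::details::EventEntry entry(std::move(data));\n    std::cout << \"never sent: \" << std::boolalpha << entry.IsNeverSent() << std::endl; // true\n\n    entry.SetSendTime(); // first failed attempt: next retry time == now\n    entry.BackOff();     // delta 0 -> retry in 1s\n    entry.BackOff();     // delta 1 -> retry in 3s\n    entry.BackOff();     // delta 3 -> retry in 7s\n\n    std::cout << \"age=\" << entry.GetAgeInSeconds() << \"s, time to retry: \"\n              << entry.IsTimeToRetry() << std::endl;\n    return 0;\n}\n"
  },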
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubCmd.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n#include \"EventHubCmd.hh\"\n#include \"MdsBlobReader.hh\"\n#include \"CmdListXmlParser.hh\"\n#include \"CmdXmlCommon.hh\"\n#include \"MdsException.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\nstd::string EventHubCmd::s_parentContainerName = \"mdssubscriptions\";\n\nstd::ostream&\noperator<<(std::ostream& str, const EhCmdXmlItems & cmd)\n{\n    // for security reason, only dump part of SAS key.\n    str << \"SAS key: \" << cmd.sas.substr(0, 20) << \"..., MDS Endpoint ID: \"\n        << cmd.endpoint << \", Mapped Moniker: \" << cmd.moniker;\n    return str;\n}\n\nEventHubCmd::EventHubCmd(\n    std::string eventNameSpace,\n    int eventVersion,\n    std::string rootContainerSas\n    ) :\n    m_blobNameSuffix(std::move(eventNameSpace)),\n    m_rootContainerSas(std::move(rootContainerSas)),\n    m_noticeXmlItemsTable(new EhCmdXmlItemsTable_t()),\n    m_pubXmlItemsTable(new EhCmdXmlItemsTable_t())\n{\n    if (m_blobNameSuffix.empty()) {\n        throw MDSEXCEPTION(\"Event Hub MDS namespace cannot be empty.\");\n    }\n    if (m_rootContainerSas.empty()) {\n        throw MDSEXCEPTION(\"Event Hub blob root container cannot be empty.\");\n    }\n    m_blobNameSuffix.append(\"Ver\");\n    m_blobNameSuffix.append(std::to_string(eventVersion));\n    m_blobNameSuffix.append(\"v0.xml\");\n}\n\nvoid\nEventHubCmd::ProcessCmdXml()\n{\n    Trace trace(Trace::MdsCmd, \"EventHubCmd::ProcessCmdXml\");\n    // The MACommandPub<ConfigId>.xml contains both notice and publish EH event info.\n    ProcessBlob(GetBlobName(\"MACommandPub\"));\n}\n\nvoid\nEventHubCmd::ProcessBlob(\n    std::string&& blobName\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubCmd::ProcessBlob\");\n    MdsBlobReader blobReader(m_rootContainerSas, std::move(blobName), s_parentContainerName);\n\n    std::string blobData;\n    const int ntimes = 5;\n\n    // Because typically EventHubCmd XML blob should be OK to read, if empty data is returned,\n    // retry to avoid any possible storage API failures.\n    for (int i = 0; i < ntimes; i++) {\n        blobData = std::move(blobReader.ReadBlobToString());\n\n        if (!blobData.empty() || (ntimes-1) == i) {\n            break;\n        }\n\n        TRACEINFO(trace, \"No EventHubCmd XML is found. 
Retry index=\" << (i+1));\n        usleep(100*1000*(1<<i)); // exponential retry\n    }\n    if (blobData.empty()) {\n        throw MDSEXCEPTION(\"EventHubCmd::ProcessBlob() failed to get blob \" + blobName);\n    }\n\n    ParseCmdXml(std::move(blobData));\n}\n\nvoid\nEventHubCmd::ParseCmdXml(\n    std::string && xmlDoc\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubCmd::ParseCmdXml\");\n    if (xmlDoc.empty()) {\n        throw MDSEXCEPTION(\"EventHubCmd::ParseCmdXml(): unexpected empty XML doc\");\n    }\n\n    CmdListXmlParser parser;\n    parser.Parse(xmlDoc);\n\n    auto paramTable = parser.GetCmdParams();\n    if (0 == paramTable.size()) {\n        throw MDSEXCEPTION(\"No Command Parameter is found in Event Hub XML.\");\n    }\n\n    // index starts with 0\n    constexpr auto NPARAMSNotice = 13;\n    constexpr auto NPARAMSPub = 9;\n\n    constexpr auto EventNameIndexNotice = 6;\n    constexpr auto EventNameIndexPub = 4;\n\n    constexpr auto SASIndexNotice = 8;\n    constexpr auto SASIndexPub = 5;\n\n    constexpr auto MdsMonikerIndexNotice = 10;\n    constexpr auto MdsMonikerIndexPub = 6;\n\n    // example Endpoint value \"Test\". NOTE: this is not the full endpoint URL.\n    constexpr auto MdsEndpointIdIndexNotice = 11;\n    constexpr auto MdsEndpointIdIndexPub = 7;\n\n    const std::string NoticeVerb = \"SubscribeToEventHubEvent\";\n    const std::string PublisherVerb = \"SubscribeToEventPublisherEvent\";\n\n    auto noticeParamsList = paramTable[NoticeVerb];\n    ValidateCmdBlobParamsList(noticeParamsList, NoticeVerb, NPARAMSNotice);\n\n    TRACEINFO(trace, \"EventHub dump verb \" << NoticeVerb << \":\");\n    for (const auto & v : noticeParamsList) {\n        EhCmdXmlItems xmlItems { v[SASIndexNotice], v[MdsEndpointIdIndexNotice], v[MdsMonikerIndexNotice] };\n        m_noticeXmlItemsTable->emplace(v[EventNameIndexNotice], xmlItems);\n        TRACEINFO(trace, v[EventNameIndexNotice] << \"'s \" << xmlItems);\n    }\n\n    // Older version of MA may not have PublisherVerb\n    auto pubParamsList = paramTable[PublisherVerb];\n    if (0 == pubParamsList.size()) {\n        Logger::LogInfo(\"No \" + PublisherVerb + \" is found.\");\n        return;\n    }\n\n    ValidateCmdBlobParamsList(pubParamsList, PublisherVerb, NPARAMSPub);\n\n    TRACEINFO(trace, \"EventHub dump verb \" << PublisherVerb << \":\");\n    for (const auto & v : pubParamsList) {\n        EhCmdXmlItems xmlItems { v[SASIndexPub], v[MdsEndpointIdIndexPub], v[MdsMonikerIndexPub] };\n        m_pubXmlItemsTable->emplace(v[EventNameIndexPub], xmlItems);\n        TRACEINFO(trace, v[EventNameIndexPub] << \"'s \" << xmlItems);\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubCmd.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTHUBCMD__HH__\n#define __EVENTHUBCMD__HH__\n\n#include <string>\n#include <unordered_map>\n#include <memory>\n#include <iostream>\n\nnamespace mdsd\n{\n\n// Encapsulating type for EH cmd XML items\nstruct EhCmdXmlItems\n{\n    std::string sas;        // SAS key\n    std::string endpoint;   // MDS endpoint ID (e.g., \"Test\", \"Prod\", \"Stage\", ...)\n    std::string moniker;    // The mapped storage moniker (may be different from config file account moniker)\n};\n\n/// <summary>\n/// This class implements functions to handle Event Hub Commands xml files.\n/// This includes download xml file, parse xml file, and get data from xml.\n/// </summary>\nclass EventHubCmd\n{\npublic:\n    using EhCmdXmlItemsTable_t = std::unordered_map<std::string, EhCmdXmlItems>;\n\n    /// <summary>\n    /// Create the object that'll handle Event Hub command xml file.\n    /// <param name=\"eventNameSpace\"> event name space</param>\n    /// <param name=\"eventVersion\"> event version<param>\n    /// <param name=\"rootContainerSas\"> the sas key for the root container\n    /// where the command xml file locates. </param>\n    /// </summary>\n    EventHubCmd(std::string eventNameSpace,\n                int eventVersion,\n                std::string rootContainerSas);\n\n    ~EventHubCmd() {}\n\n    EventHubCmd(const EventHubCmd & other) = default;\n    EventHubCmd(EventHubCmd&& other) = default;\n    EventHubCmd& operator=(const EventHubCmd& other) = default;\n    EventHubCmd& operator=(EventHubCmd&& other) = default;\n\n    /// <sumamry>\n    /// Process the Event Hub command XML to extract SASKey and other info.\n    /// </summary>\n    void ProcessCmdXml();\n\n    /// <sumamry>\n    /// Get Event Hub SAS Keys and return it in table.\n    /// table: key=EventName; value: EH cmd XML items (currently SAS and MDS endpoint ID)\n    /// </summary>\n    std::shared_ptr<EhCmdXmlItemsTable_t> GetNoticeXmlItemsTable() const { return m_noticeXmlItemsTable; }\n    std::shared_ptr<EhCmdXmlItemsTable_t> GetPublisherXmlItemsTable() const { return m_pubXmlItemsTable; }\n\n    static void SetParentContainerName(std::string name) { s_parentContainerName = std::move(name); }\n\nprivate:\n    std::string GetBlobName(std::string baseName) { return baseName.append(m_blobNameSuffix); }\n\n    void ProcessBlob(std::string&& blobName);\n\n    void ParseCmdXml(std::string&& xmlDoc);\n\nprivate:\n    std::string m_blobNameSuffix;\n    std::string m_rootContainerSas;\n\n    // key = EventName; value: EH cmd XML items (currently SAS and MDS endpoint ID)\n    std::shared_ptr<EhCmdXmlItemsTable_t> m_noticeXmlItemsTable;\n    std::shared_ptr<EhCmdXmlItemsTable_t> m_pubXmlItemsTable;\n\n    static std::string s_parentContainerName;\n};\n\n} // namespace mdsd\n\nstd::ostream&\noperator<<(std::ostream& str, const mdsd::EhCmdXmlItems & cmd);\n\n#endif // __EVENTHUBCMD__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubPublisher.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <vector>\n#include <bond/core/bond.h>\n#include <stddef.h>\n#include <boost/asio.hpp>\n\n#include \"BinaryWriter.hh\"\n#include \"EventHubPublisher.hh\"\n#include \"MdsCmdLogger.hh\"\n#include \"Trace.hh\"\n#include \"PublisherStatus.hh\"\n#include \"MdsException.hh\"\n\nusing namespace mdsd::details;\nusing namespace web::http;\nusing namespace web::http::client;\n\nstatic std::vector<byte>\nSerializeData(\n    const std::string & text\n    )\n{\n    std::vector<byte> v;\n    BinaryWriter writer(v);\n    writer.Write(text);\n    return v;\n}\n\nstatic bool\nDisableWeakSslCiphers(\n    const std::string & url,\n    web::http::client::native_handle handle\n)\n{\n    const std::string https = \"https:\";\n    if (url.size() <= https.size()) {\n        return true;\n    }\n    bool isHttps = (0 == strncasecmp(url.c_str(), https.c_str(), https.size()));\n    if (!isHttps) {\n        return true;\n    }\n\n    bool resultOK = true;\n    boost::asio::ssl::stream<boost::asio::ip::tcp::socket &>* streamobj =\n        static_cast<boost::asio::ssl::stream<boost::asio::ip::tcp::socket &>* >(handle);\n\n    if (streamobj)\n    {\n        SSL* ssl = streamobj->native_handle();\n        if (ssl)\n        {\n            const int isOK = 1;\n            const std::string cipherList = \"HIGH:!DSS:!RC4:!aNULL@STRENGTH\";\n            if (::SSL_set_cipher_list(ssl, cipherList.c_str()) != isOK) {\n                MdsCmdLogError(\"Error: failed to disable weak ciphers: \" + cipherList + \"; URL: \" + url);\n                resultOK = false;\n            }\n        }\n    }\n    return resultOK;\n}\n\nEventHubPublisher::EventHubPublisher(\n    const std::string & hostUrl,\n    const std::string & eventHubUrl,\n    const std::string & sasToken) :\n    m_hostUrl(hostUrl),\n    m_eventHubUrl(eventHubUrl),\n    m_sasToken(sasToken),\n    m_httpclient(nullptr),\n    m_resetHttpClient(false)\n{\n\n}\n\n// The actual data sent to EventHub is a serialized version of EventDataT::GetData().\n// However, because EventDataT::GetData() is std::string, and serialization doesn't\n// change the size of std::string, use the std::string's size to do validation.\nstatic void\nValidateData(\n    const mdsd::EventDataT & data\n    )\n{\n    if (data.GetData().size() > mdsd::EventDataT::GetMaxSize()) {\n        std::ostringstream strm;\n        strm << \"EventHub data is too big: max=\" << mdsd::EventDataT::GetMaxSize()\n             << \" B; input=\" << data.GetData().size() << \" B. 
Drop it.\";\n        throw mdsd::TooBigEventHubDataException(strm.str());\n    }\n}\n\nhttp_request\nEventHubPublisher::CreateRequest(\n    const EventDataT & data\n    )\n{\n    ValidateData(data);\n    auto serializedData = SerializeData(data.GetData());\n\n    http_request req;\n    req.set_request_uri(m_eventHubUrl);\n    req.set_method(methods::POST);\n    req.headers().add(\"Authorization\", m_sasToken);\n    req.headers().add(\"Content-Type\", \"application/atom+xml;type=entry;charset=utf-8\");\n\n    req.set_body(serializedData);\n\n    for (const auto & it : data.Properties()) {\n        req.headers().add(it.first, it.second);\n    }\n\n    return req;\n}\n\nvoid\nEventHubPublisher::ResetClient()\n{\n    Trace trace(Trace::MdsCmd, \"EventHubPublisher::ResetClient\");\n\n    if (m_httpclient) {\n        trace.NOTE(\"Http client will be reset due to previous failure.\");\n        m_httpclient.reset();\n        m_resetHttpClient = false;\n    }\n\n    auto lambda = [this](web::http::client::native_handle handle)->void\n    {\n        (void) DisableWeakSslCiphers(m_hostUrl, handle);\n    };\n\n    http_client_config httpClientConfig;\n    httpClientConfig.set_timeout(std::chrono::seconds(30)); // http request timeout value\n    httpClientConfig.set_nativehandle_options(lambda);\n    m_httpclient = std::move(std::unique_ptr<http_client>(new http_client(m_hostUrl, httpClientConfig)));\n}\n\nbool\nEventHubPublisher::Publish(\n    const EventDataT& data\n    )\n{\n    if (data.empty()) {\n        MdsCmdLogWarn(\"Empty data is passed to publisher. Drop it.\");\n        return true;\n    }\n\n    try {\n        if (!m_httpclient || m_resetHttpClient) {\n            ResetClient();\n        }\n\n        auto postRequest = CreateRequest(data);\n        auto httpResponse = m_httpclient->request(postRequest).get();\n        return HandleServerResponse(httpResponse, false);\n    }\n    catch(const mdsd::TooBigEventHubDataException & ex)\n    {\n        MdsCmdLogWarn(ex.what());\n        return true;\n    }\n    catch(const std::exception & ex)\n    {\n        MdsCmdLogError(\"Error: EH publish to \" + m_eventHubUrl + \" failed: \" + ex.what());\n    }\n    catch(...)\n    {\n        MdsCmdLogError(\"Error: EH publish to \" + m_eventHubUrl +\" has unknown exception.\");\n    }\n\n    m_resetHttpClient = true;\n    return false;\n}\n\npplx::task<bool>\nEventHubPublisher::PublishAsync(\n    const EventDataT& data\n    )\n{\n    if (data.empty()) {\n        MdsCmdLogWarn(\"Empty data is passed to async publisher. 
Drop it.\");\n        return pplx::task_from_result(true);\n    }\n    try {\n        if (!m_httpclient || m_resetHttpClient) {\n            ResetClient();\n        }\n\n        auto postRequest = CreateRequest(data);\n        auto shThis = shared_from_this();\n\n        return m_httpclient->request(postRequest)\n        .then([shThis](pplx::task<http_response> responseTask)\n        {\n            return shThis->HandleServerResponseAsync(responseTask);\n        });\n    }\n    catch(const mdsd::TooBigEventHubDataException & ex)\n    {\n        MdsCmdLogWarn(ex.what());\n        return pplx::task_from_result(true);\n    }\n    catch(const std::exception & ex)\n    {\n        MdsCmdLogError(\"Error: EH async publish to \" + m_eventHubUrl + \" failed: \" + ex.what());\n    }\n\n    m_resetHttpClient = true;\n    return pplx::task_from_result(false);\n}\n\nbool\nEventHubPublisher::HandleServerResponseAsync(\n    pplx::task<http_response> responseTask\n    )\n{\n    try {\n        return HandleServerResponse(responseTask.get(), true);\n    }\n    catch(const std::exception & e)\n    {\n        MdsCmdLogError(\"Error: EH async publish to \" + m_eventHubUrl +\n            \" failed with http response: \" + e.what());\n    }\n    m_resetHttpClient = true;\n    return false;\n}\n\nbool\nEventHubPublisher::HandleServerResponse(\n    const http_response & response,\n    bool isFromAsync\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubPublisher::HandleServerResponse\");\n    PublisherStatus pubStatus = PublisherStatus::Idle;\n\n    auto statusCode = response.status_code();\n    TRACEINFO(trace, \"Http response status_code=\" << statusCode << \"; Reason='\" << response.reason_phrase() << \"'\");\n\n    const int HttpStatusThrottled = 429;\n\n    std::string errDetails;\n\n    switch(statusCode) {\n        case status_codes::Created: // 201. According to MSDN, 201 means success.\n        case status_codes::OK:\n            pubStatus = PublisherStatus::PublicationSucceeded;\n            break;\n        case status_codes::BadRequest:\n            pubStatus = PublisherStatus::PublicationFailedWithBadRequest;\n            break;\n        case status_codes::Unauthorized:\n        case status_codes::Forbidden:\n            pubStatus = PublisherStatus::PublicationFailedWithAuthError;\n            errDetails += \" SAS: '\" + m_sasToken + \"'\";\n            break;\n        case status_codes::ServiceUnavailable:\n            pubStatus = PublisherStatus::PublicationFailedServerBusy;\n            m_resetHttpClient = true;\n            break;\n        case HttpStatusThrottled:\n            pubStatus = PublisherStatus::PublicationFailedThrottled;\n            break;\n        default:\n            pubStatus = PublisherStatus::PublicationFailedWithUnknownReason;\n            break;\n    }\n\n    if (PublisherStatus::PublicationSucceeded != pubStatus) {\n        std::ostringstream strm;\n        strm << \"Error: EH publish to \" << m_eventHubUrl << errDetails << \" failed with status=\"\n             << pubStatus << std::boolalpha << \". isAsync=\" << isFromAsync;\n        MdsCmdLogError(strm);\n    }\n    else {\n        TRACEINFO(trace, \"publication succeeded. isAsync=\" << std::boolalpha << isFromAsync);\n    }\n    return (PublisherStatus::PublicationSucceeded == pubStatus);\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubPublisher.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTHUBPUBLISHER__HH__\n#define __EVENTHUBPUBLISHER__HH__\n\n#include <string>\n#include <memory>\n#include <cstdlib>\n#include <cpprest/http_client.h>\n#include <pplx/pplxtasks.h>\n#include \"EventData.hh\"\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// This class implements functions to publish data to EventHub\n/// service using https.\n/// </summary>\nclass EventHubPublisher : public std::enable_shared_from_this<EventHubPublisher>\n{\npublic:\n    static std::shared_ptr<EventHubPublisher> create(\n        const std::string & hostUrl,\n        const std::string & eventHubUrl,\n        const std::string & sasToken\n        )\n    {\n        return std::shared_ptr<EventHubPublisher>(new  EventHubPublisher(hostUrl, eventHubUrl, sasToken));\n    }\n\n    virtual ~EventHubPublisher() {}\n\n    EventHubPublisher(const EventHubPublisher &) = delete;\n    EventHubPublisher(EventHubPublisher&&) = default;\n\n    EventHubPublisher& operator=(EventHubPublisher&) = delete;\n    EventHubPublisher& operator=(EventHubPublisher&&) = default;\n\n    /// <summary>\n    /// Publish the data to Event Hub service synchronously.\n    /// Return true if success, false if any error.\n    /// If input data is empty, drop it and return true.\n    /// </summary>\n    virtual bool Publish(const EventDataT & data);\n\n    /// <summary>\n    /// Publish the data to Event Hub service asynchronously.\n    /// Return true if success, false if any error.\n    /// If input data is empty, drop it and return true.\n    /// </summary>\n    virtual pplx::task<bool> PublishAsync(const EventDataT & data);\n\n    /// <summary>\n    /// Create http request for EventHub data uploading.\n    /// Throw exception if any error for the input data.\n    /// </summary>\n    web::http::http_request CreateRequest(const EventDataT & data);\n\nprotected:\n    EventHubPublisher(\n        const std::string & hostUrl,\n        const std::string & eventHubUrl,\n        const std::string & sasToken);\n\nprivate:\n    void ResetClient();\n    bool HandleServerResponse(const web::http::http_response & response, bool isFromAsync);\n    bool HandleServerResponseAsync(pplx::task<web::http::http_response> responseTask);\n\nprivate:\n    std::string m_hostUrl;       // Event Hub host URL\n    std::string m_eventHubUrl;   // Event Hub service URL\n    std::string m_sasToken;      // Event Hub SAS token\n\n    std::unique_ptr<web::http::client::http_client> m_httpclient;\n    bool m_resetHttpClient;     // if true, reset the http client.\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __EVENTHUBPUBLISHER__HH__\n"
  },
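  {
    "path": "Diagnostic/mdsd/mdscommands/examples/EventHubPublisherExample.cc",
    "content": "// Illustrative sketch only -- not part of the original source tree. It shows\n// the minimal synchronous use of EventHubPublisher: create() is the only way\n// to obtain an instance (the class uses enable_shared_from_this). The URLs and\n// SAS token below are placeholders in the shapes that\n// EventHubUploader::ParseEventHubSas produces; the file name and main() are\n// assumptions.\n\n#include <iostream>\n#include \"EventHubPublisher.hh\"\n#include \"EventData.hh\"\n\nint main()\n{\n    auto publisher = mdsd::details::EventHubPublisher::create(\n        \"https://example.servicebus.windows.net\",               // host URL (placeholder)\n        \"https://example.servicebus.windows.net/Raw/messages\",  // event hub URL (placeholder)\n        \"SharedAccessSignature sr=SR&sig=SIG&se=0&skn=writer\"); // SAS token (placeholder)\n\n    mdsd::EventDataT data;\n    data.SetData(\"sample message\");\n    data.AddProperty(\"Source\", \"example\"); // becomes an HTTP header on the request\n\n    // Publish() returns true on success, and also when empty or too-big data is dropped.\n    bool ok = publisher->Publish(data);\n    std::cout << \"publish ok=\" << std::boolalpha << ok << std::endl;\n    return 0;\n}\n"
  },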
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubType.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventHubType.hh\"\n#include <map>\n#include <stdexcept>\n\nstatic std::map<mdsd::EventHubType, std::string> & GetType2NameMap()\n{\n    static auto m = new std::map<mdsd::EventHubType, std::string> (\n    {\n        { mdsd::EventHubType::Notice, \"EventNotice\" },\n        { mdsd::EventHubType::Publish, \"EventPublish\" }\n    });\n    return *m;\n}\n\nstd::string\nmdsd::EventHubTypeToStr(EventHubType type)\n{\n    auto m = GetType2NameMap();\n    auto iter = m.find(type);\n    if (iter != m.end()) {\n        return iter->second;\n    }\n    return \"unknown\";\n}\n\nstatic std::map<std::string, mdsd::EventHubType> & GetName2TypeMap()\n{\n    static auto m = new std::map<std::string, mdsd::EventHubType>(\n    {\n        { \"EventNotice\", mdsd::EventHubType::Notice },\n        { \"EventPublish\", mdsd::EventHubType::Publish }\n    });\n    return *m;\n}\n\nmdsd::EventHubType\nmdsd::EventHubTypeFromStr(const std::string & s)\n{\n    auto m = GetName2TypeMap();\n    auto iter = m.find(s);\n    if (iter != m.end()) {\n        return iter->second;\n    }\n    throw std::runtime_error(\"Invalid EventHubType name: \" + s);\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubType.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTHUBTYPE_HH_\n#define __EVENTHUBTYPE_HH_\n\n#include <string>\n\nnamespace mdsd\n{\n\nenum class EventHubType\n{\n    Notice,\n    Publish\n};\n\nstd::string EventHubTypeToStr(EventHubType type);\nEventHubType EventHubTypeFromStr(const std::string & s);\n\n} // namespace mdsd\n\n#endif  // __EVENTHUBTYPE_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploader.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <iostream>\n#include <sstream>\n#include <chrono>\n#include <cassert>\nextern \"C\" {\n#include <unistd.h>\n#include <stddef.h>\n}\n#include <cpprest/pplx/threadpool.h>\n#include <boost/bind.hpp>\n#include <boost/algorithm/string/replace.hpp>\n\n#include \"EventHubUploader.hh\"\n#include \"MdsException.hh\"\n#include \"MdsCmdLogger.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"EventEntry.hh\"\n#include \"EventPersistMgr.hh\"\n#include \"EventHubPublisher.hh\"\n#include \"Utility.hh\"\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\nclass UploadInterruptionException {};\n\nEventHubUploader::EventHubUploader(\n    const std::string & persistDir,\n    int32_t persistResendSeconds,\n    int32_t memoryTimeoutSeconds,\n    int32_t maxPersistSeconds\n    ) :\n    m_publisher(nullptr),\n    m_memoryTimeoutSeconds(memoryTimeoutSeconds),\n    m_stopSenderMode(0),\n    m_persistResendSeconds(persistResendSeconds),\n    m_persistResendTimer(crossplat::threadpool::shared_instance().service()),\n    m_persistDir(persistDir),\n    m_pmgr(EventPersistMgr::create(persistDir, maxPersistSeconds))\n{\n}\n\nEventHubUploader::~EventHubUploader()\n{\n    WaitForFinish();\n}\n\nvoid\nEventHubUploader::WaitForFinish(\n    int32_t maxMilliSeconds\n    )\n{\n    try {\n        Trace trace(Trace::MdsCmd, \"EventHubUploader::WaitForFinish\");\n        if (m_isFinished) {\n            TRACEINFO(trace, \"function is already called. abort.\");\n            return;\n        }\n        m_isFinished = true;\n\n        WaitForSenderTask(maxMilliSeconds);\n\n        if (m_senderTask.valid()) {\n            m_senderTask.get();\n        }\n        m_persistResendTimer.cancel();\n    }\n    catch(std::exception& ex) {\n        MdsCmdLogError(\"Error: EventHubUploader::WaitForFinish failed: \" + std::string(ex.what()));\n    }\n    catch(...) 
{\n        MdsCmdLogError(\"Error: EventHubUploader::WaitForFinish failed with unknown exception\");\n    }\n}\n\nvoid\nEventHubUploader::SetSasAndStart(\n    const std::string & eventHubSas\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::SetSasAndStart\");\n    if (eventHubSas.empty()) {\n        MdsCmdLogError(\"Error: EventHubUploader::SetSasAndStart: unexpected empty EventHub SasKey\");\n        return;\n    }\n\n    if (m_ehSasKey != eventHubSas) {\n        std::string hostUrl, eventHubUrl, sasToken;\n        ParseEventHubSas(eventHubSas, hostUrl, eventHubUrl, sasToken);\n\n        m_publisher = EventHubPublisher::create(hostUrl, eventHubUrl, sasToken);\n\n        // Because the senderTask requires EH publisher object, so\n        // create the task and timer only when EH publisher object is ready.\n        // This only needs to be called once.\n        std::call_once(m_initOnceFlag, &EventHubUploader::Init, this);\n\n        m_ehSasKey = eventHubSas;\n    }\n}\n\nvoid\nEventHubUploader::Init()\n{\n    m_senderTask = std::async(std::launch::async, &EventHubUploader::Upload, this);\n    m_persistResendTimer.expires_from_now(boost::posix_time::seconds(m_persistResendSeconds));\n    m_persistResendTimer.async_wait(boost::bind(&EventHubUploader::ResendPersistEvents,\n                                    this, boost::asio::placeholders::error));\n}\n\nvoid\nEventHubUploader::AddData(\n    const EventDataT & data\n    )\n{\n    if (data.empty()) {\n        return;\n    }\n    EventDataT dataCopy{data};\n    AddData(std::move(dataCopy));\n}\n\nvoid\nEventHubUploader::AddData(\n    EventDataT && data\n    )\n{\n    if (data.empty()) {\n        return;\n    }\n\n    EventEntryT item(new EventEntry(std::move(data)));\n    std::lock_guard<std::mutex> lk(m_qmutex);\n    m_uploadQueue.emplace(std::move(item));\n    m_qcv.notify_all();\n}\n\nvoid\nEventHubUploader::WaitForSenderTask(\n    int32_t milliSeconds\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::WaitForSenderTask\");\n\n    if (m_stopSenderMode > 0) {\n        return;\n    }\n    if (!m_senderTask.valid()) {\n        return;\n    }\n\n    TRACEINFO(trace, \"Notify sender task to stop ...\");\n\n    // Because condition variable (CV)'s checking for predicate and waiting\n    // is not atomic, to avoid lost notification, the operations that'll\n    // affect predicate results before CV notify() should be protected by\n    // the same mutex for CV wait().\n    if (-1 == milliSeconds) {\n        std::unique_lock<std::mutex> lck(m_qmutex);\n        m_stopSenderMode = StopTaskUntilDoneMode;\n        m_qcv.notify_all();\n        lck.unlock();\n\n        m_senderTask.wait();\n    }\n    else {\n        m_stopSenderMode = StopTaskUntilDoneMode;\n        m_senderTask.wait_for(std::chrono::milliseconds(milliSeconds));\n\n        std::unique_lock<std::mutex> lck(m_qmutex);\n        auto queueSize = m_uploadQueue.size();\n        m_stopSenderMode = StopTaskNowMode;\n        m_qcv.notify_all();\n        lck.unlock();\n\n        TRACEINFO(trace, \"Number of Items in upload queue: \" << queueSize );\n    }\n}\n\nvoid\nEventHubUploader::Upload()\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::Upload\");\n\n    try {\n        while(StopTaskNowMode != m_stopSenderMode) {\n            std::unique_lock<std::mutex> lk(m_qmutex);\n            m_qcv.wait(lk, [this] {\n                return (m_stopSenderMode || !m_uploadQueue.empty());\n            });\n\n            if (m_uploadQueue.empty()) {\n                break;\n        
    }\n            UploadInterruptionPoint();\n\n            EventEntryT item(std::move(m_uploadQueue.front()));\n            m_uploadQueue.pop();\n            lk.unlock();\n\n            UploadInterruptionPoint();\n\n            // item could be re-queued based on process result.\n            ProcessData(std::move(item));\n            UploadInterruptionPoint();\n        }\n    }\n    catch(UploadInterruptionException&) {\n        TRACEINFO(trace, \"Upload() is interrupted.\");\n    }\n}\n\nvoid\nEventHubUploader::ProcessData(\n    EventEntryT item\n)\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::ProcessData\");\n\n    auto itemAge = item->GetAgeInSeconds();\n    std::string itemTag = \"Item (\";\n    itemTag += std::to_string(item->GetId());\n    itemTag += \")\";\n\n    if (itemAge > m_memoryTimeoutSeconds) {\n        TRACEINFO(trace, itemTag << \" age (\" << itemAge\n                << \" s) > retry timeout(\" << m_memoryTimeoutSeconds << \" s). Stop retry.\");\n        return;\n    }\n\n    if (!item->IsTimeToRetry()) {\n        std::lock_guard<std::mutex> lk(m_qmutex);\n        m_uploadQueue.emplace(std::move(item));\n        return;\n    }\n\n    UploadInterruptionPoint();\n\n    if(m_publisher->Publish(item->GetData())) {\n        m_nUpSuccess++;\n        return;\n    }\n\n    UploadInterruptionPoint();\n\n    if (item->IsNeverSent()) {\n        item->SetSendTime();\n    }\n\n    m_nUpFail++;\n\n    // if persist write failed, no backoff. retry as soon as possible.\n    bool persistOK = true;\n    if (!item->IsInPersistence()) {\n        trace.NOTE(itemTag + \" upload failed. Add to persist and requeue.\");\n        persistOK = m_pmgr->Add(item->GetData());\n        if (!persistOK) {\n            m_npFail++;\n            MdsCmdLogError(\"Error: EventHubUploader data processor failed to add \"\n                + itemTag + \" to persist mgr.\");\n        }\n        else {\n            item->SetPersistence();\n        }\n    }\n    else {\n        trace.NOTE(itemTag + \" failed again. requeue.\");\n    }\n\n    if (persistOK) {\n        trace.NOTE(\"Backoff \" + itemTag);\n        item->BackOff();\n    }\n\n    UploadInterruptionPoint();\n    std::lock_guard<std::mutex> lk(m_qmutex);\n    m_uploadQueue.emplace(std::move(item));\n}\n\n// input sasKey format: https://tuxtestsb.servicebus.windows.net/Raw?sr=SR&sig=SIG&se=1455131008&skn=writer'\n// outputs:\n//   - hostUrl: https://tuxtestsb.servicebus.windows.net\n//   - eventHubUrl: https://tuxtestsb.servicebus.windows.net/Raw/messages\n//   - sasToken: SharedAccessSignature sr=SR&sig=SIG&se=1455131008&skn=writer\nvoid\nEventHubUploader::ParseEventHubSas(\n    const std::string & eventHubSas,\n    std::string & hostUrl,\n    std::string & eventHubUrl,\n    std::string & sasToken\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::ParseEventHubSas\");\n    std::string prefix{\"https://\"};\n    auto prefixLen = prefix.size();\n\n    if (eventHubSas.compare(0, prefixLen, prefix)) {\n        std::ostringstream strm;\n        strm << \"Invalid Event Hub SAS. 
SAS is expected to started with '\" << prefix << \"'\";\n        throw MDSEXCEPTION(strm.str());\n    }\n    auto hostPos = eventHubSas.find_first_of('/', prefixLen);\n    hostUrl = eventHubSas.substr(0, hostPos);\n\n    auto eventNamePos = eventHubSas.find_first_of('?', hostUrl.size());\n    eventHubUrl = eventHubSas.substr(0, eventNamePos) + \"/messages\";\n\n    auto tmpSasToken = eventHubSas.substr(eventNamePos+1);\n    sasToken = MdsdUtil::UnquoteXmlAttribute(tmpSasToken);\n    sasToken = \"SharedAccessSignature \" + sasToken;\n}\n\nvoid\nEventHubUploader::ResendPersistEvents(\n    const boost::system::error_code& error\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploader::ResendPersistEvents\");\n    if (boost::asio::error::operation_aborted == error) {\n        trace.NOTE(\"Previous timer cancelled.\");\n        return;\n    }\n\n    if (!m_pmgr->UploadAllAsync(m_publisher)) {\n        MdsCmdLogError(std::string(\"Error: EventHubUploader failed to start async upload. Retry in \")\n            + std::to_string(m_persistResendSeconds) + \" seconds.\");\n    }\n\n    if (0 == m_stopSenderMode) {\n        m_persistResendTimer.expires_from_now(boost::posix_time::seconds(m_persistResendSeconds));\n        m_persistResendTimer.async_wait(boost::bind(&EventHubUploader::ResendPersistEvents,\n                                        this, boost::asio::placeholders::error));\n    }\n}\n\nvoid\nEventHubUploader::UploadInterruptionPoint()\n{\n    if (StopTaskNowMode == m_stopSenderMode) {\n        throw UploadInterruptionException();\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploader.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTHUBUPLOADER__HH__\n#define __EVENTHUBUPLOADER__HH__\n\n#include <string>\n#include <queue>\n#include <mutex>\n#include <atomic>\n#include <future>\n#include <condition_variable>\n#include <memory>\n\nextern \"C\" {\n#include <stddef.h>\n}\n#include <boost/asio.hpp>\n#include \"EventData.hh\"\n\nnamespace boost\n{\n    namespace system\n    {\n        class error_code;\n    }\n}\n\nnamespace mdsd\n{\n    namespace details {\n        class EventEntry;\n        class EventPersistMgr;\n        class EventHubPublisher;\n    }\n}\n\nnamespace mdsd\n{\n\n/// <summary>\n/// This class implements the functions to upload data to Event Hub service.\n/// </summary>\nclass EventHubUploader\n{\n    using EventEntryT = std::unique_ptr<details::EventEntry>;\n\npublic:\n    /// <summary>\n    /// Construct an uploader object.\n    /// <param name=\"persistDir\">Directory fullpath where failed events are persisted.</param>\n    /// <param name=\"persistResendSeconds\">How often to resend failed, persisted events</param>\n    /// <param name=\"memoryTimeoutSeconds\">max time to keep data in memory after first failure.</param>\n    /// <param name=\"maxPersistSeconds\">Max time to persist failed data.</param>\n    /// </summary>\n    EventHubUploader(const std::string & persistDir,\n                     int32_t persistResendSeconds = 3600,\n                     int32_t memoryTimeoutSeconds = 3600,\n                     int32_t maxPersistSeconds = 604800  // 7-days\n                    );\n\n    ~EventHubUploader();\n\n    /// This class uses 'mutex', which is not movable, not copyable.\n    /// So make this class as not movable, not copyable.\n    EventHubUploader(const EventHubUploader& other) = delete;\n    EventHubUploader(EventHubUploader&& other) = delete;\n    EventHubUploader& operator=(const EventHubUploader& other) = delete;\n    EventHubUploader& operator=(EventHubUploader&& other) = delete;\n\n    /// <summary>\n    /// Set Event Hub SAS Key and start the uploader if not started yet.\n    /// When autokey is used, the SAS Key is changed every N hours. This API\n    /// will create a new instance of EventHubPublisher. So it should be called only\n    /// when SasKey is changed.\n    /// NOTE: This API is not thread-safe.\n    /// </summary>\n    void SetSasAndStart(const std::string & eventHubSas);\n\n    /// <summary>Add data to Event Hub service.</summary>\n    void AddData(const EventDataT & data);\n    void AddData(EventDataT && data);\n\n    /// <summary>\n    /// Wait for given time for all data to be uploaded.\n    /// Return until all data are uploaded or timed out.\n    /// -1 means forever.\n    /// NOTE: this function is not designed for thread-safe. 
In mdsd, it should\n    /// be called sequentially on given EventHubUploader object.\n    /// </summary>\n    void WaitForFinish(int32_t maxMilliSeconds = -1);\n\n    /// <summary>Get number of success uploads.</summary>\n    size_t GetNumUploadSuccess() const { return m_nUpSuccess; }\n\n    /// <summary>Get number of failed uploads.</summary>\n    size_t GetNumUploadFail() const { return m_nUpFail; }\n\n    /// <summary>Get number of failed persistence</summary>\n    size_t GetNumPersistFail() const { return m_npFail; }\n\n    std::string GetPersistDir() const { return m_persistDir; }\n\nprivate:\n    void WaitForSenderTask(int32_t maxMilliSeconds);\n    void ParseEventHubSas(const std::string & eventHubSas,\n        std::string& hostUrl, std::string& eventHubUrl, std::string& sasToken);\n    void Init();\n    void ProcessData(EventEntryT data);\n    void Upload();\n    void ResendPersistEvents(const boost::system::error_code& error);\n    void UploadInterruptionPoint();\n\nprivate:\n    std::shared_ptr<details::EventHubPublisher> m_publisher;\n    std::string m_ehSasKey;       // SASKey for EventHub service\n\n    size_t m_nUpSuccess = 0;      // number of upload success\n    size_t m_nUpFail = 0;         // number of upload failure\n    size_t m_npFail = 0;          // number of persist mgr failure\n\n    int32_t m_memoryTimeoutSeconds; // Max time to keep data in memory after first failure.\n\n    std::queue<EventEntryT> m_uploadQueue; // To store all events in memory.\n    std::mutex m_qmutex;                   // For queue/cv synchronization.\n    std::condition_variable m_qcv;         // For queue synchronization.\n\n    static const int StopTaskNowMode = 1;          // To stop the sender task immediately.\n    static const int StopTaskUntilDoneMode = 2;    // To stop the sender task when all data are processed.\n\n    std::atomic<int> m_stopSenderMode;        // A flag on when to stop the sender task.\n    std::future<void> m_senderTask;           // Task to send data to Event Hub service from memory queue.\n\n    int32_t m_persistResendSeconds = 0;               // How often to resend persisted, failed data.\n    boost::asio::deadline_timer m_persistResendTimer; // Persisted data resend timer.\n\n    std::string m_persistDir;                          // EventHub data persist dir\n    std::shared_ptr<details::EventPersistMgr> m_pmgr; // Event data persistence manager.\n    std::once_flag m_initOnceFlag;                    // Once flag to initialize this uploader object.\n    bool m_isFinished = false; // Whether EH uploading operation is finished\n};\n\n} // namespace mdsd\n\n#endif // __EVENTHUBUPLOADER__HH__\n"
  },
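  {
    "path": "Diagnostic/mdsd/mdscommands/examples/EventHubUploaderExample.cc",
    "content": "// Illustrative sketch only -- not part of the original source tree. It shows\n// the basic EventHubUploader lifecycle: construct with a persist directory,\n// hand it a full Event Hub SAS URL (see ParseEventHubSas for the expected\n// shape), queue data, then drain. The directory, URL, and main() are\n// placeholder assumptions.\n\n#include \"EventHubUploader.hh\"\n#include \"EventData.hh\"\n\nint main()\n{\n    // The persist dir is assumed to exist and be writable by the caller.\n    mdsd::EventHubUploader uploader(\"/var/mdsd/eh-persist\");\n\n    // Expected format: https://<host>/<eventhub>?sr=...&sig=...&se=...&skn=...\n    uploader.SetSasAndStart(\n        \"https://example.servicebus.windows.net/Raw?sr=SR&sig=SIG&se=0&skn=writer\");\n\n    mdsd::EventDataT data;\n    data.SetData(\"sample message\");\n    uploader.AddData(std::move(data));\n\n    uploader.WaitForFinish(5000); // wait up to 5 seconds, then stop the sender task\n    return 0;\n}\n"
  },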
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploaderId.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventHubUploaderId.hh\"\n#include <sstream>\n#include <stdexcept>\n#include <vector>\n#include <boost/algorithm/string.hpp>\n#include <boost/algorithm/string/split.hpp>\n\nusing namespace mdsd;\n\nEventHubUploaderId::EventHubUploaderId(\n    EventHubType ehtype,\n    const std::string & moniker,\n    const std::string & eventname\n    ) :\n        m_ehtype(ehtype),\n        m_moniker(moniker),\n        m_eventname(eventname)\n{\n    if (m_moniker.empty()) {\n        throw std::invalid_argument(\"EventHubUploaderId: invalid empty moniker for event '\" + m_eventname + \"'\");\n    }\n    if (m_eventname.empty()) {\n        throw std::invalid_argument(\"EventHubUploaderId: invalid empty eventname for moniker '\" + m_moniker + \"'\");\n    }\n}\n\nEventHubUploaderId::EventHubUploaderId(const std::string & idstr)\n{\n    std::vector<std::string> fields;\n    boost::algorithm::split(fields, idstr, boost::is_any_of(\" \"), boost::token_compress_on);\n\n    constexpr size_t nExpected = 3;\n    if (nExpected != fields.size()) {\n        std::ostringstream strm;\n        strm << \"Invalid EHUploaderId '\" << idstr << \"' in number of tokens: expected=\" <<\n            nExpected << \"; actual=\" << fields.size();\n        throw std::runtime_error(strm.str());\n    }\n\n    m_eventname = fields[0];\n    m_moniker = fields[1];\n    m_ehtype = EventHubTypeFromStr(fields[2]);\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploaderId.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _EVENTHUBUPLOADERID_HH_\n#define _EVENTHUBUPLOADERID_HH_\n\n#include <string>\n#include \"EventHubType.hh\"\n\nnamespace mdsd {\n\nstruct EventHubUploaderId {\n    EventHubType m_ehtype;\n    std::string m_moniker;\n    std::string m_eventname;\n\n    EventHubUploaderId(EventHubType ehtype, const std::string & moniker, const std::string & eventname);\n    EventHubUploaderId(const std::string & idstr);\n\n    operator std::string() const {\n        // put the bits that change more frequently at the front\n        return (m_eventname + \" \" + m_moniker + \" \" + EventHubTypeToStr(m_ehtype));\n    }\n};\n\n} // namespace mdsd\n\ninline std::ostream&\noperator<<(\n    std::ostream& os,\n    const mdsd::EventHubUploaderId& id\n    )\n{\n    os << static_cast<std::string>(id);\n    return os;\n}\n\n#endif // _EVENTHUBUPLOADERID_HH_\n"
  },
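  {
    "path": "Diagnostic/mdsd/mdscommands/examples/EventHubUploaderIdExample.cc",
    "content": "// Illustrative sketch only -- not part of the original source tree. It shows\n// the EventHubUploaderId <-> string mapping: the id string is space-separated\n// as \"<eventname> <moniker> <EventHubType name>\", so the two constructors\n// round-trip. The file name and main() are assumptions.\n\n#include <cassert>\n#include <iostream>\n#include \"EventHubUploaderId.hh\"\n\nint main()\n{\n    mdsd::EventHubUploaderId id(mdsd::EventHubType::Notice, \"myMoniker\", \"MyEvent\");\n    std::string idstr = id;                 // \"MyEvent myMoniker EventNotice\"\n\n    mdsd::EventHubUploaderId parsed(idstr); // splits the string on spaces\n    assert(parsed.m_moniker == \"myMoniker\");\n    assert(parsed.m_eventname == \"MyEvent\");\n    assert(parsed.m_ehtype == mdsd::EventHubType::Notice);\n\n    std::cout << idstr << std::endl;\n    return 0;\n}\n"
  },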
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploaderMgr.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventHubUploaderMgr.hh\"\n#include \"EventHubUploaderId.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include <stdexcept>\n#include <set>\n#include <cpprest/pplx/threadpool.h>\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\nEventHubUploaderMgr&\nEventHubUploaderMgr::GetInstance()\n{\n    // Because EventHubUploader's destructor will use pplx threadpool tasks, make sure\n    // the static threadpool is created first. First created will be last destroyed.\n    crossplat::threadpool::shared_instance();\n    static EventHubUploaderMgr s_instance;\n    return s_instance;\n}\n\nbool\nEventHubUploaderMgr::SetTopLevelPersistDir(\n    const std::string& persistDirTopLevel\n    )\n{\n    try {\n        MdsdUtil::ValidateDirRWXByUser(persistDirTopLevel);\n    }\n    catch(std::exception& ex) {\n        Logger::LogError(\"Error: failed to access directory '\" + persistDirTopLevel + \"'. Reason: \" + ex.what());\n        return false;\n    }\n    m_persistDirTopLevel = persistDirTopLevel;\n    return true;\n}\n\nstd::string\nEventHubUploaderMgr::CreateAndGetPersistDir(\n    EventHubType ehtype,\n    const std::string& moniker,\n    const std::string& eventname\n    )\n{\n    if (m_persistDirTopLevel.empty())\n    {\n        throw std::runtime_error(\"Root directory path string for persisting EventHub messages is empty\");\n    }\n\n    std::string persistDirPath = m_persistDirTopLevel;\n    persistDirPath += \"/\" + EventHubTypeToStr(ehtype);\n    MdsdUtil::CreateDirIfNotExists(persistDirPath, 01755);\n    persistDirPath += \"/\" + moniker;\n    MdsdUtil::CreateDirIfNotExists(persistDirPath, 01755);\n    persistDirPath += \"/\" + eventname;\n    MdsdUtil::CreateDirIfNotExists(persistDirPath, 01755);\n\n    return persistDirPath;\n}\n\nEventHubUploader*\nEventHubUploaderMgr::GetUploader(\n    const std::string & uploaderId\n    )\n{\n    // support multiple reader threads\n    boost::shared_lock<boost::shared_mutex> lk(m_mapMutex);\n    auto findResult = m_ehUploaders.find(uploaderId);\n    if (findResult == m_ehUploaders.end()) {\n        return nullptr;\n    }\n    return findResult->second.get();\n}\n\n// This API assumes m_mapMutex shared lock is already held.\nstd::set<std::pair<std::string, std::string>>\nEventHubUploaderMgr::GetNewItemSet(\n    EventHubType ehtype,\n    const std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploaderMgr::GetNewItemSet\");\n\n    std::set<std::pair<std::string, std::string>> newItemSet;\n    for (const auto & item : eventMonikerMap) {\n        auto & eventname = item.first;\n        auto & monikers = item.second;\n        for (const auto & moniker: monikers) {\n            auto findResult = m_ehUploaders.find(EventHubUploaderId(ehtype, moniker, eventname));\n\n            if (findResult == m_ehUploaders.end()) {\n                newItemSet.insert(std::make_pair(moniker, eventname));\n            }\n            else {\n                TRACEINFO(trace, \"Found existing EventHubUploader for moniker=\" << moniker << \", event=\" << eventname);\n            }\n        }\n    }\n    return newItemSet;\n}\n\n// This API assumes m_mapMutex shared lock is already held.\nstd::set<std::pair<std::string, std::string>>\nEventHubUploaderMgr::GetDroppedItemSet(\n    EventHubType ehtype,\n    const 
std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploaderMgr::GetDroppedItemSet\");\n\n    std::set<std::pair<std::string, std::string>> droppedItemSet;\n\n    for (const auto & item : m_ehUploaders) {\n        EventHubUploaderId ehid(item.first);\n        if (ehid.m_ehtype != ehtype) {\n            continue;\n        }\n        auto iter = eventMonikerMap.find(ehid.m_eventname);\n        if (iter == eventMonikerMap.end()) {\n            TRACEINFO(trace, \"Event '\" << ehid.m_eventname << \"' is dropped in MdsdConfig.\");\n            droppedItemSet.insert(std::make_pair(ehid.m_moniker, ehid.m_eventname));\n        }\n        else {\n            auto & monikers = iter->second;\n            for (const auto & moniker: monikers) {\n                if (moniker != ehid.m_moniker) {\n                    TRACEINFO(trace, \"Event \" << ehid.m_eventname << \"'s moniker '\" << ehid.m_moniker\n                        << \"' is dropped in MdsdConfig.\");\n                    droppedItemSet.insert(std::make_pair(ehid.m_moniker, ehid.m_eventname));\n                }\n            }\n        }\n    }\n    return droppedItemSet;\n}\n\nvoid\nEventHubUploaderMgr::CreateUploaders(\n    EventHubType ehtype,\n    const std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploaderMgr::CreateUploaders\");\n    if (m_persistDirTopLevel.empty()) {\n        Logger::LogError(\"Error: EventHub persist directory shouldn't be empty.\");\n        return;\n    }\n    try {\n        // This function could be called in multi-threads, or signal handler. use lock to protect.\n        boost::upgrade_lock<boost::shared_mutex> slock(m_mapMutex);\n\n        auto newItemSet = GetNewItemSet(ehtype, eventMonikerMap);\n        auto droppedItemSet = GetDroppedItemSet(ehtype, eventMonikerMap);\n\n        // Do exclusive lock on the EH uploader map\n        if (!newItemSet.empty() || !droppedItemSet.empty()) {\n            boost::upgrade_to_unique_lock< boost::shared_mutex > uniqueLock(slock);\n            for (const auto & item : newItemSet) {\n                auto & moniker = item.first;\n                auto & eventname = item.second;\n                EventHubUploaderId uploaderId(ehtype, moniker, eventname);\n                auto persistDir = CreateAndGetPersistDir(ehtype, moniker, eventname);\n                EhUploader_t newUploader(new EventHubUploader(persistDir));\n                m_ehUploaders[uploaderId] = std::move(newUploader);\n                TRACEINFO(trace, \"Created EventHubUploader for moniker=\" << moniker << \", event=\" << eventname);\n            }\n\n            for (const auto & item: droppedItemSet) {\n                auto & moniker = item.first;\n                auto & eventname = item.second;\n                m_ehUploaders.erase(EventHubUploaderId(ehtype, moniker, eventname));\n                TRACEINFO(trace, \"Removed EventHubUploader for moniker=\" << moniker << \", event=\" << eventname);\n            }\n        }\n    }\n    catch(std::exception& ex) {\n        Logger::LogError(\"Error: failed to create EventHub uploaders. 
Reason: \" + std::string(ex.what()));\n    }\n}\n\nbool\nEventHubUploaderMgr::SetSasAndStart(\n    const EventHubUploaderId& uploaderId,\n    const std::string & ehSas\n    )\n{\n    const std::string funcname = \"EventHubUploaderMgr::SetSasAndStart\";\n    Trace trace(Trace::MdsCmd, funcname);\n\n    if (ehSas.empty()) {\n        throw std::invalid_argument(funcname + \": unexpected empty SasKey\");\n    }\n\n    try {\n        auto uploaderObj = GetUploader(uploaderId);\n        if (!uploaderObj) {\n            TRACEINFO(trace, \"Cannot find uploader \" << uploaderId << \"'. Mdsd xml doesn't define it.\");\n            return false;\n        }\n        else {\n            TRACEINFO(trace, \"SetSasAndStart for \" << uploaderId);\n            uploaderObj->SetSasAndStart(ehSas);\n            return true;\n        }\n    }\n    catch(std::exception& ex) {\n        Logger::LogError(\"Error: EventHubUploaderMgr::SetSasAndStart() failed. Reason: \" + std::string(ex.what()));\n        return false;\n    }\n}\n\nbool\nEventHubUploaderMgr::AddMessageToUpload(\n    const EventHubUploaderId& uploaderId,\n    EventDataT&& eventData\n    )\n{\n    const std::string funcname = \"EventHubUploaderMgr::AddMessageToUpload\";\n    Trace trace(Trace::Bond, funcname);\n\n    if (eventData.empty()) {\n        throw std::invalid_argument(funcname + \": unexpected empty EventHub data\");\n    }\n\n    // The actual data sent to EventHub is a serialized version of EventDataT::GetData().\n    // However, because EventDataT::GetData() is std::string, and serialization doesn't\n    // change the size of std::string, use the std::string's size to do validation.\n    if (eventData.GetData().size() > EventDataT::GetMaxSize()) {\n        TRACEWARN(trace, \"Data size(\" << eventData.GetData().size()\n            << \") exceeds max supported size(\" << EventDataT::GetMaxSize() << \"). Drop it.\");\n        return false;\n    }\n\n    auto uploaderObj = GetUploader(uploaderId);\n    if (!uploaderObj) {\n        std::ostringstream oss;\n        oss << \"Error: \" << funcname << \" cannot find uploader '\" << uploaderId << \"'.\";\n        Logger::LogError(oss.str());\n        return false;\n    }\n\n    uploaderObj->AddData(std::move(eventData));\n    TRACEINFO(trace, \"Msg added to EventHubUploader, persistDir: \" + uploaderObj->GetPersistDir());\n    return true;\n}\n\nvoid\nEventHubUploaderMgr::WaitForFinish(\n    int32_t maxMilliSeconds\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventHubUploaderMgr::WaitForFinish\");\n    for (auto & iter : m_ehUploaders) {\n        iter.second->WaitForFinish(maxMilliSeconds);\n    }\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventHubUploaderMgr.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _EVENTHUBUPLOADERMGR_HH_\n#define _EVENTHUBUPLOADERMGR_HH_\n\n#include \"EventHubUploader.hh\"\n#include \"EventHubType.hh\"\n#include <string>\n#include <unordered_map>\n#include <map>\n#include <set>\n#include <unordered_set>\n#include <memory>\n#include <utility>\n#include <boost/thread/shared_mutex.hpp>\n\nnamespace mdsd {\n\nstruct EventHubUploaderId;\n\n// Using the singleton pattern\nclass EventHubUploaderMgr\n{\npublic:\n    static EventHubUploaderMgr& GetInstance();\n\n    EventHubUploaderMgr(const EventHubUploaderMgr &) = delete;\n    EventHubUploaderMgr(EventHubUploaderMgr&&) = delete;\n    EventHubUploaderMgr& operator=(const EventHubUploaderMgr&) = delete;\n    EventHubUploaderMgr& operator=(EventHubUploaderMgr&&) = delete;\n\n    bool SetTopLevelPersistDir(const std::string& persistDirTopLevel);\n    /// <summary>\n    /// Create EventHub uploaders for different EventHubType, moniker, eventname.\n    /// </summary>\n    /// <param name=\"eventMonikerMap\">key: eventname; value: monikernames. </param>\n    void CreateUploaders(EventHubType ehtype,\n        const std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap);\n\n    /// <summary>\n    /// Set SAS Key for given EventHub uploader identified by an id string.\n    /// Return true if the SAS key is set; return false otherwise.\n    /// </summary>\n    bool SetSasAndStart(const EventHubUploaderId& uploaderId, const std::string & ehSas);\n\n    /// <summary>\n    /// Add an EventHub data item to EventHub data uploader identified by an id string.\n    /// Return true if data is added to uploader; return false otherwise.\n    /// <summary>\n    bool AddMessageToUpload(const EventHubUploaderId& uploaderId, EventDataT&& eventData);\n\n    size_t GetNumUploaders() const { return m_ehUploaders.size(); }\n\n    /// <summary>\n    /// Wait for given time for all data to be uploaded.\n    /// Return until all data are uploaded or timed out.\n    /// maxMilliSeconds=-1 means forever.\n    /// </summary>\n    void WaitForFinish(int32_t maxMilliSeconds);\n\nprivate:\n    EventHubUploaderMgr() {}\n    ~EventHubUploaderMgr() {}\n\n    // Top-level directory for persisting EventHub messages.\n    // There'll be a subdirectory for each accountmoniker/eventname combination.\n    std::string m_persistDirTopLevel;    // e.g., \"/var/mdsd\"\n\n    // Collection of all EHUploader objects\n    typedef std::unique_ptr<EventHubUploader> EhUploader_t;\n    std::map<std::string, EhUploader_t> m_ehUploaders;\n\n    // multiple readers single writer locks for EH uploaders map.\n    // NOTE: C++14 has std::shared_timed_mutex that can do the same thing. 
But it is not\n    // available until GCC5.0.\n    boost::shared_mutex m_mapMutex;\n\n    std::string CreateAndGetPersistDir(EventHubType ehtype, const std::string& moniker,\n        const std::string& eventname);\n\n    EventHubUploader* GetUploader(const std::string & uploaderId);\n\n\n    std::set<std::pair<std::string, std::string>> GetNewItemSet(\n        EventHubType ehtype,\n        const std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap);\n\n    std::set<std::pair<std::string, std::string>> GetDroppedItemSet(\n        EventHubType ehtype,\n        const std::unordered_map<std::string, std::unordered_set<std::string>> & eventMonikerMap);\n\n};\n\n} // namespace mdsd\n\n#endif // _EVENTHUBUPLOADERMGR_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventPersistMgr.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <cassert>\nextern \"C\" {\n#include <unistd.h>\n}\n#include \"EventPersistMgr.hh\"\n#include \"MdsCmdLogger.hh\"\n#include \"Trace.hh\"\n#include \"PersistFiles.hh\"\n#include \"EventHubPublisher.hh\"\n#include \"Utility.hh\"\n\nusing namespace mdsd::details;\n\n\nEventPersistMgr::EventPersistMgr(\n    const std::string & persistDir,\n    int32_t maxKeepSeconds\n    ) :\n    m_dirname(persistDir),\n    m_persist(new PersistFiles(persistDir)),\n    m_maxKeepSeconds(maxKeepSeconds),\n    m_nFileProcessed{0}\n{\n}\n\nEventPersistMgr::~EventPersistMgr()\n{\n}\n\nbool\nEventPersistMgr::Add(\n    const EventDataT & data\n    )\n{\n    if (data.empty()) {\n        return true;\n    }\n    try {\n        return m_persist->Add(data);\n    }\n    catch(std::exception & ex) {\n        MdsCmdLogError(std::string(\"Error: adding data to persistence hit exception: \") + ex.what());\n    }\n    return false;\n}\n\nsize_t\nEventPersistMgr::GetNumItems() const\n{\n    return m_persist->GetNumItems();\n}\n\nbool\nEventPersistMgr::UploadAllSync(\n    std::shared_ptr<EventHubPublisher> publisher\n    ) const\n{\n    Trace trace(Trace::MdsCmd, \"EventPersistMgr::UploadAllSync\");\n\n    if (!publisher) {\n        MdsCmdLogError(\"Error: EventPersistMgr::UploadAllSync(): unexpected NULL for publisher object.\");\n        return false;\n    }\n\n    int nPubErrs = 0;\n    auto endIter = m_persist->cend();\n    for (auto iter = m_persist->cbegin(); iter != endIter; ++iter)\n    {\n        auto item = *iter;\n        auto ageInSeconds = m_persist->GetAgeInSeconds(item);\n        assert(ageInSeconds >= 0);\n\n        if (ageInSeconds >= m_maxKeepSeconds) {\n            m_persist->Remove(item);\n        }\n        else {\n            try {\n                auto itemdata = m_persist->Get(item);\n                if (publisher->Publish(itemdata)) {\n                    m_persist->Remove(item);\n                }\n                else {\n                    nPubErrs++;\n                }\n            }\n            catch(std::exception & ex) {\n                MdsCmdLogError(std::string(\"Error: EventPersistMgr UploadAllSync() hits exception: \") + ex.what());\n                nPubErrs++;\n            }\n            usleep(100000); // sleep some time to avoid flush azure service.\n        }\n    }\n    if (nPubErrs) {\n        std::ostringstream strm;\n        strm << \"Error: EventPersistMgr UploadAllSync() hit \" << nPubErrs << \" publication errors.\";\n        MdsCmdLogError(strm);\n    }\n    return (0 == nPubErrs);\n}\n\n// Check whether an I/O error is retryable.\n// NOTE: this list may need to to be adjusted based on actual errors found in the future.\n// They are obtained from 'man 2 open', 'man 2 read', 'man 2 close'.\nstatic inline bool\nIsFileIOErrorRetryable(int errcode)\n{\n    switch(errcode) {\n        case EACCES:\n        case EISDIR:\n        case ELOOP:\n        case ENAMETOOLONG:\n        case ENOTDIR:\n        case EOVERFLOW:\n        case EIO:\n            return false;\n        default:\n            return true;\n    }\n    return true;\n}\n\n/// <summary>\n/// A convenient helper function to loop asychronously until a condition is met.\n/// NOTE: These functions are from CPPREST sample code.\n/// </summary>\npplx::task<bool> _do_while_iteration(std::function<pplx::task<bool>(void)> func)\n{\n    pplx::task_completion_event<bool> ev;\n    func().then([=](bool guard)\n    
{\n        ev.set(guard);\n    });\n    return pplx::create_task(ev);\n}\npplx::task<bool> _do_while_impl(std::function<pplx::task<bool>(void)> func)\n{\n    return _do_while_iteration(func).then([=](bool guard) -> pplx::task<bool>\n    {\n        if(guard)\n        {\n            return ::_do_while_impl(func);\n        }\n        else\n        {\n            return pplx::task_from_result(false);\n        }\n    });\n}\npplx::task<void> do_while(std::function<pplx::task<bool>(void)> func)\n{\n    return _do_while_impl(func).then([](bool){});\n}\n\nstd::shared_ptr<std::queue<std::string>>\nEventPersistMgr::GetAllFiles() const\n{\n    auto fqueue = std::make_shared<std::queue<std::string>>();\n\n    auto endIter = m_persist->cend();\n    for (auto iter = m_persist->cbegin(); iter != endIter; ++iter)\n    {\n        auto item = *iter;\n        auto ageInSeconds = m_persist->GetAgeInSeconds(item);\n        assert(ageInSeconds >= 0);\n\n        if (ageInSeconds >= m_maxKeepSeconds) {\n            m_persist->RemoveAsync(item);\n        }\n        else {\n            fqueue->push(item);\n        }\n    }\n    return fqueue;\n}\n\nstatic std::shared_ptr<std::queue<std::string>>\nCreateBatch(\n    std::shared_ptr<std::queue<std::string>> fullList,\n    size_t batchSize\n    )\n{\n    if (fullList->size() <= batchSize) {\n        return fullList;\n    }\n\n    auto batch = std::make_shared<std::queue<std::string>>();\n    for (size_t i = 0; i < batchSize; i++) {\n        if (fullList->empty()) {\n            break;\n        }\n        batch->push(fullList->front());\n        fullList->pop();\n    }\n    return batch;\n}\n\nstatic void\nHandlePrevTaskFailure(\n    pplx::task<void> previous_task,\n    const std::string & testname\n    )\n{\n    try {\n        previous_task.wait();\n    }\n    catch(const std::exception& ex) {\n        MdsCmdLogError(testname + \" has exception: \" + ex.what());\n    }\n    catch(...) {\n        MdsCmdLogError(testname + \" has unknown exception.\");\n    }\n}\n\n// Calculate how many batches to use and each batch's size\n// based on total items to process and max open file resource limit.\n//\n// Make sure no more than maxBatches batches are used.\n//\n// The result is that totalItems can be divided into n batches, such that\n// the first nExtraOne batches have batchSize+1 items, the rest\n// (nbatches-nExtraOne) have batchSize items.\n// e.g. 
totalItems=7, maxBatches=5, we want to have (2,2,1,1,1), where\n// nbatches=5, batchSize=1, nExtraOne=2.\nstatic void\nCalcBatchInfo(\n    size_t totalItems,\n    size_t& nbatches,\n    size_t& batchSize,\n    size_t& nExtraOne\n    )\n{\n    Trace trace(Trace::MdsCmd, \"EventPersistMgr::CalcBatchInfo\");\n\n    auto fdLimit = MdsdUtil::GetNumFileResourceSoftLimit();\n\n    if (0 == fdLimit) {\n        // max open file is unlimited, each batch processes one file.\n        nbatches = totalItems;\n        batchSize = 1;\n        nExtraOne = 0;\n    }\n    else {\n        // max batches: 10% of max open files.\n        // so that we won't run out of open files.\n        size_t maxBatches = fdLimit / 10;\n\n        nbatches = std::min(totalItems, maxBatches);\n        batchSize = totalItems / nbatches;\n        nExtraOne = totalItems % nbatches;\n    }\n\n    assert((nbatches*batchSize+nExtraOne) == totalItems);\n\n    TRACEINFO(trace, \"total=\" << totalItems << \"; nbatches=\" << nbatches <<\n        \"; batchSize=\" << batchSize << \"; nExtraOne=\" << nExtraOne);\n}\n\nbool\nEventPersistMgr::UploadAllAsync(\n    std::shared_ptr<EventHubPublisher> publisher\n    ) const\n{\n    Trace trace(Trace::MdsCmd, \"EventPersistMgr::UploadAllAsync\");\n\n    if (!publisher) {\n        MdsCmdLogError(\"Error: EventPersistMgr::UploadAllAsync(): unexpected NULL for publisher object.\");\n        return false;\n    }\n\n    auto allFileList = GetAllFiles();\n    if (allFileList->empty()) {\n        return true;\n    }\n    auto nFilesToProcess = allFileList->size();\n    size_t nbatches = 0;\n    size_t batchSize = 0;\n    size_t nExtraOne = 0;\n    CalcBatchInfo(nFilesToProcess, nbatches, batchSize, nExtraOne);\n\n    auto shThis = shared_from_this();\n\n    size_t nFilesInBatch = 0;\n    for (size_t i = 0; i < nbatches; i++) {\n        auto nItems = (i < nExtraOne)? (batchSize+1) : batchSize;\n        auto batch = CreateBatch(allFileList, nItems);\n\n        nFilesInBatch += batch->size();\n        pplx::task<void>([shThis, publisher, batch]()\n        {\n            shThis->UploadFileBatch(publisher, batch);\n        });\n    }\n\n    assert(nFilesInBatch == nFilesToProcess);\n\n    return true;\n}\n\n// This function will process a list of files by using\n// one open file handle only. 
It uses the async task idiom 'do_while'\n// to process these files in an async task loop.\nvoid\nEventPersistMgr::UploadFileBatch(\n    std::shared_ptr<EventHubPublisher> publisher,\n    std::shared_ptr<std::queue<std::string>> flist\n    ) const\n{\n    if (flist->empty()) {\n        return;\n    }\n\n    auto shThis = shared_from_this();\n\n    ::do_while([shThis, flist, publisher]()\n    {\n        if (flist->empty()) {\n            return pplx::task_from_result(false);\n        }\n        auto fileItem = flist->front();\n        flist->pop();\n        return shThis->UploadOneFile(publisher, fileItem);\n    })\n    .then([](pplx::task<void> previous_task)\n    {\n        HandlePrevTaskFailure(previous_task, \"UploadFileBatch\");\n    });\n}\n\npplx::task<bool>\nEventPersistMgr::UploadOneFile(\n    std::shared_ptr<EventHubPublisher> publisher,\n    const std::string & filePath\n    ) const\n{\n    auto shThis = shared_from_this();\n\n    return m_persist->GetAsync(filePath)\n    .then([publisher, shThis, filePath](const EventDataT & fileData)\n    {\n        shThis->ProcessFileData(publisher, filePath, fileData);\n    })\n    .then([shThis, filePath](pplx::task<void> previous_task)\n    {\n        shThis->m_nFileProcessed++;\n        shThis->HandleReadTaskFailure(previous_task, filePath);\n        return true;\n    });\n}\n\nvoid\nEventPersistMgr::ProcessFileData(\n    std::shared_ptr<EventHubPublisher> publisher,\n    const std::string & item,\n    const EventDataT & itemdata\n    ) const\n{\n    if (itemdata.empty()) {\n        return;\n    }\n\n    auto shThis = shared_from_this();\n    publisher->PublishAsync(itemdata)\n    .then([publisher, shThis, item](bool publishOK)\n    {\n        if (publishOK) {\n            shThis->m_persist->RemoveAsync(item)\n            .then([item](bool removeOK) {\n                if (!removeOK) {\n                    MdsCmdLogError(\"Error: EventPersistMgr::ProcessFileData failed to remove file \" +\n                        MdsdUtil::GetFileBasename(item));\n                }\n            });\n        }\n        else {\n            MdsCmdLogError(\"Error: EventPersistMgr::ProcessFileData failed to upload file \" +\n                MdsdUtil::GetFileBasename(item));\n        }\n    })\n    .then([item](pplx::task<void> previous_task)\n    {\n        try {\n            previous_task.wait();\n        }\n        catch(const std::exception& ex) {\n            MdsCmdLogError(\"Error: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n                \". Exception: \" + std::string(ex.what()));\n        }\n        catch(...) {\n            MdsCmdLogError(\"Error: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n                \" with unknown exception.\");\n        }\n    });\n}\n\nvoid\nEventPersistMgr::HandleReadTaskFailure(\n    pplx::task<void> readTask,\n    const std::string & item\n    ) const\n{\n    try {\n        readTask.wait();\n    }\n    catch(const std::system_error & ex) {\n        auto ec = ex.code().value();\n\n        if (IsFileIOErrorRetryable(ec)) {\n            MdsCmdLogWarn(\"Warning: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n                \". Exception: \" + std::string(ex.what()) + \". Retry next time.\");\n        }\n        else {\n            MdsCmdLogError(\"Error: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n                \". Exception: \" + std::string(ex.what()) + \". 
Remove file.\");\n            m_persist->RemoveAsync(item);\n        }\n    }\n    catch(const std::exception& ex) {\n        // To be conservative: for exception without details, retry them later.\n        MdsCmdLogError(\"Error: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n            \". Exception: \" + std::string(ex.what()) + \". Retry next time.\");\n    }\n    catch(...) {\n        MdsCmdLogError(\"Error: failed to publish EH file \" + MdsdUtil::GetFileBasename(item) +\n            \" with unknown exception.\");\n    }\n}\n\n// vim: sw=4 expandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/EventPersistMgr.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTPERSISTMGR__HH__\n#define __EVENTPERSISTMGR__HH__\n\n#include <string>\n#include <memory>\n#include <atomic>\n#include <queue>\n#include <functional>\n#include \"EventData.hh\"\n#include <pplx/pplxtasks.h>\n\nnamespace mdsd { namespace details\n{\n\nclass PersistFiles;\nclass EventHubPublisher;\n\n/// <summary>\n/// This class implements the functionality to persist events\n/// that are failed to be sent to Event Hub. It will save given\n/// event to persistence and do regular retry on them. When retry\n/// succeeds, the event will be removed from persistence.\n/// An event has a max persistence time. After that time, it will\n/// be removed from persistence.\n/// </summary>\nclass EventPersistMgr : public std::enable_shared_from_this<EventPersistMgr>\n{\n    /// <summary>\n    /// Construct a new object.\n    /// <param name=\"persistDir\"> Directory name to persist data</param>\n    /// <param name=\"maxKeepSeconds\"> Max seconds to keep the data. After this time,\n    /// It could be removed at any time. </param>\n    /// </summary>\n    EventPersistMgr(const std::string & persistDir,\n                    int32_t maxKeepSeconds);\n\npublic:\n    static std::shared_ptr<EventPersistMgr> create(\n        const std::string & persistDir,\n        int32_t maxKeepSeconds)\n    {\n        return std::shared_ptr<EventPersistMgr>(new EventPersistMgr(persistDir, maxKeepSeconds));\n    }\n\n    /// <summary>\n    /// NOTE: because this class defines a unique_ptr with forward-declared type,\n    /// the destructor must be implemented in the *cc file.\n    /// </summary>\n    ~EventPersistMgr();\n\n    // movable but not copyable\n    EventPersistMgr(const EventPersistMgr& other) = delete;\n    EventPersistMgr(EventPersistMgr&& other) = default;\n    EventPersistMgr& operator=(const EventPersistMgr& other) = delete;\n    EventPersistMgr& operator=(EventPersistMgr&& other) = default;\n\n    /// <summary>\n    /// Save given data as persistence object.\n    /// Return true if success, false if any error.\n    /// If data is empty, return true and do nothing.\n    /// </summary>\n    bool Add(const EventDataT & data);\n\n    /// <summary> Return number of files on the disk</summary>\n    size_t GetNumItems() const;\n\n    /// <summary>\n    /// Return number of files read and processed from persist dir.\n    /// This doesn't include files deleted when they are too old to keep.\n    /// </summary>\n    size_t GetNumFileProcessed() const { return m_nFileProcessed; }\n\n    /// <summary>\n    /// Go through each persistence object: if it is too old (beyond\n    /// max keep time), it will be removed. if it is not too old, it\n    /// will be uploaded. If the upload succeeds, it will be removed.\n    /// If upload fails, do nothing to it.\n    /// Return true if success, false if any error.\n    /// </summary>\n    bool UploadAllSync(std::shared_ptr<EventHubPublisher> publisher) const;\n\n    /// <summary>\n    /// Upload all events asynchronously. This is a \"fire and forget\"\n    /// function. 
It doesn't wait for the async tasks to finish.\n    /// Upload failure will be logged but won't be shown in this\n    /// function's return status.\n    /// Return true if success, false if any error.\n    /// </summary>\n    bool UploadAllAsync(std::shared_ptr<EventHubPublisher> publisher) const;\n\nprivate:\n    /// <summary>\n    /// Process the data read from file, including publishing the data to EventHub.\n    /// If data are empty, do nothing.\n    /// </summary>\n    void ProcessFileData(std::shared_ptr<EventHubPublisher> publisher, const std::string & item,\n        const EventDataT & itemdata) const;\n\n    /// <summary>\n    /// Handle any GetAsync() task failures.\n    /// </summary>\n    void HandleReadTaskFailure(pplx::task<void> readTask, const std::string & item) const;\n\n    /// <summary>\n    /// Return the names of the files to be uploaded. The files that are too old to upload\n    /// will be removed from disk.\n    /// </summary>\n    std::shared_ptr<std::queue<std::string>> GetAllFiles() const;\n\n    pplx::task<bool> UploadOneFile(std::shared_ptr<EventHubPublisher> publisher,\n        const std::string & filePath) const;\n\n    void UploadFileBatch(std::shared_ptr<EventHubPublisher> publisher,\n        std::shared_ptr<std::queue<std::string>> flist) const;\n\nprivate:\n    std::string m_dirname;                   // Persistence directory full path.\n    std::unique_ptr<PersistFiles> m_persist; // The persist mgr persists the data to files.\n    int32_t m_maxKeepSeconds;                // max seconds to keep the data.\n    mutable std::atomic<size_t> m_nFileProcessed; // number of files read from persistence dir.\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __EVENTPERSISTMGR__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/MdsBlobReader.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <stdexcept>\n#include <sstream>\n#include <random>\n#include <chrono>\n#include <iostream>\n#include <was/blob.h>\n#include <wascore/basic_types.h>\n\n#include \"MdsBlobReader.hh\"\n#include \"Trace.hh\"\n\nextern \"C\" {\n#include <unistd.h>\n}\n\nusing namespace azure::storage;\nusing namespace mdsd::details;\n\nMdsBlobReader::MdsBlobReader(\n    std::string storageUri,\n    std::string blobName,\n    std::string parentPath\n    ) :\n    m_storageUri(std::move(storageUri)),\n    m_blobName(std::move(blobName)),\n    m_parentPath(std::move(parentPath))\n{\n    if (m_storageUri.empty()) {\n        throw MDSEXCEPTION(\"Storage URI cannot be empty.\");\n    }\n\n    if (!m_parentPath.empty() && m_blobName.empty()) {\n        throw MDSEXCEPTION(\"Blob name cannot be empty when inside a container.\");\n    }\n}\n\n/// Get exception message from exception_ptr\nstatic std::string\nGetEptrMsg(std::exception_ptr eptr) // passing by value is ok\n{\n    try {\n        if (eptr) {\n            std::rethrow_exception(eptr);\n        }\n    }\n    catch(const std::exception& e) {\n        return e.what();\n    }\n    return std::string();\n}\n\nstatic void\nHandleStorageException(\n    const storage_exception& ex\n    )\n{\n    auto result = ex.result();\n    auto httpcode = result.http_status_code();\n\n    std::ostringstream strm;\n    strm << \"Error: storage exception in reading MDS blob: \"\n         << \"Http status code=\" << httpcode << \"; \"\n         << \"Message: \" << ex.what() << \". \";\n\n    auto err = result.extended_error();\n    if (!err.message().empty()) {\n        strm << \"Extended info: \" << err.message() << \". \";\n    }\n\n    auto innerEx = GetEptrMsg(ex.inner_exception());\n    if (!innerEx.empty()) {\n        strm << \"Inner exception: \" << innerEx << \".\";\n    }\n\n    MdsCmdLogError(strm);\n}\n\nstatic operation_context\nCreateOperationContext(const std::string& reqId)\n{\n    operation_context op;\n    op.set_client_request_id(reqId);\n    return op;\n}\n\nstatic blob_request_options\nBlobRequestOptionsWithRetry()\n{\n    auto requestOpt = blob_request_options();\n    exponential_retry_policy retryPolicy;\n    requestOpt.set_retry_policy(retryPolicy);\n    return requestOpt;\n}\n\ncloud_blob\nMdsBlobReader::GetBlob() const\n{\n    Trace trace(Trace::MdsCmd, \"MdsBlobReader::GetBlob\");\n\n    web::http::uri webUri = {m_storageUri};\n    storage_uri uriObj = {webUri};\n    cloud_blob blob;\n    cloud_blob_container containerObj(uriObj);\n\n    if (m_parentPath.empty()) {\n        blob = containerObj.get_blob_reference(m_blobName);\n    }\n    else {\n        auto dirObj = containerObj.get_directory_reference(m_parentPath);\n\n        if (!dirObj.is_valid()) {\n            std::ostringstream strm;\n            strm << \"Failed to get container directory '\" << m_parentPath << \"'.\";\n            throw BlobNotFoundException(strm.str());\n        }\n\n        blob = dirObj.get_blob_reference(m_blobName);\n    }\n    auto requestId = utility::uuid_to_string(utility::new_uuid());\n    auto op = CreateOperationContext(requestId);\n\n    if (!blob.exists(BlobRequestOptionsWithRetry(), op)) {\n        std::ostringstream strm;\n        strm << \"Failed to find blob '\" << m_blobName << \"' in parent path '\" << m_parentPath << \"'.\"\n             << \"Request id: \" << requestId << \".\";\n        throw BlobNotFoundException(strm.str());\n    }\n\n   
 return blob;\n}\n\nvoid\nMdsBlobReader::ReadBlobToFile(\n    const std::string & filepath\n    ) const\n{\n    if (filepath.empty()) {\n        throw MDSEXCEPTION(\"Filepath name to save blob data cannot be empty.\");\n    }\n\n    std::string requestId;\n    try {\n        auto blob = GetBlob();\n        requestId = utility::uuid_to_string(utility::new_uuid());\n        auto op = CreateOperationContext(requestId);\n        blob.download_to_file(filepath, access_condition(), BlobRequestOptionsWithRetry(), op);\n    }\n    catch(const storage_exception & ex)\n    {\n        HandleStorageException(ex);\n        if (!requestId.empty()) {\n            MdsCmdLogError(\"Request id: \" + requestId);\n        }\n    }\n    catch(const BlobNotFoundException& ex)\n    {\n        MdsCmdLogWarn(\"Specified blob \" + m_blobName + \" is not found: \" + ex.what());\n    }\n}\n\nstd::string\nMdsBlobReader::ReadBlobToString() const\n{\n    Trace trace(Trace::MdsCmd, \"MdsBlobReader::ReadBlobToString\");\n\n    std::string requestId;\n\n    try {\n        auto blob = GetBlob();\n        requestId = utility::uuid_to_string(utility::new_uuid());\n        auto op = CreateOperationContext(requestId);\n        auto streamObj = blob.open_read(access_condition(), BlobRequestOptionsWithRetry(), op);\n        concurrency::streams::container_buffer<std::string> cbuf;\n        streamObj.read_to_end(cbuf).get();\n        streamObj.close();\n        return cbuf.collection();\n    }\n    catch(const storage_exception & ex)\n    {\n        HandleStorageException(ex);\n        if (!requestId.empty()) {\n            MdsCmdLogError(\"Request id: \" + requestId);\n        }\n    }\n    catch(const BlobNotFoundException & ex)\n    {\n        MdsCmdLogWarn(\"Specified blob \" + m_blobName + \" is not found: \" + ex.what());\n    }\n\n    return std::string();\n}\n\npplx::task<std::string>\nMdsBlobReader::ReadBlobToStringAsync() const\n{\n    Trace trace(Trace::MdsCmd, \"MdsBlobReader::ReadBlobToStringAsync\");\n\n    std::string requestId;\n\n    try {\n        auto blob = GetBlob();\n        requestId = utility::uuid_to_string(utility::new_uuid());\n        auto op = CreateOperationContext(requestId);\n        auto asyncReadTask = blob.open_read_async(access_condition(), BlobRequestOptionsWithRetry(), op);\n        return asyncReadTask.then([=](concurrency::streams::istream streamObj)\n        {\n            try\n            {\n                concurrency::streams::container_buffer<std::string> cbuf;\n                streamObj.read_to_end(cbuf).get();\n                streamObj.close();\n                return cbuf.collection();\n            }\n            catch (const storage_exception& ex)\n            {\n                HandleStorageException(ex);\n                if (!requestId.empty()) {\n                    MdsCmdLogError(\"Request id: \" + requestId);\n                }\n            }\n            return std::string();\n        });\n    }\n    catch(const storage_exception & ex)\n    {\n        HandleStorageException(ex);\n        if (!requestId.empty()) {\n            MdsCmdLogError(\"Request id: \" + requestId);\n        }\n    }\n    catch(const BlobNotFoundException & ex)\n    {\n        MdsCmdLogWarn(\"Specified blob \" + m_blobName + \" is not found: \" + ex.what());\n    }\n\n    return pplx::task<std::string>([](){ return std::string(); });\n}\n\n\nuint64_t\nMdsBlobReader::GetLastModifiedTimeStamp(\n        std::function<void(const MdsBlobReader*,\n                const BlobNotFoundException&)> 
blobNotFoundExHandler) const\n{\n    uint64_t lastModifiedTimeStamp = 0;\n\n    std::string requestId;\n    try\n    {\n        auto blob = GetBlob();\n        requestId = utility::uuid_to_string(utility::new_uuid());\n        auto op = CreateOperationContext(requestId);\n        blob.download_attributes(access_condition(), BlobRequestOptionsWithRetry(), op);\n        lastModifiedTimeStamp = blob.properties().last_modified().to_interval();\n    }\n    catch(const storage_exception & ex)\n    {\n        HandleStorageException(ex);\n        if (!requestId.empty()) {\n            MdsCmdLogError(\"Request id: \" + requestId);\n        }\n    }\n    catch(const BlobNotFoundException & ex)\n    {\n        blobNotFoundExHandler(this, ex);\n    }\n\n    return lastModifiedTimeStamp;\n}\n\n\npplx::task<uint64_t>\nMdsBlobReader::GetLastModifiedTimeStampAsync(\n        std::function<void(const MdsBlobReader*,\n                const BlobNotFoundException&)> blobNotFoundExHandler) const\n{\n    uint64_t lastModifiedTimeStamp = 0;\n\n    std::string requestId;\n    try\n    {\n        auto blob = GetBlob();\n        requestId = utility::uuid_to_string(utility::new_uuid());\n        auto op = CreateOperationContext(requestId);\n        auto asyncAttrDownloadTask = blob.download_attributes_async(access_condition(), BlobRequestOptionsWithRetry(), op);\n        return asyncAttrDownloadTask.then([=]()\n        {\n            return blob.properties().last_modified().to_interval();\n        });\n    }\n    catch(const storage_exception & ex)\n    {\n        HandleStorageException(ex);\n        if (!requestId.empty()) {\n            MdsCmdLogError(\"Request id: \" + requestId);\n        }\n    }\n    catch(const BlobNotFoundException & ex)\n    {\n        blobNotFoundExHandler(this, ex);\n    }\n\n    return pplx::task<uint64_t>([=]()\n    {\n        return lastModifiedTimeStamp; // = 0\n    });\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/MdsBlobReader.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __MDSBLOBREADER__HH__\n#define __MDSBLOBREADER__HH__\n\n#include <string>\n#include <vector>\n#include <cpprest/streams.h>\n#include <cpprest/pplx/pplxtasks.h>\n\n#include \"MdsException.hh\"\n#include \"MdsCmdLogger.hh\"\n\nnamespace azure\n{\n    namespace storage {\n        class cloud_blob;\n    }\n} // namespace azure\n\nnamespace mdsd { namespace details\n{\n\n/// <summary>\n/// Implement a class to read blob from azure storage related to MDS.\n/// </summary>\nclass MdsBlobReader\n{\npublic:\n    /// <summary>\n    /// Construct a new blob reader.\n    /// <param name=\"storageUri\"> The absolute URI to the blob root container</param>\n    /// <param name=\"blobName\"> blob name </param>\n    /// <param name=\"parentPath\"> the given blob's parent container name</param>\n    /// </summary>\n    MdsBlobReader(std::string storageUri,\n                  std::string blobName = \"\",\n                  std::string parentPath = \"\");\n\n    ~MdsBlobReader() {}\n\n    MdsBlobReader(const MdsBlobReader& other) = default;\n    MdsBlobReader(MdsBlobReader&& other) = default;\n    MdsBlobReader& operator=(const MdsBlobReader& other) = default;\n    MdsBlobReader& operator=(MdsBlobReader&& other) = default;\n\n    /// <summary> Read current blob object to a given file. </summary>\n    void ReadBlobToFile(const std::string & filepath) const;\n\n    /// <summary>\n    /// Read current blob object to a string.\n    /// Return the blob content, or empty string if any error.\n    /// </summary>\n    std::string ReadBlobToString() const;\n\n    /// <summary>\n    /// Start async reading of current blob object to a string.\n    /// Return the task whose result will be the string.\n    /// </summary>\n    pplx::task<std::string> ReadBlobToStringAsync() const;\n\n    /// <summary>\n    /// Returns the read blob's LMT (# seconds since epoch).\n    /// 0 will be returned if blob doesn't exist\n    /// or if any exception is thrown (e.g., storage exception)\n    /// </summary>\n    uint64_t GetLastModifiedTimeStamp(\n            std::function<void(const MdsBlobReader*,\n                    const BlobNotFoundException&)> blobNotFoundExHandler) const;\n\n    /// <summary>\n    /// Start async reading of blob's LMT (# seconds since epoch).\n    /// Return the task whose result will be the the blob's LMT.\n    /// </summary>\n    pplx::task<uint64_t> GetLastModifiedTimeStampAsync(\n            std::function<void(const MdsBlobReader*,\n                    const BlobNotFoundException&)> blobNotFoundExHandler) const;\n\n    // Typical BlobNotFoundException handlers provided here\n    static void DoNothingBlobNotFoundExHandler(const MdsBlobReader*, const BlobNotFoundException&) {}\n    static void LogWarnBlobNotFoundExHandler(const MdsBlobReader*, const BlobNotFoundException& ex)\n    {\n        MdsCmdLogWarn(\"Specified blob is not found: \" + std::string(ex.what()));\n    }\n\nprivate:\n    /// <summary>\n    /// Get current blob object.\n    /// </summary>\n    azure::storage::cloud_blob GetBlob() const;\n\nprivate:\n    std::string m_storageUri;\n    std::string m_blobName;\n    std::string m_parentPath;\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __MDSBLOBREADER__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/MdsCmdLogger.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __MDSCMDLOGGER__HH__\n#define __MDSCMDLOGGER__HH__\n\n#include \"Logger.hh\"\n\nnamespace mdsd { namespace details\n{\n    inline void MdsCmdLogError(const std::string & msg)\n    {\n        Logger::LogError(\"MDSCMD \" + msg);\n    }\n\n    inline void MdsCmdLogError(const std::ostringstream& strm)\n    {\n        MdsCmdLogError(strm.str());\n    }\n\n    inline void MdsCmdLogWarn(const std::string & msg)\n    {\n        Logger::LogWarn(\"MDSCMD \" + msg);\n    }\n\n    inline void MdsCmdLogWarn(const std::ostringstream& strm)\n    {\n        MdsCmdLogWarn(strm.str());\n    }\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __MDSCMDLOGGER__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/MdsException.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n#include \"MdsException.hh\"\n\nusing namespace mdsd;\n\nstatic\nstd::string GetFileBasename(\n    const std::string & filepath\n    )\n{\n    auto p = filepath.find_last_of('/');\n    if (p == std::string::npos) {\n        return filepath;\n    }\n    return filepath.substr(p+1);\n}\n\nMdsException::MdsException(\n    const char* filename,\n    int lineno,\n    const std::string & message)\n    : std::exception()\n{\n    std::ostringstream strm;\n    if (filename) {\n        strm << GetFileBasename(filename) << \":\" << lineno << \" \";\n    }\n    strm << message;\n    m_msg = strm.str();\n}\n\nMdsException::MdsException(\n    const char* filename,\n    int lineno,\n    const char* message)\n    : std::exception()\n{\n    if (message) {\n        std::ostringstream strm;\n        if (filename) {\n            strm << GetFileBasename(filename) << \":\" << lineno << \" \";\n        }\n        strm << message;\n        m_msg = strm.str();\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/MdsException.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef __MDSEXCEPTION__HH__\n#define __MDSEXCEPTION__HH__\n\n#include <string>\n#include <exception>\n\n#define MDSEXCEPTION(message) \\\n    mdsd::MdsException(__FILE__, __LINE__, message)\n/**/\n\nnamespace mdsd\n{\n\nclass MdsException : public std::exception\n{\nprivate:\n    std::string m_msg;\n\npublic:\n    MdsException(const char* filename,\n                 int lineno,\n                 const std::string & message);\n\n    MdsException(const char* filename,\n                 int lineno,\n                 const char* message);\n\n    virtual const char * what() const noexcept\n    {\n        return m_msg.c_str();\n    }\n};\n\nclass BlobNotFoundException : public std::exception\n{\nprivate:\n    std::string m_msg;\n\npublic:\n    BlobNotFoundException(std::string message) noexcept :\n        std::exception(),\n        m_msg(std::move(message))\n    {}\n\n    virtual const char * what() const noexcept\n    {\n        return m_msg.c_str();\n    }\n};\n\nclass TooBigEventHubDataException : public MdsException\n{\npublic:\n    TooBigEventHubDataException(const std::string & msg) :\n        MdsException(nullptr, 0, msg)\n        {}\n    TooBigEventHubDataException(const char* msg) :\n        MdsException(nullptr, 0, msg)\n        {}\n};\n\n}\n\n#endif // __MDSEXCEPTION__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/PersistFiles.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <fstream>\n#include <cstdio>\n#include <cstring>\n#include <system_error>\n#include <vector>\n\n#include <cpprest/filestream.h>\n#include <cpprest/containerstream.h>\n\nextern \"C\" {\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n}\n\n#include \"PersistFiles.hh\"\n#include \"MdsCmdLogger.hh\"\n#include \"Trace.hh\"\n#include \"MdsException.hh\"\n#include \"Utility.hh\"\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\n// If the filepath exists and it is a dir, return true;\n// otherwise, return false.\nstatic bool\nIsDirExists(\n    const std::string& filepath\n    )\n{\n    struct stat sb;\n    auto rtn = stat(filepath.c_str(), &sb);\n    mode_t mode = sb.st_mode;\n    return (0 == rtn && S_ISDIR(mode));\n}\n\nPersistFiles::PersistFiles(\n    const std::string & dirname\n    ) :\n    m_dirname(dirname),\n    m_suffix(\"XXXXXX\"),\n    m_fileTemplate(new char[dirname.size()+m_suffix.size()+2])\n{\n    if (!IsDirExists(m_dirname)) {\n        throw MDSEXCEPTION(std::string(\"Failed to find directory '\") + m_dirname + \"'.\");\n    }\n    snprintf(m_fileTemplate.get(), dirname.size()+2, \"%s/\", dirname.c_str());\n}\n\nint\nPersistFiles::CreateUniqueFile() const\n{\n    // reset template for mkstemp\n    auto offset = m_dirname.size()+1;\n    auto sz = m_suffix.size() + 1;\n    snprintf(m_fileTemplate.get()+offset, sz, \"%s\", m_suffix.c_str());\n\n    int fd = mkstemp(m_fileTemplate.get());\n    if (-1 == fd) {\n        auto errnum = errno;\n        std::error_code ec(errnum, std::system_category());\n        std::ostringstream strm;\n        strm << \"Error: creating unique persist file with mkstemp() failed. errno=\"\n            << errnum << \"; Reason: \" << ec.message();\n        MdsCmdLogError(strm);\n    }\n    return fd;\n}\n\nbool\nPersistFiles::Add(\n    const EventDataT& data\n    ) const\n{\n    if (data.empty()) {\n        return true;\n    }\n\n    auto fd = CreateUniqueFile();\n    if (fd < 0) {\n        return false;\n    }\n    MdsdUtil::FdCloser fdCloser(fd);\n\n    bool resultOK = true;\n    auto datastr = data.Serialize();\n    if (-1 == write(fd, datastr.c_str(), datastr.size())) {\n        std::error_code ec(errno, std::system_category());\n        MdsCmdLogError(\"Error: write() to persist file failed. Reason: \"\n            + ec.message());\n        resultOK = false;\n    }\n\n    return resultOK;\n}\n\nEventDataT\nPersistFiles::Get(\n    const std::string& filepath\n    ) const\n{\n    if (filepath.empty()) {\n        throw MDSEXCEPTION(\"Empty string is used for file path parameter.\");\n    }\n\n    std::ifstream fin(filepath);\n    if (!fin) {\n        throw MDSEXCEPTION(\"Failed to open file '\" + filepath + \"'.\");\n    }\n    fin.seekg(0, fin.end);\n    size_t fsize = fin.tellg();\n    fin.seekg(0, fin.beg);\n\n    std::vector<char> buf(fsize);\n    fin.read(buf.data(), fsize);\n    fin.close();\n\n    return EventDataT::Deserialize(buf.data(), fsize);\n}\n\nbool\nPersistFiles::Remove(\n    const std::string& filepath\n    ) const\n{\n    if (filepath.empty()) {\n        return true;\n    }\n    if (remove(filepath.c_str())) {\n        std::error_code ec(errno, std::system_category());\n        MdsCmdLogError(\"Error: failed to remove persist file '\"\n            + filepath + \"'. 
Reason: \" + ec.message());\n        return false;\n    }\n    return true;\n}\n\npplx::task<bool>\nPersistFiles::RemoveAsync(\n    const std::string& filepath\n    ) const\n{\n    Trace trace(Trace::MdsCmd, \"PersistFiles::RemoveAsync\");\n\n    if (filepath.empty()) {\n        return pplx::task_from_result(true);\n    }\n\n    return pplx::task<bool>([=]() -> bool {\n        return Remove(filepath);\n    })\n    .then([](pplx::task<bool> previous_task)\n    {\n        try {\n            return previous_task.get();\n        }\n        catch(std::exception& ex) {\n            MdsCmdLogError(\"PersistFiles::RemoveAsync failed with \" + std::string(ex.what()));\n        }\n        catch(...) {\n            MdsCmdLogError(\"PersistFiles::RemoveAsync failed with unknown exception.\");\n        }\n        return false;\n    });\n}\n\n\nint32_t\nPersistFiles::GetAgeInSeconds(\n    const std::string & filepath\n    ) const\n{\n    struct stat sb;\n    auto rtn = stat(filepath.c_str(), &sb);\n    if (rtn) {\n        std::error_code ec(errno, std::system_category());\n        MdsCmdLogError(\"Error: failed to locate persist file '\" + filepath +\n            \"'. Reason: \" + ec.message());\n        return -1;\n    }\n    auto now = time(nullptr);\n    return static_cast<int32_t>(now - sb.st_mtime);\n}\n\n\nPersistFiles::const_iterator\nPersistFiles::cbegin() const\n{\n    DirectoryIter diter{m_dirname};\n    return diter;\n}\n\nPersistFiles::const_iterator\nPersistFiles::cend() const\n{\n    DirectoryIter diter;\n    return diter;\n}\n\nsize_t\nPersistFiles::GetNumItems() const\n{\n    size_t count = 0;\n    auto endIter = cend();\n    for (auto iter = cbegin(); iter != endIter; ++iter) {\n        count++;\n    }\n    return count;\n}\n\npplx::task<EventDataT>\nPersistFiles::GetAsync(\n    const std::string & filepath\n    ) const\n{\n    if (filepath.empty()) {\n        MdsCmdLogError(\"Error: GetAsync: unexpected empty filepath.\");\n        return pplx::task_from_result<EventDataT>(EventDataT());\n    }\n\n    return concurrency::streams::file_stream<char>::open_istream(filepath)\n    .then([filepath](concurrency::streams::basic_istream<char> inFile)\n    {\n        if (!inFile.is_open()) {\n            MdsCmdLogError(\"Error: PersistFiles failed to open file '\" + filepath + \"'.\");\n            return pplx::task_from_result<EventDataT>(EventDataT());\n        }\n        else\n        {\n            concurrency::streams::container_buffer<std::string> buf;\n            return inFile.read_to_end(buf)\n            .then([inFile, filepath, buf](size_t bytesRead)\n            {\n                inFile.close();\n                if (bytesRead > 0) {\n                    return pplx::task_from_result<EventDataT>(EventDataT::Deserialize(buf.collection()));\n                }\n\n                MdsCmdLogError(\"Error: no data is read from '\" + filepath + \"', unexpected empty file.\");\n                return pplx::task_from_result<EventDataT>(EventDataT());\n            });\n        }\n    });\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/PersistFiles.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __PERSISTFILES__HH__\n#define __PERSISTFILES__HH__\n\n#include <string>\n#include <pplx/pplxtasks.h>\n#include \"DirectoryIter.hh\"\n#include \"EventData.hh\"\n\nnamespace mdsd { namespace details\n{\n\nclass PersistFiles\n{\npublic:\n    typedef DirectoryIter const_iterator;\n\n    /// <summary>\n    /// Constructor. It will persist files to given directory.\n    /// Throw MdsException if it fails to access the directory.\n    /// </summary>\n    PersistFiles(const std::string & dirname);\n\n    virtual ~PersistFiles() {}\n\n    /// <summary>\n    /// Add given data to a new, unique file.\n    /// Return true if success, false if any error.\n    /// If 'data' is empty, return true and do nothing.\n    /// </summary>\n    bool Add(const EventDataT& data) const;\n\n    /// <summary>\n    /// Get the content of the file given filepath.\n    /// Return file content or throw exception if any error.\n    /// </summary>\n    EventDataT Get(const std::string& filepath) const;\n\n    /// <summary>\n    /// Get the content of the file asynchronously given filepath.\n    /// Return the task for file content, or task for empty string if any error.\n    /// </summary>\n    pplx::task<EventDataT> GetAsync(const std::string& filepath) const;\n\n    /// <summary>\n    /// Remove a filepath.\n    /// Return true if success, false if any error.\n    /// </summary>\n    bool Remove(const std::string & filepath) const;\n\n    /// <summary>\n    /// Remove a filepath asynchronously.\n    /// Return true if success, false if any error.\n    /// </summary>\n    pplx::task<bool> RemoveAsync(const std::string & filepath) const;\n\n    /// <summary>\n    /// Get a file's last modification time.\n    /// If the file doesn't exit, return -1.\n    /// </summary>\n    int32_t GetAgeInSeconds(const std::string & filepath) const;\n\n    const_iterator cbegin() const;\n    const_iterator cend() const;\n\n    /// <summary>\n    /// Get number of items in persist.\n    /// </summary>\n    size_t GetNumItems() const;\n\nprivate:\n    /// <summary>\n    /// Create a unique file. Return an open file descriptor, or -1 if any error.\n    /// </summary>\n    int CreateUniqueFile() const;\n\nprivate:\n    std::string m_dirname;\n    std::string m_suffix;\n    std::unique_ptr<char[]> m_fileTemplate;\n};\n\n} // namespace details\n} // namespace mdsd\n\n#endif // __PERSISTFILES__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/PublisherStatus.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <ostream>\n#include <map>\n#include \"PublisherStatus.hh\"\n\nusing namespace mdsd::details;\n\n// To prevent static initialization order fiasco.\n// see https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use\nstatic std::map<PublisherStatus, std::string> & GetPublisherStatusMap()\n{\n    static auto enumMap = new std::map<PublisherStatus, std::string>(\n    {\n        { PublisherStatus::Idle, \"Idle\" },\n        { PublisherStatus::PublicationSucceeded, \"PublicationSucceeded\" },\n        { PublisherStatus::PublicationFailedWithUnknownReason, \"PublicationFailedWithUnknownReason\" },\n        { PublisherStatus::PublicationFailedWithBadRequest, \"PublicationFailedWithBadRequest\" },\n        { PublisherStatus::PublicationFailedWithAuthError, \"PublicationFailedWithAuthError\" },\n        { PublisherStatus::PublicationFailedServerBusy, \"PublicationFailedServerBusy\" },\n        { PublisherStatus::PublicationFailedThrottled, \"PublicationFailedThrottled\" }\n    });\n    return *enumMap;\n}\n\nstd::ostream&\noperator<<(\n    std::ostream& os,\n    PublisherStatus status\n    )\n{\n    auto enumMap = GetPublisherStatusMap();\n    auto iter = enumMap.find(status);\n    if (iter == enumMap.end()) {\n        os << \"Unknown PublisherStatus\";\n    }\n    else {\n        os << iter->second;\n    }\n\n    return os;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/PublisherStatus.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __PUBLISHERSTATUS_HH__\n#define __PUBLISHERSTATUS_HH__\n\n#include <iosfwd>\n\nnamespace mdsd { namespace details\n{\n\nenum class PublisherStatus\n{\n    /// <summary>\n    /// Object has not started any work.\n    /// </summary>\n    Idle,\n\n    /// <summary>\n    /// The last publication attempt succeeded.\n    /// </summary>\n    PublicationSucceeded,\n\n    /// <summary>\n    /// The last publication attempt failed.\n    /// </summary>\n    PublicationFailedWithUnknownReason,\n\n    /// <summary>\n    /// The last publication attempt failed with bad request error.\n    /// </summary>\n    PublicationFailedWithBadRequest,\n\n    /// <summary>\n    /// The last publication attempt failed with auth error.\n    /// </summary>\n    PublicationFailedWithAuthError,\n\n    /// <summary>\n    /// The last publication attempt failed because server is busy, need to retry later.\n    /// </summary>\n    PublicationFailedServerBusy,\n\n    /// <summary>\n    /// The last publication attempt failed because of throttled, need to retry later.\n    /// </summary>\n    PublicationFailedThrottled\n};\n\n} // namespace details\n} // namespace mdsd\n\nstd::ostream&\noperator<<(\n    std::ostream& os,\n    mdsd::details::PublisherStatus status\n    );\n\n\n#endif // __PUBLISHERSTATUS_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdscommands/commands.xsd",
    "content": "<?xml version=\"1.0\"?>\n\n<!-- This holds the schema for the XML that defines a set of commands passed to the monitoring agent -->\n\n<xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n\n  <xs:complexType name=\"ParametersType\">\n    <xs:choice maxOccurs=\"unbounded\">\n      <xs:element name=\"Parameter\" type=\"xs:string\" minOccurs=\"1\" maxOccurs=\"1\" />\n    </xs:choice>\n  </xs:complexType>\n\n  <xs:complexType name=\"CommandType\">\n    <xs:sequence>\n      <xs:element name=\"Verb\" type=\"xs:string\" minOccurs=\"1\" maxOccurs=\"1\"/>\n      <xs:element name=\"Parameters\" type=\"ParametersType\" minOccurs=\"0\" maxOccurs=\"1\"/>\n    </xs:sequence>\n  </xs:complexType>\n\n  <xs:element name=\"Commands\">\n    <xs:complexType>\n      <xs:sequence>\n        <xs:element name=\"Command\" type=\"CommandType\" minOccurs=\"1\" maxOccurs=\"unbounded\" />\n      </xs:sequence>\n      <xs:attribute name=\"version\" type=\"xs:string\" use=\"required\"/>\n    </xs:complexType>\n  </xs:element>\n\n</xs:schema>\n\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Batch.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Batch.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"MdsEntityName.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"IMdsSink.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"Utility.hh\"\n#include <sstream>\n\nusing std::string;\n\nBatch::Batch(MdsdConfig* config, const MdsEntityName& target, const Credentials* creds, int interval)\n  : _config(config), _batchQIBase(0), _interval(interval), _sink(IMdsSink::CreateSink(config, target, creds)), _dirty(false)\n{\n\tTrace trace(Trace::Batching, \"Batch constructor\");\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Created batch \" << this << \" (eventName \" << target << \" QI \" << interval << \")\";\n\t\ttrace.NOTE(msg.str());\n\t}\n\t_sink->ValidateAccess();\n}\n\nvoid\nBatch::AddRow(const CanonicalEntity & row)\n{\n\tTrace trace(Trace::Batching, \"Batch::AddRow\");\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Batch \" << this << \" add CE \" << row;\n\t\ttrace.NOTE(msg.str());\n\t}\n\n\tMdsTime qibase = row.GetPreciseTimeStamp().Round(_interval);\n\n\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\n\t// If the Query Interval Base has changed, then flush the batch.\n\tif (qibase != _batchQIBase) {\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Query Interval Base changed from \" << _batchQIBase << \" to \" << qibase;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t\t_sink->Flush();\n\t\t_batchQIBase = qibase;\n\t}\n\n\t_sink->AddRow(row, qibase);\t// May cause flush...\n\n\tMarkTime();\n}\n\n// Add a row to a batch destined for SchemasTable in some storage acct. This is a helper function\n// for building these rows correctly.\nvoid\nBatch::AddSchemaRow(const MdsEntityName &target, const string &hash, const string &schema)\n{\n\tTrace trace(Trace::Batching, \"Batch::AddSchemaRow\");\n\tCanonicalEntity row(3);\n\n\trow.AddColumn(\"PhysicalTableName\", target.Basename());\n\trow.AddColumn(\"MD5Hash\", hash);\n\trow.AddColumn(\"Schema\", schema);\n\n\tAddRow(row);\n}\n\nvoid\nBatch::Flush()\n{\n\tTrace trace(Trace::Batching, \"Batch::Flush\");\n\t\n\tif (IsClean())\n\t\treturn;\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Batch \" << this;\n\t\ttrace.NOTE(msg.str());\n\t}\n\n\tstd::lock_guard<std::recursive_mutex> lock(_mutex);\n\tMarkFlushed();\n\t_sink->Flush();\n}\n\nBatch::~Batch()\n{\n\tTrace trace(Trace::Batching, \"Batch::~Batch\");\n\tif (IsDirty())\n\t\tFlush();\n\n\tdelete _sink;\n}\n\nbool\nBatch::HasStaleData() const\n{\n\tTrace trace(Trace::Batching, \"Batch::HasStaleData\");\n\t// I want data to not linger past the end of the *next* QI. 
If the QI size is 5 minutes and data is\n\t// written at 00:01:00, that data becomes stale at 00:10:00.\n\n\tif (IsClean())\n\t\treturn false;\n\n\tMdsTime trigger = (MdsTime::Now() - _interval).Round(_interval);\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"_lastAction=\" << _lastAction << \" _interval=\" << _interval << \" trigger=\" << trigger;\n\t\ttrace.NOTE(msg.str());\n\t}\n\treturn (_lastAction < trigger);\n}\n\n\nstd::ostream&\noperator<<(std::ostream& os, const Batch& batch)\n{\n\tos << &batch << \" (QIBase \" << batch._batchQIBase << \", Interval \" << batch._interval << \", Sink \" << batch._sink << \")\";\n\treturn os;\n}\n\nBatch*\nBatchSet::GetBatch(const MdsEntityName &target, int interval)\n{\n\tTrace trace(Trace::Batching, \"BatchSet::GetBatch\");\n\n\tauto creds = target.GetCredentials();\n\tkey_t key = std::make_pair(target.Basename(), creds);\n\tstd::ostringstream keystring;\n\n\tif (trace.IsActive()) {\n\t\t// Note: streaming a pointer already yields a 0x-prefixed value.\n\t\tkeystring << \"<\" << target.Basename() << \", \" << creds << \">\";\n\t}\n\n\tstd::lock_guard<std::mutex> lock(_mutex);\t// Lock held until this function returns\n\n\tstd::map<key_t, Batch*>::iterator iter = _map.find(key);\n\n\tif (iter != _map.end()) {\n\t\ttrace.NOTE(\"Found batch for \" + keystring.str());\n\t\treturn iter->second;\n\t}\n\n\ttrace.NOTE(\"Creating batch for \" + keystring.str());\n\n\tstd::ostringstream msg;\n\t// Bug 3532559: Batch constructor can fail if XTableSink constructor fails while\n\t// creating an XTableRequest. So wrap the constructor in a try/catch block.\n\ttry {\n\t\tBatch *batch = new Batch(_config, target, creds, interval);\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"New batch \" << *batch;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t\t_map[key] = batch;\n\t\treturn batch;\n\t}\n\tcatch (const std::exception& e) {\n\t\tmsg << \"GetBatch(\" << target << \") failed to create new batch for \" << keystring.str() << \": \" << e.what();\n\t}\n\tcatch (...) {\n\t\tmsg << \"GetBatch(\" << target << \") caught unknown exception\";\n\t}\n\t// If we got here, we caught an exception and already created the error message\n\tLogger::LogError(msg.str());\n\ttrace.NOTE(msg.str());\n\treturn nullptr;\n}\n\nvoid\nBatchSet::Flush()\n{\n\tTrace trace(Trace::Batching, \"BatchSet::Flush\");\n\t// Walk the _map and flush all the dirty Batches\n\tfor (const auto &iter : _map) {\n\t\tif (iter.second->IsDirty()) {\n\t\t\titer.second->Flush();\n\t\t}\n\t}\n}\n\nvoid\nBatchSet::FlushIfStale()\n{\n\tTrace trace(Trace::Batching, \"BatchSet::FlushIfStale\");\n\t// Walk the _map and flush all the Batches\n\tfor (const auto &item : _map) {\n\t\tif (item.second->HasStaleData()) {\n\t\t\titem.second->Flush();\n\t\t}\n\t}\n}\n\nBatchSet::~BatchSet()\n{\n\tTrace trace(Trace::Batching, \"BatchSet::~BatchSet\");\n\t// Walk the _map and delete all the Batches; deleting them will Flush() them first\n\tfor (auto &iter : _map) {\n\t\tdelete iter.second;\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Batch.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _BATCH_HH_\n#define _BATCH_HH_\n\n#define MAX_BATCH_SIZE 100\n\n#include <utility>\n#include <string>\n#include <vector>\n#include <map>\n#include <mutex>\n#include <ctime>\n#include <iostream>\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n\nclass MdsdConfig;\nclass MdsValue;\nclass Credentials;\nclass CanonicalEntity;\nclass IMdsSink;\n\nclass Batch\n{\n\tfriend std::ostream& operator<<(std::ostream& os, const Batch& batch);\n\npublic:\n\t/// <summary>Force all batched entities into MDS and leave the batch empty</summary>\n\tvoid Flush();\n\n\t/// <summary>Add a row to the batch. May trigger a flush which may or may not flush this row.</summary>\n\t/// <param name=\"row\">The row to be added to the batch. Must be complete (includes all columns). Contents\n\t/// are copied elsewhere by the sink; caller can reuse the object if desired.</param>\n\t/// <param name=\"pkey\">The PartitionKey for the row.</param>\n\t/// <param name=\"qibase\">The QueryInterval to which this row is associated.</param>\n\tvoid AddRow(const CanonicalEntity &row);\n\n\t/// <summary>Add a row to a batch of entries destined for some SchemasTable</summary>\n\tvoid AddSchemaRow(const MdsEntityName &target, const std::string &hash, const std::string &schema);\n\n\t// <summary>True if the batch might have rows from a prior query interval</summary>\n\tbool HasStaleData() const; // { return (_lastAction < (_batchQIBase + _interval)); }\n\n\t~Batch();\n\nprivate:\n\tBatch(MdsdConfig* config, const MdsEntityName &target, const Credentials* creds, int interval);\n\tBatch();\t\t\t\t// No void constructor\n\tBatch(const Batch&);\t\t\t// No copy constructor\n\tBatch& operator=(const Batch&);\t\t// Can't assign\n\n\t/// <summary>Update the _lastAction time.</summary>\n\tvoid MarkTime() { _lastAction.Touch(); _dirty = true; }\n\tvoid MarkFlushed() { _lastAction = MdsTime::Max(); _dirty = false; }\n\tbool IsDirty() const { return _dirty; }\n\tbool IsClean() const { return (! _dirty); }\n\n\tMdsdConfig *_config;\n\tMdsTime _lastAction;\t\t\t// Used to find lingering batches\n\tMdsTime _batchQIBase;\t\t\t// The Query Interval base timestamp for the current batch\n\tint _interval;\t\t\t\t// The width of the interval (in seconds)\n\n\tIMdsSink* _sink;\n\n\tstd::recursive_mutex _mutex;\n\n\tbool _dirty;\t\t\t\t// \"Dirty\" bit; set if any AddRow was called since last flush\n\n\tfriend class BatchSet;\n};\n\nstd::ostream& operator<<(std::ostream& os, const Batch& batch);\n\nclass BatchSet\n{\npublic:\n\tBatchSet(MdsdConfig* c) : _config(c) {}\n\t~BatchSet();\n\n\t// <summary>Get pointer to a Batch object for this table</summary>\n\t// <param name=target>The metadata for the destination for the batch's data</param>\n\t// <param name=interval>The \"query interval\" for the batch, i.e. how often it gets flushed</param>\n\tBatch* GetBatch(const MdsEntityName &target, int interval);\n\n\tvoid Flush();\n\tvoid FlushIfStale();\n\nprivate:\n\tusing key_t = std::pair<std::string, const Credentials*>;\n\n\tBatchSet(const BatchSet&);\t\t// No copy constructor\n\tBatchSet& operator=(const BatchSet&);\t// No copying\n\n\tstd::map<key_t, Batch*> _map;\n\n\tMdsdConfig* _config;\n\tstd::mutex _mutex;\t\t\t// Just covers the BatchSet object, not any of the Batches in the set\n};\n\n#endif // _BATCH_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CMakeLists.txt",
    "content": "SET(CMAKE_SKIP_BUILD_RPATH  FALSE)\nSET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)\n# Reset rpath vars for static executable\nSET(CMAKE_INSTALL_RPATH \"${OMI_LIB_PATH}\")\nSET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n\nset(LINKER_FLAGS \"${LINKER_FLAGS} -static-libgcc -static-libstdc++\")\nset(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${LINKER_FLAGS}\")\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -pthread\")\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -pthread\")\n\nset(LDFLAGS \"-rdynamic\")\nset(LDFLAGS \"${LDFLAGS} -Wl,--wrap=memcpy\") # To force using memcpy@GLIBC_2.2.5 (for old distro versions)\nset(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${LDFLAGS}\")\n\nif(NOT BUILD_NUMBER)\n    execute_process(\n        COMMAND date +%s\n        OUTPUT_VARIABLE BUILD_NUMBER)\nendif(NOT BUILD_NUMBER)\nmessage(\"Build number: ${BUILD_NUMBER}\")\n\nadd_definitions(-DBUILD_NUMBER=${BUILD_NUMBER})\n\ninclude_directories(\n    /usr/include/libxml2\n    /usr/local/include\n    ${OMI_INCLUDE_DIRS}\n    ${CASABLANCA_INCLUDE_DIRS}\n    ${STORAGE_INCLUDE_DIRS}\n    ${CMAKE_SOURCE_DIR}/mdsd\n    ${CMAKE_SOURCE_DIR}/mdsdinput\n    ${CMAKE_SOURCE_DIR}/mdsdlog\n    ${CMAKE_SOURCE_DIR}/mdsdutil\n    ${CMAKE_SOURCE_DIR}/mdscommands\n    ${CMAKE_SOURCE_DIR}/mdsdcfg\n)\n\n# include(/usr/local/lib/bond/bond.cmake)\n\nlink_directories(\n    ${OMI_LIB_PATH}\n)\n\nif(\"${CMAKE_CXX_COMPILER_ID}\" MATCHES \"Clang\")\n    # Some dependency library has no static clang lib, so use shared ones.\n    set(XML_LIB xml++-2.6${LIBSUFFIX})\n    set(GLIBMM_LIB glibmm-2.4${LIBSUFFIX})\n    set(SIGC_LIB sigc-2.0${LIBSUFFIX})\n    set(BOOST_LIBS\n        boost_log${LIBSUFFIX}\n        boost_iostreams${LIBSUFFIX}\n        boost_regex${LIBSUFFIX}\n        boost_thread${LIBSUFFIX}\n        boost_system${LIBSUFFIX}\n    )\n\nelse()\n    # For gcc, use static libs\n    set(XML_LIB /usr/lib/libxml++-2.6.a)\n    set(GLIBMM_LIB /usr/lib/x86_64-linux-gnu/libglibmm-2.4.a)\n    set(SIGC_LIB /usr/lib/x86_64-linux-gnu/libsigc-2.0.a)\n    set(BOOST_LIBS\n        /usr/lib/x86_64-linux-gnu/libboost_log.a\n        /usr/lib/x86_64-linux-gnu/libboost_iostreams.a\n        /usr/lib/x86_64-linux-gnu/libboost_regex.a\n        /usr/lib/x86_64-linux-gnu/libboost_thread.a\n        /usr/lib/x86_64-linux-gnu/libboost_system.a\n    )\nendif()\n\nset(COMM_LIBS\n    micxx${LIBSUFFIX}\n    omiclient${LIBSUFFIX}\n    rt  # Required not to use clock_gettime@GLIBC_2.17\n    /usr/lib/x86_64-linux-gnu/bond/libbond${LIBSUFFIX}.a\n    ${LINKSTDLIB}\n    ${STORAGE_LIBRARIES}\n    ${CASABLANCA_LIBRARIES}\n    ${XML_LIB}\n    ${GLIBMM_LIB}\n    /usr/lib/x86_64-linux-gnu/libglib-2.0.a\n    ${SIGC_LIB}\n    /usr/lib/x86_64-linux-gnu/libpcre.a\n    /usr/lib/x86_64-linux-gnu/libuuid.a\n    /usr/lib/x86_64-linux-gnu/libxml2.a\n    /usr/lib/x86_64-linux-gnu/libz.a\n    /usr/lib/x86_64-linux-gnu/liblzma.a\n    ${BOOST_LIBS}\n    /usr/local/lib/libssl.a\n    /usr/local/lib/libcrypto.a\n    dl\n)\n\nset(SOURCES\n    Batch.cc\n    CanonicalEntity.cc\n    CfgContext.cc\n    CfgCtxAccounts.cc\n    CfgCtxDerived.cc\n    CfgCtxEnvelope.cc\n    CfgCtxError.cc\n    CfgCtxEtw.cc\n    CfgCtxEventAnnotations.cc\n    CfgCtxEvents.cc\n    CfgCtxExtensions.cc\n    CfgCtxHeartBeats.cc\n    CfgCtxImports.cc\n    CfgCtxManagement.cc\n    CfgCtxMdsdEvents.cc\n    CfgCtxMonMgmt.cc\n    CfgCtxOMI.cc\n    CfgCtxParser.cc\n    CfgCtxRoot.cc\n    CfgCtxSvcBusAccts.cc\n    CfgCtxSchemas.cc\n    CfgCtxSources.cc\n    cJSON.c\n    CmdLineConverter.cc\n    ConfigParser.cc\n    
Constants.cc\n    Credentials.cc\n    cryptutil.cc\n    DaemonConf.cc\n    DerivedEvent.cc\n    Engine.cc\n    EtwEvent.cc\n    EventJSON.cc\n    ExtensionMgmt.cc\n    FileSink.cc\n    IMdsSink.cc\n    ITask.cc\n    LADQuery.cc\n    Listener.cc\n    LocalSink.cc\n    MdsdConfig.cc\n    MdsdMetrics.cc\n    mdsd.cc\n    MdsEntityName.cc\n    MdsSchemaMetadata.cc\n    MdsValue.cc\n    Memcheck.cc\n    OMIQuery.cc\n    OmiTask.cc\n    Pipeline.cc\n    PipeStages.cc\n    Priority.cc\n    ProtocolHandlerBase.cc\n    ProtocolHandlerBond.cc\n    ProtocolHandlerJSON.cc\n    ProtocolListener.cc\n    ProtocolListenerBond.cc\n    ProtocolListenerDynamicJSON.cc\n    ProtocolListenerJSON.cc\n    ProtocolListenerMgr.cc\n    ProtocolListenerTcpJSON.cc\n    RowIndex.cc\n    SaxParserBase.cc\n    SchemaCache.cc\n    Signals.c\n    StoreType.cc\n    StreamListener.cc\n    Subscription.cc\n    TableSchema.cc\n    TermHandler.cc\n    Version.cc\n    XJsonBlobBlockCountsMgr.cc\n    XJsonBlobRequest.cc\n    XJsonBlobSink.cc\n    XTableConst.cc\n    XTableHelper.cc\n    XTableRequest.cc\n    XTableSink.cc\n)\n\n# To set source file specific compile flags, do\n# set_source_files_properties(<file> PROPERTIES COMPILE_FLAGS <flag>)\n# example:\n# set_source_files_properties(Pipeline.cc PROPERTIES COMPILE_FLAGS -Wno-sign-compare)\n\n# Disable warnings from azure storage API.\nset_source_files_properties(\n    XJsonBlobBlockCountsMgr.cc\n    XJsonBlobRequest.cc\n    XJsonBlobSink.cc\n    XTableHelper.cc\n    XTableRequest.cc\n    XTableSink.cc\n    PROPERTIES\n    COMPILE_FLAGS \"-Wno-unused-value -Wno-reorder -Wno-sign-compare\"\n)\n\nset(WRAPPERS_FOR_OLD_GLIBC_SOURCES\n    wrap_memcpy.c\n    fdelt_chk.c\n)\n\nadd_executable(\n    mdsd\n    ${SOURCES}\n    ${WRAPPERS_FOR_OLD_GLIBC_SOURCES}\n)\n\ntarget_link_libraries(\n    mdsd\n    ${CMD_LIB_NAME}\n    ${INPUT_LIB_NAME}\n    ${UTIL_LIB_NAME}\n    ${LOG_LIB_NAME}\n    ${MDSDCFG_LIB_NAME}\n    ${COMM_LIBS}\n)\n\ninstall(TARGETS\n    mdsd\n    RUNTIME DESTINATION ${CMAKE_BINARY_DIR}/release/bin\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CanonicalEntity.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CanonicalEntity.hh\"\n#include \"MdsSchemaMetadata.hh\"\n//#include <algorithm>\n#include \"Utility.hh\"\n\n#include \"Engine.hh\" // To get the mdsd config\n#include \"MdsdConfig.hh\"\n\n#include <unordered_map>\n\nusing std::string;\nusing std::make_pair;\n\n// Clone the src entity. Usually used when some operation plans to add columns to a reduced-size\n// \"master\" entity, so reserve some extra space just in case.\nCanonicalEntity::CanonicalEntity(const CanonicalEntity& src)\n\t: _timestamp(src._timestamp), _pkey(src._pkey), _rkey(src._rkey), _schemaId(src._schemaId),\n    _srctype(src._srctype)\n{\n\t_entity.reserve(2 + src._entity.size());\n\n\t// std::for_each(src._entity.cbegin(), src._entity.cend(), [this](const col_t& col){this->CopyAddColumn(col);});\n\tfor (const auto & col : src._entity) {\n\t\tAddColumn(col.first, new MdsValue(*(col.second)));\n\t}\n}\n\nCanonicalEntity::~CanonicalEntity()\n{\n\tfor (col_t col : _entity) {\n\t\tif (col.second) {\n\t\t\tdelete col.second;\n\t\t}\n\t}\n}\n\n// AddColumn \"owns\" the MdsValue* once it's passed in. We can keep it, or move from it and destroy it.\nvoid\nCanonicalEntity::AddColumn(const std::string name, MdsValue* val)\n{\n    if (name == \"PartitionKey\") {\n        _pkey = std::move(*(val->strval));\n        delete val;\n    } else if (name == \"RowKey\") {\n        _rkey = std::move(*(val->strval));\n        delete val;\n\t} else {\n\t\t_entity.push_back(std::make_pair(name, val));\n\t}\n}\n\n// Add column only if the column name isn't a MetaData column.\nvoid\nCanonicalEntity::AddColumnIgnoreMetaData(const std::string name, MdsValue* val)\n{\n    if (MdsSchemaMetadata::MetadataColumns.count(name)) {\n        delete val;\n    } else {\n        _entity.push_back(std::make_pair(name, val));\n    }\n}\n\nMdsValue*\nCanonicalEntity::Find(const std::string &name) const\n{\n\tfor (auto iter : _entity) {\n\t\tif (iter.first == name) {\n\t\t\treturn iter.second;\n\t\t}\n\t}\n\treturn nullptr;\n}\n\nstd::ostream&\noperator<<(std::ostream& os, const CanonicalEntity& ce)\n{\n\tint count = ce._entity.size();\n\n\tos << \"(\" << count << \" columns, time \" << ce.GetPreciseTimeStamp() << \", _pKey \";\n\tif (ce._pkey.empty()) {\n\t\tos << \"{empty}\";\n\t} else {\n\t\tos << ce._pkey;\n\t}\n\tos << \", _rkey \";\n\tif (ce._pkey.empty()) {\n\t\tos << \"{empty}\";\n\t} else {\n\t\tos << ce._rkey;\n\t}\n\tos << \", [\";\n\tfor (auto iter : ce._entity) {\n\t\tos << iter.first << \"=\";\n\t\tif (iter.second) {\n\t\t\tos << *(iter.second);\n\t\t} else {\n\t\t\tos << \"<nullptr>\";\n\t\t}\n\t\tif (--count) {\n\t\t\tos << \", \";\n\t\t}\n\t}\n\tos << \"])\";\n\n\treturn os;\n}\n\n\nstd::string\nCanonicalEntity::GetJsonRow(\n\t\tconst std::string& timeGrain,\n\t\tconst std::string& tenant,\n\t\tconst std::string& role,\n\t\tconst std::string& roleInstance) const\n{\n    const std::string& resourceId = Engine::GetEngine()->GetConfig()->GetResourceId();\n\n    if (resourceId.empty()) {\n        throw std::runtime_error(\"Empty resourceId (OboDirectPartitionField) when a JSON event is requested\");\n    }\n\n\t// Check if this row is for metric or for log.\n\t// A metric event must include \"CounterName\" and \"Last\" columns.\n\t// Its timeGrain shouldn't be empty.\n\tbool counterNameExists = false, lastExists = false;\n\tfor (auto item : _entity) {\n\t\tif (item.first == \"CounterName\") {\n\t\t\tcounterNameExists = 
true;\n\t\t}\n\t\telse if (item.first == \"Last\") {\n\t\t\tlastExists = true;\n\t\t}\n\t}\n\tbool isMetricRow = counterNameExists && lastExists && !timeGrain.empty();\n\treturn isMetricRow ? GetJsonRowForMetric(resourceId, timeGrain, tenant, role, roleInstance)\n                       : GetJsonRowForLog(resourceId);\n}\n\n\n/* Example return Json string:\n\n{ \"time\" : \"2016-12-21T01:06:04.9067290Z\",\n  \"resourceId\": \"/subscriptions/xxx-xxx-xxx-xxx/resourceGroups/myrg/providers/Microsoft.Compute/VirtualMachines/myvm\",\n  \"properties\" : {\n    \"Column1Name\": \"Column1Value\",\n    \"Column2Name\": \"Column2Value\",\n    \"ColumnNName\": \"ColumnNValue\"\n  },\n  \"category\": \"user\",\n  \"level\": \"info\",\n  \"operationName\": \"some_name_depending_on_detected_event_type\"\n}\n\n*/\nstd::string\nCanonicalEntity::GetJsonRowForLog(const std::string& resourceId) const\n{\n    std::ostringstream oss;\n\n    oss << \"{ \\\"time\\\" : \\\"\" << GetPreciseTimeStamp() << \"\\\",\\n\"\n           \"  \\\"resourceId\\\" : \\\"\" << resourceId << \"\\\",\\n\"\n\t       \"  \\\"properties\\\" : {\\n\";\n\tbool first = true;\n\tstd::string category = \"\\\"Unknown\\\"\", level = \"\\\"Unknown\\\"\", operationName = \"\\\"Unknown\\\"\";\n\tfor (auto iter : _entity) {\n\t\tif (first) {\n\t\t\tfirst = false;\n\t\t} else {\n\t\t\toss << \",\\n\";\n\t\t}\n\t\tif (iter.second) {\n\t\t\toss << \"    \\\"\" << iter.first << \"\\\" : \" << iter.second->ToJsonSerializedString();\n\t\t\t// We consider this event to be from syslog if there's a field named \"Facility\".\n\t\t\t// Set the related Azure Monitor required fields (category, level, operationName) accordingly.\n\t\t\tif (iter.first == \"Facility\") {\n\t\t\t\tcategory = iter.second->ToJsonSerializedString(); // Let's use syslog facility as Azure Monitor \"category\".\n\t\t\t\toperationName = \"\\\"LinuxSyslogEvent\\\"\"; // Change this later as necessary\n\t\t\t} else if (iter.first == \"Severity\") {\n\t\t\t\t// Let's use syslog severity as Azure Monitor \"level\".\n\t\t\t\tif (iter.second->IsNumeric()) {\n\t\t\t\t\tlevel = MdsdUtil::GetSyslogSeverityStringFromValue((int)iter.second->ToDouble());\n\t\t\t\t} else {\n\t\t\t\t\t// iter.second->IsString(), which is the case when syslog events are\n\t\t\t\t    // routed from fluentd's in_syslog & out_mdsd.\n\t\t\t\t\tlevel = iter.second->ToJsonSerializedString();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n    oss << \"\\n  },\\n\"\n           \"  \\\"category\\\" : \" << category << \",\\n\"\n           \"  \\\"level\\\" : \" << level << \",\\n\"\n           \"  \\\"operationName\\\" : \" << operationName << \"\\n\"\n           \"}\";\n\n    return oss.str();\n}\n\n\n/* Example return Json string:\n\n{ \"time\" : \"2016-12-21T01:06:04.9067290Z\",\n  \"resourceId\": \"/subscriptions/xxx-xxx-xxx-xxx/resourceGroups/myrg/providers/Microsoft.Compute/VirtualMachines/myvm\",\n  \"timeGrain\" : \"PT1M\",\n  \"dimensions\" : {\n    \"Tenant\": \"JsonBlobTestTenantName\",\n    \"Role\": \"JsonBlobTestRoleName\",\n    \"RoleInstance\": \"JsonBlobTestRoleinstanceName\"\n  },\n  \"metricName\": \"\\\\Processor\\\\PercentProcessorTime\",\n  \"last\": 0\n}\n\n*/\nstd::string\nCanonicalEntity::GetJsonRowForMetric(\n        const std::string& resourceId,\n\t\tconst std::string& timeGrain,\n\t\tconst std::string& tenant,\n\t\tconst std::string& role,\n\t\tconst std::string& roleInstance) const\n{\n    std::ostringstream oss;\n    oss << \"{ \\\"time\\\" : \\\"\" << GetPreciseTimeStamp() << \"\\\",\\n\";\n    oss << \"  
\\\"resourceId\\\" : \\\"\" << resourceId << \"\\\",\\n\";\n    oss << \"  \\\"timeGrain\\\" : \\\"\" << timeGrain << \"\\\",\\n\";\n    oss << \"  \\\"dimensions\\\": {\\n\"\n           \"     \\\"Tenant\\\": \\\"\" << tenant << \"\\\",\\n\"\n           \"     \\\"Role\\\": \\\"\" << role << \"\\\",\\n\"\n           \"     \\\"RoleInstance\\\": \\\"\" << roleInstance << \"\\\"\\n\"\n           \"  }\";\n\n    static std::unordered_map<std::string, std::string> columnNameTranslations = {\n        { \"CounterName\", \"metricName\" },\n        { \"Average\", \"average\" },\n        { \"Minimum\", \"minimum\" },\n        { \"Maximum\", \"maximum\" },\n        { \"Total\", \"total\" },\n        { \"Last\", \"last\" },\n        { \"Count\", \"count\" }\n    };\n\n    size_t countOfTranslations = 0;\n    for (const auto & nameValue : _entity) {\n        auto translationPair = columnNameTranslations.find(nameValue.first);\n        if (translationPair != columnNameTranslations.end()) {\n            oss << \",\\n  \\\"\" << translationPair->second << \"\\\": \" << nameValue.second->ToJsonSerializedString();\n            countOfTranslations++;\n        }\n    }\n\n    if (columnNameTranslations.size() != countOfTranslations) {\n        std::ostringstream msg;\n        msg << \"Dropping invalid CanonicalEntity for metric (missing required column(s)): \" << *this;\n        throw std::runtime_error(msg.str());\n    }\n\n    oss << \"\\n}\";\n    return oss.str();\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CanonicalEntity.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CANONICALENTITY_HH_\n#define _CANONICALENTITY_HH_\n\n#include <string>\n#include <utility>\n#include <vector>\n#include \"MdsValue.hh\"\n#include \"MdsTime.hh\"\n#include \"SchemaCache.hh\"\n#include <iostream>\n\n// CanonicalEntity is the internal canonical form of an entity to be handed to MDS. This is a middle ground\n// between the form in which the information was reported to the daemon (e.g. via JSON event, OMI query, etc.)\n// and the form required for transmission to the actual MDS data sync (Storage SDK table row object, compressed\n// BOND blob, etc.)\n//\n// CanonicalEntity is the \"owner\" of the data handed to it. Once you pass an MdsValue* to AddColumn,\n// you should leave it alone (and, especially, do not delete it).\n//\n// CanonicalEntity objects can be copied until they are added to a batch. Once added to a batch, the object\n// might at any instant (and asynchronously) be handed to a transport, which will convert it to the form\n// required for transmission to MDS and then delete it. Or it could linger in a local sink and eventually\n// make its way into some other batch; later rinse repeat.\n\nclass CanonicalEntity\n{\n\tusing col_t = std::pair<std::string, MdsValue*>;\n\tfriend std::ostream& operator<<(std::ostream& os, const CanonicalEntity& ce);\n\npublic:\n\tenum class SourceType {\n\t\tIngested,       // created from original ingestion\n\t\tDuplicated      // created from duplication (e.g. during pipeline)\n\t};\n\n\tCanonicalEntity() : _timestamp(0), _schemaId(0) { _entity.reserve(16); }\n\tCanonicalEntity(int n) : _timestamp(0), _schemaId(0) { _entity.reserve(n); }\n\tCanonicalEntity(const CanonicalEntity& src);\n\t~CanonicalEntity();\n\n\tvoid AddColumn(const std::string name, MdsValue* val);\n    void AddColumnIgnoreMetaData(const std::string name, MdsValue* val);\n\n\tstd::string PartitionKey() const { return _pkey; }\n\tstd::string RowKey() const { return _rkey; }\n\n\tvoid SetPreciseTime(const MdsTime& t) { _timestamp = t; }\n\n\tconst MdsTime& GetPreciseTimeStamp() const { return _timestamp; }\n\tconst MdsTime& PreciseTime() const { return _timestamp; }\n\ttime_t GetApproximateTime() const { return _timestamp.to_time_t(); }\n\n\tMdsValue* Find(const std::string &name) const;\n\n\tvoid SetSchemaId(SchemaCache::IdType id) { _schemaId = id; }\n\tSchemaCache::IdType SchemaId() const { return _schemaId; }\n\n\t// Convenience functions\n\tvoid AddColumn(const std::string name, const std::string& val)  { AddColumn(name, new MdsValue(val)); }\n\tvoid AddColumn(const std::string name, const char* val)  { AddColumn(name, new MdsValue(val)); }\n\n\t// Act a bit like a container, but not all the way\n\ttypedef std::vector<col_t>::iterator iterator;\n\ttypedef std::vector<col_t>::const_iterator const_iterator;\n\n\titerator begin() { return _entity.begin(); }\n\tconst_iterator begin() const { return _entity.begin(); }\n\titerator end() { return _entity.end(); }\n\tconst_iterator end() const { return _entity.end(); }\n\tsize_t size() const { return _entity.size(); }\n\n\t// For XJsonBlob & EventHub publishing support\n\t// timeGrain should be an empty string for log events, should be ISO8601 duration string (e.g., \"PT1M\") for metric events.\n\t// Caller is responsible to make all conditions true for metric events.\n\t// That is, when a non-empty timeGrain is passed (for a metric event), the row should\n\t// contain \"CounterName\" 
and \"Last\" columns.\n\tstd::string GetJsonRow(const std::string& timeGrain,\n\t\t\tconst std::string& tenant, const std::string& role, const std::string& roleInstance) const;\n\n\tvoid SetSourceType(SourceType t) { _srctype = t; }\n\tSourceType GetSourceType() const { return _srctype; }\n\nprivate:\n\tstd::vector<col_t> _entity;\n\tMdsTime _timestamp;\n\tstd::string _pkey;\n\tstd::string _rkey;\n\tSchemaCache::IdType _schemaId;\n\tSourceType _srctype = SourceType::Ingested;\n\n\tvoid CopyAddColumn(const col_t& col) { _entity.push_back(std::make_pair(col.first, new MdsValue(*(col.second)))); }\n\n\tstd::string GetJsonRowForLog(const std::string& resourceId) const;\n\tstd::string GetJsonRowForMetric(const std::string& resourceId, const std::string& timeGrain,\n\t\t\tconst std::string& tenant, const std::string& role, const std::string& roleInstance) const;\n};\n\nstd::ostream& operator<<(std::ostream& os, const CanonicalEntity& ce);\n\n#endif // _CANONICALENTITY_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgContext.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgContext.hh\"\n#include \"CfgCtxError.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n\n#include <sstream>\n\nCfgContext*\nCfgContext::SubContextFactory(const std::string& name)\n{\n\tif (IsErrorContext()) {\n\t\treturn new CfgCtxError(this);\n\t}\n\n\tconst subelementmap_t& subelementmap = GetSubelementMap();\n\tauto iter = subelementmap.find(name);\n\tif (iter != subelementmap.end()) {\n\t\treturn (iter->second)(this);\n\t} else {\n\t\tstd::ostringstream oss;\n\t\toss << '<' << Name() << \"> does not define subelement <\" << name << '>';\n\t\tERROR(oss.str());\n\t\treturn new CfgCtxError(this);\n\t}\n}\n\nstd::string\nCfgContext::stringize_attributes(const xmlattr_t& properties)\n{\n\tstd::string result;\n\tbool first = true;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (!first) {\n\t\t\tresult += \", \";\n\t\t}\n\t\tresult += item.first + \"=\\\"\" + item.second + \"\\\"\";\n\t\tfirst = false;\n\t}\n\treturn result;\n}\n\nvoid\nCfgContext::log_entry(const xmlattr_t& properties)\n{\n\tstd::string msg;\n\tif (properties.size() > 0) {\n\t\tmsg = \"Entered \" + Name() + \" with attribute(s) \" + stringize_attributes(properties);\n\t}\n\telse {\n\t\tmsg = \"Entered \" + Name();\n\t}\n\tINFO(msg);\n}\n\nvoid\nCfgContext::log_body(const std::string& body)\n{\n\tINFO(\"Element \" + Name() + \" has body {\" + body + \"}\");\n}\n\nbool\nCfgContext::empty_or_whitespace()\n{\n\treturn MdsdUtil::IsEmptyOrWhiteSpace(Body);\n}\n\nCfgContext* CfgContext::Leave() {\n\tif (!empty_or_whitespace()) {\n\t\tstd::ostringstream oss;\n\t\toss << '<' << Name() << \"> expected empty body; did not expect {\" << Body << '}';\n\t\tWARNING(oss.str());\n\t}\n\treturn ParentContext;\n}\n\nvoid CfgContext::warn_if_attributes(const xmlattr_t& properties)\n{\n\t// log_entry(properties);\n\n\tif (!properties.empty()) {\n\t\tWARNING(\"Expected no attributes\");\n\t}\n}\n\nvoid\nCfgContext::INFO(const std::string& msg) { Config->AddMessage(MdsdConfig::info, msg); }\n\nvoid\nCfgContext::WARNING(const std::string& msg) { Config->AddMessage(MdsdConfig::warning, msg); }\n\nvoid\nCfgContext::ERROR(const std::string& msg) { Config->AddMessage(MdsdConfig::error, msg); }\n\nvoid\nCfgContext::FATAL(const std::string& msg) { Config->AddMessage(MdsdConfig::fatal, msg); }\n\nvoid\nCfgContext::parse_singleton_attribute(\n\tconst std::string & itemname,\n\tconst std::string & itemval,\n\tconst std::string & attrname,\n\tstd::string& attrval\n\t)\n{\n\tif (attrname != itemname) {\n\t\treturn;\n\t}\n\tif (attrval.empty()) {\n\t\tattrval = itemval;\n\t}\n\telse {\n\t\tERROR(\"\\\"\" + attrname + \"\\\" can appear in <\" + Name() + \"> only once.\");\n\t}\n}\n\nvoid\nCfgContext::fatal_if_no_attributes(\n\tconst std::string & attrname,\n\tconst std::string & attrval\n\t)\n{\n\tif (attrval.empty()) {\n\t\tFATAL(\"<\" + Name() + \"> requires \\\"\" + attrname + \"\\\" attribute.\");\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgContext.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCONTEXT_HH_\n#define _CFGCONTEXT_HH_\n\n#include <string>\n#include <map>\n#include <functional>\n#include \"SaxParserBase.hh\"\n\nclass CfgContext;\nclass MdsdConfig;\n\n/// <summary>\n/// Maps from a (permitted) subelement name to the function which returns an appropriate new context.\n/// </summary>\ntypedef std::map<std::string, std::function<CfgContext* (CfgContext*)> > subelementmap_t;\n\n/// <summary>\n/// XML element attribute list\n/// </summary>\ntypedef SaxParserBase::AttributeMap xmlattr_t;\n\n/// <summary>\n/// const iterator on an XML attribute list\n/// </summary>\ntypedef SaxParserBase::AttributeMap::const_iterator xmlattr_iter_t;\n\n/// <summary>\n/// This pure virtual class is really an Interface class for all parsing context classes.\n/// </summary>\nclass CfgContext\n{\npublic:\n\tvirtual ~CfgContext() {}\n\n\t/// <summary>\n\t/// Asks the current context to construct a child context given the name of a subelement.\n\t/// If the current context doesn't permit a subelement of that name, a new Error context is\n\t/// returned.\n\t/// </summary>\n\tCfgContext* SubContextFactory(const std::string& name);\n\n\t/// <summary>\n\t/// Provides attributes of the just-entered XML element to the context for the element.\n\t/// </summary>\n\tvirtual void Enter(const xmlattr_t& properties) = 0;\n\n\t/// <summary>\n\t/// Provides the body of the current XML element to the context for the element. May be\n\t/// called for each chunk of characters found between subelements. By default, just\n\t/// accumulate chunks into the Body member variable.\n\t/// </summary>\n\tvirtual void HandleBody(const std::string& body) { Body += body; }\n\n\t/// <summary>\n\t/// Provides the CDATA text of the current XML element to the context of the element.\n\t/// By default, just accumulate chunks into the CdataText member variable.\n\t/// Example for CDATA: <![CDATA[SomeMessage]]>\n\t/// </summary>\n\tvirtual void HandleCdata(const std::string& cdata) { Body += cdata; }\n\n\t/// <summary>\n\t/// Invoked when the parser is leaving the element. The context should finish its work\n\t/// (e.g. finalize changes to the MdsdConfig object). Once this member is called, the class\n\t/// instance is ready to be destroyed. 
Base class implementation warns if the body is\n\t/// non-empty but otherwise does nothing.\n\t/// </summary>\n\tvirtual CfgContext* Leave();\n\n\t/// <summary>Fetch the printable name for the context.</summary>\n\tvirtual const std::string& Name() const = 0;\n\n\t/// <summary>Fetch the context map of permitted subelements.</summary>\n\tvirtual const subelementmap_t& GetSubelementMap() const = 0;\n\n\t/// <summary>True if the parse is in \"error\" state</summary>\n\tvirtual bool IsErrorContext() const { return false; }\n\n\tvoid INFO(const std::string& msg);\n\tvoid WARNING(const std::string& msg);\n\tvoid ERROR(const std::string& msg);\n\tvoid FATAL(const std::string& msg);\n\nprivate:\n\t/// <summary>\n\t/// Disallow default constructor.\n\t/// </summary>\n\tCfgContext() : ParentContext(NULL), Config(NULL) {}\n\n\t/// <summary>\n\t/// Convert a list of XML SaxParser attributes to a printable string\n\t/// </summary>\n\t/// <param name=\"properties\">The attribute list for the element</param>\n\tstd::string stringize_attributes(const xmlattr_t& properties);\n\nprotected:\n\n\t/// <summary>The context object for the XML element that contains this one.</summary>\n\tCfgContext* const ParentContext;\n\n\t// Should provide an accessor to allow derived classes to call methods through this pointer, with the\n\t// pointer itself remaining private.\n\tMdsdConfig* const Config;\n\n\t/// <summary>Accumulated body of the element</summary>\n\tstd::string Body;\n\n\t/// <summary>\n\t/// Creates a context representing a particular element of an XML document. Knows how to handle attributes\n\t/// of the element and any content (body text). Knows what sub-elements are legal.\n\t/// </summary>\n\t/// <param name=\"previousContext\">A pointer to the parent (enveloping) context.</param>\n\tCfgContext(CfgContext* previousContext) : ParentContext(previousContext), Config(previousContext->Config) {}\n\n\t/// <summary>\n\t/// Creates a context representing the root element of an XML document.\n\t/// </summary>\n\t/// <param name=\"config\">A pointer to the MdsdConfig object being populated.</param>\n\tCfgContext(MdsdConfig* config) : ParentContext(NULL), Config(config) {}\n\n\t/// <summary>\n\t/// Add an Info message recording entry into a new element\n\t/// </summary>\n\t/// <param name=\"properties\">The attribute list for the element</param>\n\tvoid log_entry(const xmlattr_t& properties);\n\n\t/// <summary>\n\t/// Add an Info message recording a body-chunk for the current element\n\t/// </summary>\n\t/// <param name=\"body\">The body text found within the element</param>\n\tvoid log_body(const std::string& body);\n\n\t/// <summary>\n\t/// Return true if the accumulated body of the element is empty or whitespace\n\t/// </summary>\n\tbool empty_or_whitespace();\n\n\t/// <summary>\n\t/// Add a warning message if any attributes were specified\n\t/// </summary>\n\t/// <param name=\"properties\">The attribute list for the element</param>\n\tvoid warn_if_attributes(const xmlattr_t& properties);\n\n\tvoid parse_singleton_attribute(const std::string & itemname, const std::string & itemval,\n\t\tconst std::string & attrname, std::string& attrval);\n\n\tvoid fatal_if_no_attributes(const std::string & attrname, const std::string & attrval);\n\n\tvoid warn_if_attribute_unexpected(const std::string & attrname)\n\t{\n\t\tWARNING(\"Ignoring unexpected <\" + Name() + \"> attribute \\\"\" + attrname + \"\\\"\");\n\t}\n\n\tvoid fatal_if_impossible_subelement()\n\t{\n\t\tFATAL(\"Found <\" + Name() + \"> in <\" + 
ParentContext->Name() + \">; that can't happen.\");\n\t}\n\n};\n\n#endif //_CFGCONTEXT_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxAccounts.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxAccounts.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"Utility.hh\"\n#include \"AzureUtility.hh\"\n#include \"cryptutil.hh\"\n#include \"Trace.hh\"\n#include <algorithm>\n\n///////// CfgCtxAccounts\n\nsubelementmap_t CfgCtxAccounts::_subelements = {\n\t{ \"Account\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxAccount(parent); } },\n\t{ \"SharedAccessSignature\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxSAS(parent); } },\n};\n\nstd::string CfgCtxAccounts::_name = \"Accounts\";\n\nCfgContext*\nCfgCtxAccounts::Leave()\n{\n\treturn CfgContext::Leave();\n}\n\n///////// CfgCtxAccount\n\nvoid\nCfgCtxAccount::Enter(const xmlattr_t& properties)\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxAccount::Enter\");\n\tstd::string moniker, account, sharedKey, decryptKeyPath, blobEndpoint, tableEndpoint;\n\tbool makeDefault = false;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"moniker\") {\n\t\t\tif (moniker.empty()) {\n\t\t\t\tmoniker = item.second;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tERROR(\"\\\"moniker\\\" can appear in <Account> only once\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"key\") {\n\t\t\tsharedKey = item.second;\n\t\t}\n\t\telse if (item.first == \"decryptKeyPath\") {\n\t\t\tdecryptKeyPath = item.second;\n\t\t}\n\t\telse if (item.first == \"account\") {\n\t\t\taccount = item.second;\n\t\t\tsize_t len = account.length();\n\t\t\t// Squeeze any embedded spaces from the account\n\t\t\taccount.erase(std::remove(account.begin(), account.end(), ' '), account.end());\n\t\t\tif (len != account.length()) {\n\t\t\t\tWARNING(\"Account cannot contain spaces; blanks were removed\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"isDefault\") {\n\t\t\tmakeDefault = MdsdUtil::to_bool(item.second);\n\t\t}\n\t\telse if (item.first == \"blobEndpoint\") {\n\t\t\tblobEndpoint = item.second;\n\t\t}\n\t\telse if (item.first == \"tableEndpoint\") {\n\t\t\ttableEndpoint = item.second;\n\t\t}\n\t\telse {\n\t\t\tWARNING(\"Ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\n\tif (moniker.empty()) {\n\t\tFATAL(\"<Account> requires \\\"moniker\\\" attribute\");\n\t}\n\telse {\n\t\t// Create the correct credential object based on the attributes\n\t\t// Must be shared key\n\t\tif (account.empty()) {\n\t\t\tERROR(\"\\\"account\\\" must be set for shared key moniker\");\n\t\t} else if (sharedKey.empty()) {\n\t\t\tERROR(\"\\\"key\\\" must be set for shared key moniker\");\n\t\t} else {\n\t\t\tif (!decryptKeyPath.empty()) {\n\t\t\t\ttry {\n\t\t\t\t\tsharedKey = cryptutil::DecodeAndDecryptString(decryptKeyPath, sharedKey);\n\t\t\t\t}\n\t\t\t\tcatch (const std::exception& e) {\n\t\t\t\t\tERROR(std::string(\"Storage key decryption (using private key at \").append(decryptKeyPath).append(\") failed with the message: \").append(e.what()));\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tcatch (...) 
{\n\t\t\t\t\tERROR(\"Unknown exception thrown when decrypting storage key\");\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tauto creds = new CredentialType::SharedKey(moniker, account, sharedKey);\n\t\t\tif (!blobEndpoint.empty()) {\n\t\t\t\tcreds->BlobUri(blobEndpoint);\n\t\t\t}\n\t\t\tif (!tableEndpoint.empty()) {\n\t\t\t\tcreds->TableUri(tableEndpoint);\n\t\t\t}\n\n\t\t\t/* Validate storage account for table access.\n\t\t\t */\n\t\t\ttry {\n\t\t\t\tMdsdUtil::ValidateStorageCredentialForTable(creds->GetConnectionStringOnly(Credentials::ServiceType::XTable));\n\t\t\t\tConfig->AddCredentials(creds, makeDefault);\n\t\t\t}\n\t\t\tcatch (const std::exception& e) {\n\t\t\t\tERROR(std::string(\"Storage credential validation for table storage failed: \").append(e.what()));\n\t\t\t}\n\t\t\tcatch (...) {\n\t\t\t\tERROR(\"Unknown exception thrown when validating storage credential for table storage\");\n\t\t\t}\n\t\t}\n\t}\n}\n\nsubelementmap_t CfgCtxAccount::_subelements;\n\nstd::string CfgCtxAccount::_name = \"Account\";\n\n///////// CfgCtxSAS\n\nvoid\nCfgCtxSAS::Enter(const xmlattr_t& properties)\n{\n\tstd::string moniker, account, token, decryptKeyPath, blobEndpoint, tableEndpoint;\n\tbool makeDefault = false;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"moniker\") {\n            moniker = item.second;\n\t\t}\n\t\telse if (item.first == \"key\") {\n\t\t\ttoken = item.second;\n\t\t\tMdsdUtil::ReplaceSubstring(token, \"&#38;\", \"&\");\n\t\t}\n\t\telse if (item.first == \"decryptKeyPath\") {\n\t\t\tdecryptKeyPath = item.second;\n\t\t}\n\t\telse if (item.first == \"account\") {\n\t\t\taccount = item.second;\n\t\t\tsize_t len = account.length();\n\t\t\t// Squeeze any embedded spaces from the account\n\t\t\taccount.erase(std::remove(account.begin(), account.end(), ' '), account.end());\n\t\t\tif (len != account.length()) {\n\t\t\t\tWARNING(\"Account cannot contain spaces; blanks were removed\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"blobEndpoint\") {\n\t\t\tblobEndpoint = item.second;\n\t\t}\n\t\telse if (item.first == \"tableEndpoint\") {\n\t\t\ttableEndpoint = item.second;\n\t\t}\n        else if (item.first == \"isDefault\") {\n            makeDefault = MdsdUtil::to_bool(item.second);\n        }\n\t\telse {\n\t\t\tWARNING(\"Ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\n\tif (moniker.empty()) {\n\t\tFATAL(\"\\\"moniker\\\" must be specified\");\n\t}\n\telse if (account.empty()) {\n\t\tFATAL(\"\\\"account\\\" must be specified\");\n\t}\n\telse if (token.empty()) {\n\t\tFATAL(\"\\\"key\\\" must be specified\");\n\t} else {\n\t\tif (!decryptKeyPath.empty()) {\n\t\t\ttry {\n\t\t\t\ttoken = cryptutil::DecodeAndDecryptString(decryptKeyPath, token);\n\t\t\t\tMdsdUtil::ReplaceSubstring(token, \"&#38;\", \"&\");\n\t\t\t}\n\t\t\tcatch (const std::exception& e) {\n\t\t\t\tERROR(std::string(\"Storage account SAS token decryption (using private key at \").append(decryptKeyPath).append(\") failed with the message: \").append(e.what()));\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tcatch (...) 
{\n\t\t\t\tERROR(\"Unknown exception thrown when decrypting storage account SAS token\");\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\n\t\ttry {\n\t\t\tauto creds = new CredentialType::SAS(moniker, account, token);\n\t\t\tif (!blobEndpoint.empty()) {\n\t\t\t\tcreds->BlobUri(blobEndpoint);\n\t\t\t}\n\t\t\tif (!tableEndpoint.empty()) {\n\t\t\t\tcreds->TableUri(tableEndpoint);\n\t\t\t}\n\n\t\t\tif (creds->IsAccountSas()) {\n\t\t\t\t/* Validate storage account for table access (same as above in shared key)\n\t\t\t\t * only if it's an account SAS. */\n\t\t\t\tMdsdUtil::ValidateStorageCredentialForTable(creds->GetConnectionStringOnly(Credentials::ServiceType::XTable));\n\t\t\t}\n\t\t\tConfig->AddCredentials(creds, makeDefault);\n\t\t}\n\t\tcatch (MdsdUtil::MdsdInvalidSASException& e) {\n\t\t\tERROR(std::string(\"Invalid SAS token given. Reason: \").append(e.what()));\n\t\t}\n\t\tcatch (const std::exception& e) {\n\t\t\tERROR(std::string(\"Storage credential validation for table storage failed: \").append(e.what()));\n\t\t}\n\t\tcatch (...) {\n\t\t\tERROR(\"Unknown exception thrown when validating storage credential for table storage\");\n\t\t}\n\t}\n}\n\nsubelementmap_t CfgCtxSAS::_subelements;\n\nstd::string CfgCtxSAS::_name = \"SharedAccessSignature\";\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxAccounts.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXACCOUNTS_HH_\n#define _CFGCTXACCOUNTS_HH_\n\n#include \"CfgContext.hh\"\n\n\nclass CfgCtxAccounts : public CfgContext\n{\npublic:\n\tCfgCtxAccounts(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxAccounts() {}\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\tCfgContext* Leave() override;\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxAccount : public CfgContext\n{\npublic:\n\tCfgCtxAccount(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxAccount() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxSAS : public CfgContext\n{\npublic:\n\tCfgCtxSAS(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxSAS() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXACCOUNTS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxDerived.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxDerived.hh\"\n#include \"CfgCtxError.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n#include \"StoreType.hh\"\n#include \"PipeStages.hh\"\n#include \"LADQuery.hh\"\n#include \"Priority.hh\"\n#include \"Trace.hh\"\n#include \"EventType.hh\"\n\n////////////////// CfgCtxDerived\n\nsubelementmap_t CfgCtxDerived::_subelements = {\n\t{ \"DerivedEvent\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxDerivedEvent(parent); } }\n};\n\nstd::string CfgCtxDerived::_name = \"DerivedEvents\";\n\n////////////////// CfgCtxDerivedEvent\n\nvoid\nCfgCtxDerivedEvent::Enter(const xmlattr_t& properties)\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxDerivedEvent::Enter\");\n\tstd::string eventName, account, source, durationString;\n\tMdsTime duration { 0 };\n\tbool NoPerNDay = false, isFullName = false;\n\tPriority priority { \"Normal\" };\n\n\t_task = nullptr;\n\t_isOK = true;\n\t_storeType = StoreType::XTable;\n\t_doSchemaGeneration = true;\n\n\tfor (const auto & iter : properties) {\n\t\tif (iter.first == \"eventName\") {\n\t\t\tif (MdsdUtil::NotValidName(iter.second)) {\n\t\t\t\tERROR(\"Invalid eventName attribute\");\n\t\t\t} else {\n\t\t\t\teventName = iter.second;\n\t\t\t}\n\t\t} else if (iter.first == \"priority\") {\n\t\t\tif (! priority.Set(iter.second)) {\n\t\t\t\tWARNING(\"Ignoring unknown priority \\\"\" + iter.second + \"\\\"\");\n\t\t\t}\n\t\t} else if (iter.first == \"account\") {\n\t\t\tif (MdsdUtil::NotValidName(iter.second)) {\n\t\t\t\tERROR(\"Invalid account attribute\");\n\t\t\t} else {\n\t\t\t\taccount = iter.second;\n\t\t\t}\n\t\t} else if (iter.first == \"dontUsePerNDayTable\") {\n\t\t\tNoPerNDay = MdsdUtil::to_bool(iter.second);\n\t\t} else if (iter.first == \"isFullName\") {\n\t\t\tisFullName = MdsdUtil::to_bool(iter.second);\n\t\t} else if (iter.first == \"duration\") {\n\t\t\tMdsTime requestedDuration = MdsTime::FromIS8601Duration(iter.second);\n\t\t\tif (!requestedDuration) {\n\t\t\t\tERROR(\"Invalid duration attribute\");\n\t\t\t\t_isOK = false;\n\t\t\t} else {\n\t\t\t\tduration = requestedDuration;\n\t\t\t\tdurationString = iter.second;\n\t\t\t}\n\t\t} else if (iter.first == \"storeType\") {\n\t\t\t_storeType = StoreType::from_string(iter.second);\n\t\t\t_doSchemaGeneration = StoreType::DoSchemaGeneration(_storeType);\n\t\t} else if (iter.first == \"source\") {\n\t\t\tif (MdsdUtil::NotValidName(iter.second)) {\n\t\t\t\tERROR(\"Invalid account attribute\");\n\t\t\t} else {\n\t\t\t\tsource = iter.second;\n\t\t\t}\n\t\t} else {\n\t\t\tWARNING(\"Ignoring unexpected attribute \" + iter.first);\n\t\t}\n\t}\n\n\tif (!duration) {\n\t\tERROR(\"The duration attribute is required\");\n\t\t_isOK = false;\n\t}\n\n\tif (!_isOK) {\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Build target on the stack, move it into the DerivedTask\n\t\tauto target = MdsEntityName { eventName, NoPerNDay, Config, account, _storeType, isFullName };\n\n\t\t_task = new DerivedEvent(Config, std::move(target), priority, duration, source);\n\t\t// Centrally-stored events implicitly have Identity columns added to them as\n\t\t// defined in the <Management> element. 
Add them first thing so they're available\n\t\t// to subsequent stages (if any).\n\t\tif (_storeType != StoreType::Local) {\n\t\t\t_task->AddStage(new Pipe::Identity(Config->GetIdentityVector()));\n\t\t}\n\t\tConfig->AddMonikerEventInfo(account, eventName, _storeType, source, mdsd::EventType::DerivedEvent);\n\t\tConfig->SetDurationForEventName(eventName, durationString);\n\t}\n\tcatch (const std::exception& ex) {\n\t\tERROR(ex.what());\n\t\t_isOK = false;\n\t\treturn;\n\t}\n\tcatch (...) {\n\t\tFATAL(\"Unknown exception; skipping\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n}\n\nCfgContext*\nCfgCtxDerivedEvent::Leave()\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxDerivedEvent::Leave\");\n\n\tif(_task) {\n\t\t// If not local, add a stage to push metadata into MDS. Derived queries should produce results with\n\t\t// the same schema each time. Doing an <LADQuery> doesn't change that.\n\t\tif (_doSchemaGeneration && _storeType != StoreType::Local) {\n\t\t\t_task->AddStage(new Pipe::BuildSchema(Config, _task->Target(), true));\n\t\t}\n\n\t\t// Find/make the batch for this task; add a final pipeline stage to write to that batch;\n\t\t// add the task to the set of tasks in this config.\n\t\tBatch *batch = Config->GetBatch(_task->Target(), _task->FlushInterval());\n\t\tif (batch) {\n\t\t\t_task->AddStage(new Pipe::BatchWriter(batch, Config->GetIdentityVector(),\n\t\t\t\t\t\t\t\tConfig->PartitionCount(), _storeType));\n\t\t\tConfig->AddTask(_task);\n\t\t} else {\n\t\t\tERROR(\"Configuration error(s) detected; dropping this DerivedEvent.\");\n\t\t\tdelete _task;\n\t\t}\n\t}\n\treturn ParentContext;\n}\n\nconst subelementmap_t&\nCfgCtxDerivedEvent::GetSubelementMap() const\n{\n\tif (_isOK) { return _subelements; }\n\telse { return CfgCtxError::subelements; }\n}\n\n\nsubelementmap_t CfgCtxDerivedEvent::_subelements {\n\t{ \"LADQuery\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxLADQuery(parent); } }\n};\n\nstd::string CfgCtxDerivedEvent::_name = \"DerivedEvent\";\n\n////////////////// CfgCtxLADQuery\n\nvoid\nCfgCtxLADQuery::Enter(const xmlattr_t& properties)\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxLADQuery::Enter\");\n\tstd::string valueAttrName, nameAttrName, partitionKey, uuid;\n\n\tCfgCtxDerivedEvent* query = dynamic_cast<CfgCtxDerivedEvent*>(ParentContext);\n\tif (!query) {\n\t\tERROR(\"<LADQuery> is not a valid subelement of <\" + ParentContext->Name() + \">\");\n\t\treturn;\n\t}\n\n\t// Bail if parent didn't parse right or didn't build a DerivedEvent instance\n\tif (! 
(query->isOK() && query->GetTask())) {\n\t\treturn;\n\t}\n\n\tfor (const auto& item : properties) {\n\t\tif (item.first == \"columnValue\") {\n\t\t\tvalueAttrName = item.second;\n\t\t} else if (item.first == \"columnName\") {\n\t\t\tnameAttrName = item.second;\n\t\t} else if (item.first == \"partitionKey\") {\n\t\t\tpartitionKey = item.second;\n\t\t} else if (item.first == \"instanceID\") {\n\t\t\tuuid = item.second;\n\t\t} else {\n\t\t\tWARNING(\"Ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (valueAttrName.empty() || nameAttrName.empty() || partitionKey.empty()) {\n\t\tERROR(\"Missing one or more required attributes (columnValue, columnName, partitionKey)\");\n\t\treturn;\n\t}\n\t// An empty or unset uuid attribute is permitted (and meaningful)\n\n\tauto task = query->GetTask();\n\ttask->AddStage(new Pipe::LADQuery(std::move(valueAttrName), std::move(nameAttrName),\n\t                                              std::move(partitionKey), std::move(uuid)));\n\t// Centrally-stored events implicitly have Identity columns added to them as\n\t// defined in the <Management> element. The LADQuery stage strips them off;\n\t// we should put them back in.\n\tif (! (query->isStoredLocally()) ) {\n\t\ttask->AddStage(new Pipe::Identity(Config->GetIdentityVector()));\n\t}\n\tquery->SuppressSchemaGeneration();\t// LAD queries don't generate entries in SchemasTable\n}\n\nsubelementmap_t CfgCtxLADQuery::_subelements;\n\nstd::string CfgCtxLADQuery::_name = \"LADQuery\";\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxDerived.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXDERIVED_HH_\n#define _CFGCTXDERIVED_HH_\n\n#include \"CfgContext.hh\"\n#include \"DerivedEvent.hh\"\n#include \"StoreType.hh\"\n\nclass CfgCtxDerived : public CfgContext\n{\npublic:\n\tCfgCtxDerived(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxDerived() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxDerivedEvent : public CfgContext\n{\npublic:\n\tCfgCtxDerivedEvent(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxDerivedEvent() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const;\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\n\tbool isOK() const { return _isOK; }\n\tbool isStoredLocally() const { return StoreType::Local == _storeType; }\n\tDerivedEvent * GetTask() const { return _task; }\n\tvoid SuppressSchemaGeneration() { _doSchemaGeneration = false; }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n\tDerivedEvent *_task;\n\tbool _isOK;\n\tStoreType::Type _storeType;\n\tbool _doSchemaGeneration;\n};\n\nclass CfgCtxLADQuery : public CfgContext\n{\npublic:\n\tCfgCtxLADQuery(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxLADQuery() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXDERIVED_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEnvelope.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxEnvelope.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n\n/////// CfgCtxEnvelope\n\nsubelementmap_t CfgCtxEnvelope::_subelements = {\n\t{ \"Field\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEnvelopeField(parent); } },\n\t{ \"Extension\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEnvelopeExtension(parent); } },\n};\n\nstd::string CfgCtxEnvelope::_name = \"EnvelopeSchema\";\n\n\n/////// CfgCtxEnvelopeField\n\nsubelementmap_t CfgCtxEnvelopeField::_subelements;\n\nstd::string CfgCtxEnvelopeField::_name = \"Field\";\n\nvoid\nCfgCtxEnvelopeField::SetFieldValueIfUnset(CfgCtxEnvelopeField::ValueSource source, const std::string & value)\n{\n\tif (Source != ValueSource::none) {\n\t\tWARNING(std::string(\"Cannot specify multiple sources for this value; using '\") + FieldValue + \"'\");\n\t} else {\n\t\tFieldValue = value;\n\t\tSource = source;\n\t}\n}\n\nvoid CfgCtxEnvelopeField::Enter(const xmlattr_t& properties)\n{\n\tSource = ValueSource::none;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"name\") {\n\t\t\tFieldName = item.second;\n\t\t} else if (item.first == \"envariable\") {\n\t\t\ttry {\n\t\t\t\tSetFieldValueIfUnset(ValueSource::environment, MdsdUtil::GetEnvironmentVariable(item.second));\n\t\t\t}\n\t\t\tcatch (std::exception & ex) {\n\t\t\t\tWARNING(ex.what());\n\t\t\t\tSetFieldValueIfUnset(ValueSource::environment, std::string());\n\t\t\t}\n\t\t} else if (item.first == \"useComputerName\") {\n\t\t\tSetFieldValueIfUnset(ValueSource::agentIdent, Config->AgentIdentity());\n\t\t} else {\n\t\t\tERROR(\"<Field> ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (FieldName.empty()) {\n\t\tERROR(\"<Field> missing required 'name' attribute\");\n\t}\n}\n\n\nvoid\nCfgCtxEnvelopeField::HandleBody(const std::string& body)\n{\n\tif (Source == ValueSource::environment || Source == ValueSource::agentIdent) {\n\t\tWARNING(std::string(\"Cannot specify multiple sources for this value; using '\") + FieldValue + \"'\");\n\t} else {\n\t\tFieldValue += body;\n\t\tSource = ValueSource::configFile;\n\t}\n}\n\nCfgContext*\nCfgCtxEnvelopeField::Leave()\n{\n\tif (!FieldName.empty()) {\n\t\tif (Source == ValueSource::none) {\n\t\t\tWARNING(\"No value supplied for this column; using empty string\");\n\t\t}\n\t\tConfig->AddEnvelopeColumn(std::move(FieldName), std::move(FieldValue));\n\t}\n\treturn ParentContext;\n}\n\n/////// CfgCtxEnvelopeExtension\n\nsubelementmap_t CfgCtxEnvelopeExtension::_subelements = {\n\t{ \"Field\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEnvelopeField(parent); } },\n};\n\nstd::string CfgCtxEnvelopeExtension::_name = \"Extension\";\n\nvoid CfgCtxEnvelopeExtension::Enter(const xmlattr_t& properties)\n{\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"name\") {\n\t\t\tExtensionName = item.second;\n\t\t} else {\n\t\t\tERROR(\"<EnvelopeSchema> ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (ExtensionName.empty()) {\n\t\tERROR(\"<Extension> missing required 'name' attribute\");\n\t}\n}\n\n// vim: set tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEnvelope.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXENVELOPE_HH_\n#define _CFGCTXENVELOPE_HH_\n\n#include \"CfgContext.hh\"\n\nclass LocalSink;\n\nclass CfgCtxEnvelope : public CfgContext\n{\npublic:\n\tCfgCtxEnvelope(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxEnvelope() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxEnvelopeField : public CfgContext\n{\npublic:\n\tenum ValueSource { none, environment, agentIdent, configFile };\n\n\tCfgCtxEnvelopeField(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxEnvelopeField() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tvirtual void HandleBody(const std::string& body);\n\tCfgContext* Leave();\n\n\tvoid SetFieldValueIfUnset(ValueSource, const std::string &);\n\nprivate:\n\tstd::string FieldName;\n\tstd::string FieldValue;\n\tValueSource Source;\n\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n};\n\n\nclass CfgCtxEnvelopeExtension : public CfgContext\n{\npublic:\n\tCfgCtxEnvelopeExtension(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxEnvelopeExtension() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstd::string ExtensionName;\n\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n};\n\n#endif //_CFGCTXENVELOPE_HH_\n\n// vim: set tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxError.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxError.hh\"\n\nsubelementmap_t CfgCtxError::subelements;\n\nstd::string CfgCtxError::name = \"(A previous error was detected)\";\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxError.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _CFGCTXERROR_HH_\n#define _CFGCTXERROR_HH_\n\n#include \"CfgContext.hh\"\n\n/// <summary>\n/// Once an unexpected element is found while parsing the config file, this class sets up\n/// an \"error detected\" context that is propagated until the parse leaves the unexpected\n/// element. Note that any Insert elements are ignored by this context.\n/// </summary>\nclass CfgCtxError :\n\tpublic CfgContext\n{\npublic:\n\tCfgCtxError(CfgContext* previousContext) : CfgContext(previousContext) {}\n\tvirtual ~CfgCtxError() { }\n\n\tvirtual const std::string& Name() const { return name; }\n\tconst subelementmap_t& GetSubelementMap() const { return subelements; }\n\n\t// We're deliberately silent on the attributes and body of elements while we're in an error state\n\tvoid Enter(const xmlattr_t&) {};\n\tvoid HandleBody(const std::string&) {};\n\tCfgContext* Leave() { return ParentContext; }\n\n\t/// <summary>\n\t/// An empty list of legal subelements. Any context can return this from GetSubelementMap() if the\n\t/// element has errors that block usage.\n\t/// </summary>\n\tstatic subelementmap_t subelements;\n\n\t/// <summary>True if the parse is in \"error\" state</summary>\n\tvirtual bool IsErrorContext() const { return true; }\n\nprivate:\n\tstatic std::string name;\n};\n\n#endif //_CFGCTXERROR_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEtw.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxEtw.hh\"\n#include \"MdsdConfig.hh\"\n#include \"CfgCtxParser.hh\"\n#include \"LocalSink.hh\"\n#include \"Subscription.hh\"\n#include \"PipeStages.hh\"\n#include \"EtwEvent.hh\"\n#include \"EventType.hh\"\n\n////////////////// CfgCtxEtwProviders\n\nsubelementmap_t CfgCtxEtwProviders::s_subelements = {\n    { \"EtwProvider\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxEtwProvider(parent); } }\n};\n\nstd::string CfgCtxEtwProviders::s_name = \"EtwProviders\";\n\n////////////////// CfgCtxEtwProvider\n\nstd::string CfgCtxEtwProvider::s_name = \"EtwProvider\";\n\nsubelementmap_t CfgCtxEtwProvider::s_subelements = {\n    {\"Event\", [] (CfgContext* parent) -> CfgContext * { return new CfgCtxEtwEvent(parent); } }\n};\n\nvoid\nCfgCtxEtwProvider::Enter(const xmlattr_t& properties)\n{\n    CfgCtx::CfgCtxParser parser(this);\n    if (!parser.ParseEtwProvider(properties)) {\n        return;\n    }\n\n    m_guid = parser.GetGuid();\n    m_priority = parser.GetPriority();\n\n    if (parser.HasStoreType()) {\n        m_storeType = parser.GetStoreType();\n    }\n}\n\nCfgContext*\nCfgCtxEtwProvider::Leave()\n{\n    return ParentContext;\n}\n\n////////////////// CfgCtxEtwEvent\nsubelementmap_t CfgCtxEtwEvent::s_subelements;\nstd::string CfgCtxEtwEvent::s_name = \"Event\";\n\nvoid\nCfgCtxEtwEvent::Enter(const xmlattr_t& properties)\n{\n    CfgCtx::CfgCtxParser parser(this);\n    if (!parser.ParseEvent(properties, CfgCtx::EventType::EtwEvent)) {\n        return;\n    }\n\n    CfgCtxEtwProvider *parent = dynamic_cast<CfgCtxEtwProvider*>(ParentContext);\n    if (!parent) {\n        FATAL(\"Found <\" + s_name + \"> in <\" + ParentContext->Name() + \">; that can't happen.\");\n        return;\n    }\n\n    auto guidstr = parent->GetGuid();\n    if (guidstr.empty()) {\n        ERROR(\"<\" + s_name + \"> missed required GUID attribute.\");\n        return;\n    }\n\n    if (parser.HasStoreType()) {\n        m_storeType = parser.GetStoreType();\n    }\n    else {\n        m_storeType = parent->GetStoreType();\n    }\n\n    if (StoreType::None == m_storeType) {\n        m_storeType = StoreType::XTable;\n    }\n\n    Priority priority;\n    if (parser.HasPriority()) {\n        priority = parser.GetPriority();\n    }\n    else {\n        priority = parent->GetPriority();\n    }\n\n    m_eventId = parser.GetEventId();\n\n    // for ETW, use local table name as LocalSink source.\n    std::string source = EtwEvent::BuildLocalTableName(guidstr, m_eventId);\n    m_sink = LocalSink::Lookup(source);\n    if (!m_sink) {\n        m_sink = new LocalSink(source);\n\tm_sink->AllocateSchemaId();\n    }\n\n    bool isNoPerNDay = parser.IsNoPerNDay();\n    std::string account = parser.GetAccount();\n    std::string eventName = parser.GetEventName();\n    time_t interval = parser.GetInterval();\n\n    try {\n        auto target = MdsEntityName { eventName, isNoPerNDay, Config, account, m_storeType };\n        m_subscription = new Subscription(m_sink, std::move(target), priority, MdsTime(interval));\n\n        if (StoreType::Local != m_storeType) {\n            m_subscription->AddStage(new Pipe::Identity(Config->GetIdentityVector()));\n        }\n        Config->AddMonikerEventInfo(account, eventName, m_storeType, source, mdsd::EventType::EtwEvent);\n    }\n    catch(const std::invalid_argument& ex) {\n        ERROR(ex.what());\n        return;\n    }\n    catch(...) 
{\n        FATAL(\"Unknown exception; skipping.\");\n        return;\n    }\n}\n\nCfgContext*\nCfgCtxEtwEvent::Leave()\n{\n    if (!m_subscription) {\n        return ParentContext;\n    }\n\n    if (StoreType::XTable == m_storeType) {\n        m_subscription->AddStage(new Pipe::BuildSchema(Config, m_subscription->target(), true));\n    }\n\n    Batch* batch = Config->GetBatch(m_subscription->target(), m_subscription->Duration());\n    if (batch) {\n        m_subscription->AddStage(new Pipe::BatchWriter(batch, Config->GetIdentityVector(),\n                                                       Config->PartitionCount(), m_storeType));\n        Config->AddTask(m_subscription);\n    }\n    else {\n        ERROR(\"Unable to create routing for \" + s_name + \" id=\" + std::to_string(m_eventId));\n    }\n\n    return ParentContext;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEtw.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _CFGCTXETW_HH_\n#define _CFGCTXETW_HH_\n\n#include \"CfgContext.hh\"\n#include \"CfgCtxError.hh\"\n#include \"StoreType.hh\"\n#include \"Priority.hh\"\n\nclass CfgCtxEtwProviders : public CfgContext\n{\npublic:\n    CfgCtxEtwProviders(CfgContext * config) : CfgContext(config) {}\n    virtual ~CfgCtxEtwProviders() {}\n\n    virtual const std::string& Name() const { return s_name; }\n    static const std::string& XmlName() { return s_name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return s_subelements; }\n\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n    static subelementmap_t s_subelements;\n    static std::string s_name;\n};\n\nclass CfgCtxEtwProvider : public CfgContext\n{\npublic:\n    CfgCtxEtwProvider(CfgContext * config) : CfgContext(config) {}\n    virtual ~CfgCtxEtwProvider() {}\n\n    virtual const std::string& Name() const { return s_name; }\n    static const std::string& XmlName() { return s_name; }\n\n    virtual const subelementmap_t& GetSubelementMap() const\n    {\n        return (m_guid.empty()? CfgCtxError::subelements : s_subelements);\n    }\n\n    void Enter(const xmlattr_t& properties);\n    CfgContext* Leave();\n\n    std::string GetGuid() const { return m_guid; }\n    StoreType::Type GetStoreType() const { return m_storeType; }\n    Priority GetPriority() const { return m_priority; }\n\nprivate:\n    static subelementmap_t s_subelements;\n    static std::string s_name;\n\n    std::string m_guid;\n    StoreType::Type m_storeType = StoreType::None;\n    Priority m_priority;\n};\n\nclass CfgCtxEtwEvent : public CfgContext\n{\npublic:\n    CfgCtxEtwEvent(CfgContext * config) : CfgContext (config) {}\n    virtual ~CfgCtxEtwEvent() {}\n\n    virtual const std::string& Name() const { return s_name; }\n    static const std::string& XmlName() { return s_name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return s_subelements; }\n\n    void Enter(const xmlattr_t& properties);\n    CfgContext* Leave();\n\nprivate:\n    static subelementmap_t s_subelements;\n    static std::string s_name;\n\n    StoreType::Type m_storeType = StoreType::None;\n    int m_eventId = -1;\n\n    class LocalSink* m_sink = nullptr;\n    class Subscription* m_subscription = nullptr;\n};\n\n\n#endif // _CFGCTXETW_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEventAnnotations.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxEventAnnotations.hh\"\n#include \"MdsdConfig.hh\"\n#include \"ConfigParser.hh\"\n#include \"MdsTime.hh\"\n#include \"Trace.hh\"\n#include \"CfgOboDirectConfig.hh\"\n#include \"MdsdEventCfg.hh\"\n#include \"EventPubCfg.hh\"\n#include \"Utility.hh\"\n#include \"cryptutil.hh\"\n\n///////// CfgCtxEventAnnotations\n\nsubelementmap_t CfgCtxEventAnnotations::_subelements = {\n    { \"EventStreamingAnnotation\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEventAnnotation(parent); } }\n};\n\nstd::string CfgCtxEventAnnotations::_name = \"EventStreamingAnnotations\";\n\nvoid\nCfgCtxEventAnnotations::SetEventType(\n    const std::string & itemname,\n    EventAnnotationType::Type type\n    )\n{\n    if (itemname.empty()) {\n        ERROR(\"<\" + Name() + \"> invalid empty itemname attribute\");\n        return;\n    }\n\n    // if duplicate, report error\n    auto item = _eventmap.find(itemname);\n    if (item != _eventmap.end()) {\n        if (item->second & type) {\n            ERROR(\"<\" + Name() + \"> itemname \" + itemname + \" already defined for type \" + std::to_string(type));\n        }\n    }\n\n    _eventmap[itemname] = static_cast< EventAnnotationType::Type>(_eventmap[itemname] | type);\n}\n\nCfgContext*\nCfgCtxEventAnnotations::Leave()\n{\n    if (_eventmap.size() > 0) {\n        Config->GetMdsdEventCfg()->SetEventAnnotationTypes(std::move(_eventmap));\n    }\n    return ParentContext;\n}\n\n///////// CfgCtxEventAnnotation\n\nsubelementmap_t CfgCtxEventAnnotation::_subelements = {\n    { \"EventPublisher\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEPA(parent); } },\n    { \"OnBehalf\", [](CfgContext* parent) -> CfgContext* {\n        return new CfgCtxOnBehalf(parent, dynamic_cast<CfgCtxEventAnnotation*>(parent)->_itemName); }\n    }\n};\n\nstd::string CfgCtxEventAnnotation::_name = \"EventStreamingAnnotation\";\n\n\nvoid\nCfgCtxEventAnnotation::Enter(const xmlattr_t& properties)\n{\n    const std::string attrName = \"name\";\n\n    for (const auto & item : properties) {\n        if (attrName ==  item.first) {\n            parse_singleton_attribute(item.first, item.second, attrName, _itemName);\n        }\n        else {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n    fatal_if_no_attributes(attrName, _itemName);\n}\n\nvoid\nCfgCtxEventAnnotation::SetEventType(EventAnnotationType::Type eventType)\n{\n    auto parentObj = dynamic_cast<CfgCtxEventAnnotations*>(ParentContext);\n    if (!parentObj) {\n        fatal_if_impossible_subelement();\n        return;\n    }\n\n    parentObj->SetEventType(_itemName, eventType);\n}\n\nvoid\nCfgCtxEventAnnotation::SetEventSasKey(\n    std::string&& saskey\n    )\n{\n    if (saskey.empty()) {\n        return;\n    }\n\n    // EventHubs publisher requires resourceId defined for Shoebox V2.\n    // If another scenario needs to be supported, this code may need to be changed as well.\n    if (Config->GetResourceId().empty()) {\n        ERROR(\"<\" + Name() + \">: OboDirectPartitionField resourceId is missing, when Shoebox V2 EventHubs publisher needs one.\");\n        return;\n    }\n\n    try {\n        Config->GetEventPubCfg()->AddAnnotationKey(_itemName, std::move(saskey));\n    }\n    catch(const std::exception& ex) {\n        ERROR(\"<\" + Name() + \"> exception: \" + ex.what());\n    }\n}\n\n///////// CfgCtxEPA\n\nsubelementmap_t CfgCtxEPA::_subelements = {\n    { 
\"Content\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEPAContent(parent); } },\n    { \"Key\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEPAKey(parent); } }\n};\n\nvoid\nCfgCtxEPA::Enter(const xmlattr_t& properties)\n{\n    warn_if_attributes(properties);\n\n    // Set the event type (EventPublisher) for the event (this Publisher element's parent's name attribute) in the EventAnnotations (grandparent) element's event type map\n    auto parentObj = dynamic_cast<CfgCtxEventAnnotation*>(ParentContext);\n    if (!parentObj)\n    {\n        fatal_if_impossible_subelement();\n        return;\n    }\n    parentObj->SetEventType(EventAnnotationType::Type::EventPublisher);\n}\n\nvoid\nCfgCtxEPA::SetEventSasKey(\n    std::string&& saskey\n    )\n{\n    if (saskey.empty()) {\n        return;\n    }\n\n    auto parentObj = dynamic_cast<CfgCtxEventAnnotation*>(ParentContext);\n    if (!parentObj)\n    {\n        fatal_if_impossible_subelement();\n        return;\n    }\n    parentObj->SetEventSasKey(std::move(saskey));\n}\n\nstd::string CfgCtxEPA::_name = \"EventPublisher\";\n\n///////// CfgCtxEPAContent\n\nsubelementmap_t CfgCtxEPAContent::_subelements;\nstd::string CfgCtxEPAContent::_name = \"Content\";\n\n\n///////// CfgCtxEPAKey\n\nsubelementmap_t CfgCtxEPAKey::_subelements;\nstd::string CfgCtxEPAKey::_name = \"Key\";\n\nvoid\nCfgCtxEPAKey::Enter(const xmlattr_t& properties)\n{\n    // Decrypt key path attribute is optional\n    const std::string & decryptKeyPathAttr = \"decryptKeyPath\";\n\n    for (const auto & item : properties) {\n        if (decryptKeyPathAttr == item.first) {\n            parse_singleton_attribute(item.first, item.second, decryptKeyPathAttr, _decryptKeyPath);\n        }\n        else {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n}\n\n\nCfgContext*\nCfgCtxEPAKey::Leave()\n{\n    if (Body.empty()) {\n        return ParentContext;\n    }\n\n    auto parentObj = dynamic_cast<CfgCtxEPA*>(ParentContext);\n    if (!parentObj)\n    {\n        fatal_if_impossible_subelement();\n        return ParentContext;\n    }\n\n    if (_decryptKeyPath.empty()) {\n        auto escapedConnStr = MdsdUtil::UnquoteXmlAttribute(Body);\n        parentObj->SetEventSasKey(std::move(escapedConnStr));\n    }\n    else {\n        if (!MdsdUtil::IsRegFileExists(_decryptKeyPath)) {\n            ERROR(\"Cannot find decrypt key path \" + _decryptKeyPath);\n        }\n        else {\n            try {\n                auto decryptedSas = cryptutil::DecodeAndDecryptString(_decryptKeyPath, Body);\n                parentObj->SetEventSasKey(std::move(decryptedSas));\n            }\n            catch(const std::exception & ex) {\n                ERROR(\"EventPublisher SAS key decryption using private key file '\" +\n                    _decryptKeyPath + \"' failed: \" + ex.what());\n            }\n        }\n    }\n\n    return ParentContext;\n}\n\n\n/////////// CfgCtxOnBehalf\n\nsubelementmap_t CfgCtxOnBehalf::_subelements = {\n        { \"Content\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxOnBehalfContent(parent, dynamic_cast<CfgCtxOnBehalf*>(parent)->_eventName); } }\n};\n\nstd::string CfgCtxOnBehalf::_name = \"OnBehalf\";\n\nvoid\nCfgCtxOnBehalf::Enter(const xmlattr_t& properties)\n{\n    std::string valDirectMode;\n    const std::string attrDirectMode = \"directMode\";\n\n    for (const auto& item : properties)\n    {\n        if (attrDirectMode == item.first)\n        {\n            parse_singleton_attribute(item.first, item.second, 
attrDirectMode, valDirectMode);\n        }\n        else\n        {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n    fatal_if_no_attributes(attrDirectMode, valDirectMode);\n\n    if (valDirectMode != \"true\")\n    {\n        ERROR(\"<\" + Name() + \"> currently supports only \" + attrDirectMode + \"=\\\"true\\\"\");\n    }\n\n    // Set the event type (OnBehalf) for the event (this OnBehalf element's parent's name attribute) in the EventAnnotations (grandparent) element's event type map\n    auto parentObj = dynamic_cast<CfgCtxEventAnnotation*>(ParentContext);\n    if (!parentObj)\n    {\n        fatal_if_impossible_subelement();\n        return;\n    }\n    parentObj->SetEventType(EventAnnotationType::Type::OnBehalf);\n}\n\n/////////// CfgCtxOnBehalfContent\n\nsubelementmap_t CfgCtxOnBehalfContent::_subelements = {\n        { \"Config\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxOnBehalfConfig(parent, dynamic_cast<CfgCtxOnBehalfContent*>(parent)->_eventName); } }    // This is a trick to handle the CDATA XML content as a subelement...\n};\n\nstd::string CfgCtxOnBehalfContent::_name = \"Content\";\n\nvoid\nCfgCtxOnBehalfContent::Enter(const xmlattr_t& properties)\n{\n    warn_if_attributes(properties);\n}\n\nCfgContext*\nCfgCtxOnBehalfContent::Leave()\n{\n    if (Body.empty())\n    {\n        ERROR(\"<\" + Name() + \"> must have a body (CDATA), but it's empty\");\n    }\n    else\n    {\n        // Trick: Parse the cdata (another XML) by treating it as a subelement...\n        ConfigParser xmlCdataParser(this, Config);\n        xmlCdataParser.Parse(Body);\n    }\n    return ParentContext;\n}\n\n///////////// CfgCtxOnBehalfConfig (XML in CDATA of CfgCtxOnBehalfContent...)\n\nsubelementmap_t CfgCtxOnBehalfConfig::_subelements;\n\nstd::string CfgCtxOnBehalfConfig::_name = \"Config\";\n\nvoid\nCfgCtxOnBehalfConfig::Enter(const xmlattr_t& properties)\n{\n    Trace trace(Trace::ConfigLoad, \"CfgCtxOnBehalfConfig::Enter\");\n\n    auto oboDirectConfig = std::make_shared<mdsd::OboDirectConfig>();\n\n    for (const auto& item : properties)\n    {\n        if (item.first == \"onBehalfFields\") // Not used by mdsd yet\n        {\n            oboDirectConfig->onBehalfFields = item.second;\n        }\n        else if (item.first == \"containerSuffix\") // Not used by mdsd yet\n        {\n            oboDirectConfig->containerSuffix = item.second;\n        }\n        else if (item.first == \"primaryPartitionField\")\n        {\n            oboDirectConfig->primaryPartitionField = item.second;\n        }\n        else if (item.first == \"partitionFields\")\n        {\n            oboDirectConfig->partitionFields = item.second;\n        }\n        else if (item.first == \"onBehalfReplaceFields\") // Not used by mdsd yet\n        {\n            oboDirectConfig->onBehalfReplaceFields = item.second;\n        }\n        else if (item.first == \"excludeFields\") // Not used by mdsd yet\n        {\n            oboDirectConfig->excludeFields = item.second;\n        }\n        else if (item.first == \"timePeriods\")\n        {\n            if (MdsTime::FromIS8601Duration(item.second).to_time_t() == 0)\n            {\n                ERROR(\"Invalid ISO8601 time duration given: \" + item.second);\n            }\n            else\n            {\n                oboDirectConfig->timePeriods = item.second;\n            }\n        }\n        else if (item.first == \"priority\")\n        {\n            oboDirectConfig->priority = item.second;\n        }\n        
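// Any other attribute is unexpected; warn and continue.\n        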
else\n        {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n\n    Config->AddOboDirectConfig(_eventName, std::move(oboDirectConfig));\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEventAnnotations.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXEVENTANNOTATIONS_HH_\n#define _CFGCTXEVENTANNOTATIONS_HH_\n\n#include \"CfgContext.hh\"\n#include \"CfgEventAnnotationType.hh\"\n#include <unordered_map>\n\nclass CfgCtxEventAnnotations : public CfgContext\n{\npublic:\n    CfgCtxEventAnnotations(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEventAnnotations() {}\n\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n    CfgContext* Leave();\n\n    /// Set each annotation name and type.\n    /// The itemname can be event name, source name, etc.\n    void SetEventType(const std::string & itemname, EventAnnotationType::Type type);\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n\n    /// map key: itemname, value: annotation type\n    std::unordered_map<std::string, EventAnnotationType::Type> _eventmap;\n};\n\nclass CfgCtxEventAnnotation : public CfgContext\n{\npublic:\n    CfgCtxEventAnnotation(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEventAnnotation() {}\n\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties);\n\n    void SetEventType(EventAnnotationType::Type type);\n    void SetEventSasKey(std::string&& saskey);\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n\n    std::string _itemName;\n};\n\nclass CfgCtxEPA : public CfgContext\n{\npublic:\n    CfgCtxEPA(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEPA() {}\n\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties);\n    void SetEventSasKey(std::string&& saskey);\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\nclass CfgCtxEPAContent : public CfgContext\n{\npublic:\n    CfgCtxEPAContent(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEPAContent() {}\n\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\nclass CfgCtxEPAKey : public CfgContext\n{\npublic:\n    CfgCtxEPAKey(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEPAKey() {}\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties);\n    CfgContext* Leave();\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n    std::string _decryptKeyPath;\n};\n\n\n\nclass CfgCtxOnBehalf : public CfgContext\n{\npublic:\n    CfgCtxOnBehalf(CfgContext* config, const std::string& eventName)\n        : CfgContext(config), _eventName(eventName)\n    {}\n    virtual ~CfgCtxOnBehalf() {}\n\n    virtual const std::string& Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return 
_subelements; }\n    void Enter(const xmlattr_t& properties);\n\nprivate:\n    static std::string _name;\n    static subelementmap_t _subelements;\n\n    std::string _eventName;\n};\n\nclass CfgCtxOnBehalfContent : public CfgContext\n{\npublic:\n    CfgCtxOnBehalfContent(CfgContext* config, const std::string& eventName)\n        : CfgContext(config), _eventName(eventName)\n    {}\n    virtual ~CfgCtxOnBehalfContent() {}\n\n    virtual const std::string& Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties);\n    CfgContext* Leave();\n\nprivate:\n    static std::string _name;\n    static subelementmap_t _subelements;\n\n    std::string _eventName;\n};\n\nclass CfgCtxOnBehalfConfig : public CfgContext\n{\npublic:\n    CfgCtxOnBehalfConfig(CfgContext* config, const std::string& eventName)\n        : CfgContext(config), _eventName(eventName)\n    {}\n    virtual ~CfgCtxOnBehalfConfig() {}\n\n    virtual const std::string& Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties);\n\nprivate:\n    static std::string _name;\n    static subelementmap_t _subelements;\n\n    std::string _eventName;\n};\n\n#endif // _CFGCTXEVENTANNOTATIONS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEvents.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxEvents.hh\"\n#include \"CfgCtxHeartBeats.hh\"\n#include \"CfgCtxOMI.hh\"\n#include \"CfgCtxMdsdEvents.hh\"\n#include \"CfgCtxDerived.hh\"\n#include \"CfgCtxExtensions.hh\"\n#include \"CfgCtxEtw.hh\"\n\n////////////////// CfgCtxEvents\n\nsubelementmap_t CfgCtxEvents::_subelements = {\n\t{ \"HeartBeats\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxHeartBeats(parent); } },\n\t{ \"OMI\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxOMI(parent); } },\n\t{ \"MdsdEvents\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxMdsdEvents(parent); } },\n\t{ \"DerivedEvents\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxDerived(parent); } },\n\t{ \"Extensions\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxExtensions(parent); } },\n\t{ \"EtwProviders\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEtwProviders(parent); } }\n};\n\nstd::string CfgCtxEvents::_name = \"Events\";\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxEvents.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXEVENTS_HH_\n#define _CFGCTXEVENTS_HH_\n\n#include \"CfgContext.hh\"\n\nclass CfgCtxEvents : public CfgContext\n{\npublic:\n\tCfgCtxEvents(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxEvents() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXEVENTS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxExtensions.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxExtensions.hh\"\n#include \"Utility.hh\"\n#include \"MdsdExtension.hh\"\n#include \"MdsdConfig.hh\"\n#include \"CmdLineConverter.hh\"\n\n////////////////// CfgCtxExtensions\n\nsubelementmap_t CfgCtxExtensions::_subelements = {\n    { \"Extension\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxExtension(parent); } }\n};\n\nstd::string CfgCtxExtensions::_name = \"Extensions\";\n\n////////////////// CfgCtxExtension\n\nvoid\nCfgCtxExtension::Enter(const xmlattr_t& properties)\n{\n    const std::string extNameAttr = \"extensionName\";\n\n    for (const auto & item : properties) {\n        if (extNameAttr == item.first)\n        {\n            _extension_name = item.second;\n        }\n        else  {\n            WARNING(\"Ignoring unexpected attribute \" + item.second);\n        }\n    }\n    \n    if (_extension_name.empty())\n    {\n        ERROR(\"<\" + _name + \"> requires attribute '\" + extNameAttr + \"'\");\n    }\n\n    _extension = new MdsdExtension(_extension_name);\n}\n\nCfgContext* CfgCtxExtension::Leave()\n{\n    if (_extension)\n    {\n        Config->AddExtension(_extension);\n    }\n    else {\n        ERROR(\"Unexpected NULL value for MdsdExtension object in CfgCtxExtension.\");\n    }\n    return ParentContext;\n}\n\n\nsubelementmap_t CfgCtxExtension::_subelements = {\n    { \"CommandLine\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxExtCmdLine(parent); } },\n    { \"Body\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxExtBody(parent); } },\n    { \"AlternativeExtensionLocation\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxExtAlterLocation(parent); } },\n    { \"ResourceUsage\", [] (CfgContext* parent) -> CfgContext* { return new CfgCtxExtResourceUsage(parent); } }\n};\n\nstd::string CfgCtxExtension::_name = \"Extension\";\n\n\n////////////////// CfgCtxExtCmdLine\n\nCfgContext* CfgCtxExtCmdLine::Leave()\n{\n    std::string cmdline = std::move(Body);\n\n    if (MdsdUtil::IsEmptyOrWhiteSpace(cmdline))\n    {\n        ERROR(\"unexpected empty or whitespace value for Extension CmdLine\");\n    }\n    else\n    {\n        CfgCtxExtension * ctxext = dynamic_cast<CfgCtxExtension*>(ParentContext);\n        if (ctxext)\n        {\n            CmdLineConverter::Tokenize(cmdline, std::bind(&CfgContext::WARNING, this, std::placeholders::_1));\t// To warn (if any) sooner than later\n            ctxext->GetExtension()->SetCmdLine(cmdline);\n        }\n        else {\n            FATAL(\"Found <\" + _name + \"> in <\" + ParentContext->Name() + \">; that can't happen\");\n        }\n    }\n\n    return ParentContext;\n}\n\nsubelementmap_t CfgCtxExtCmdLine::_subelements;\nstd::string CfgCtxExtCmdLine::_name = \"CommandLine\";\n\n////////////////// CfgCtxExtBody\n\nCfgContext* CfgCtxExtBody::Leave()\n{\n    if (empty_or_whitespace())\n    {\n        WARNING(\"<\" + _name + \"> expected non-empty body; did not expect '{\" + Body + \"}'\");\n    }\n    else\n    {\n        CfgCtxExtension * ctxext = dynamic_cast<CfgCtxExtension*>(ParentContext);\n        if (ctxext)\n        {\n            ctxext->GetExtension()->SetBody(Body);\n        }\n        else {\n            FATAL(\"Found <\" + _name + \"> in <\" + ParentContext->Name() + \">; that can't happen\");\n        }\n    }\n\n    return ParentContext;\n}\n\nsubelementmap_t CfgCtxExtBody::_subelements;\nstd::string CfgCtxExtBody::_name = 
\"Body\";\n\n\n////////////////// CfgCtxExtAlterLocation\n\nCfgContext* CfgCtxExtAlterLocation::Leave()\n{\n    std::string loc = std::move(Body);\n    if (MdsdUtil::IsEmptyOrWhiteSpace(loc))\n    {\n        WARNING(\"<\" + _name + \"> value cannot be empty or whitespace.\");\n    }\n    else\n    {\n        CfgCtxExtension * ctxext = dynamic_cast<CfgCtxExtension*>(ParentContext);\n        if (ctxext)\n        {\n            ctxext->GetExtension()->SetAlterLocation(loc);\n        }\n        else {\n            FATAL(\"Found <\" + _name + \"> in <\" + ParentContext->Name() + \">; that can't happen\");\n        }\n    }\n\n    return ParentContext;\n}\n\nsubelementmap_t CfgCtxExtAlterLocation::_subelements;\nstd::string CfgCtxExtAlterLocation::_name = \"AlternativeExtensionLocation\";\n\n////////////////// CfgCtxExtResourceUsage\n\nvoid \nCfgCtxExtResourceUsage::Enter(const xmlattr_t& properties)\n{\n    CfgCtxExtension * ctxext = dynamic_cast<CfgCtxExtension*>(ParentContext);\n    if (!ctxext) {\n        FATAL(\"Found <\" + _name + \"> in <\" + ParentContext->Name() + \">; that can't happen\");\n        return;\n    }\n\n    MdsdExtension * ext = ctxext->GetExtension();\n\n    for (const auto & item : properties) {\n        if (\"cpuPercentUsage\" == item.first)\n        {\n            float f = std::stof(item.second);\n            ext->SetCpuPercentUsage(f);\n        }\n        else if (\"cpuThrottling\" == item.first)\n        {\n            bool b = MdsdUtil::to_bool(item.second);\n            ext->SetIsCpuThrottling(b);\n        }\n        else if (\"memoryLimitInMB\" == item.first)\n        {\n            unsigned long long m = std::stoull(item.second);\n            ext->SetMemoryLimitInMB(m);\n        }\n        else if (\"memoryThrottling\" == item.first)\n        {\n            bool b = MdsdUtil::to_bool(item.second);\n            ext->SetIsMemoryThrottling(b);\n        }\n        else if (\"ioReadLimitInKBPerSecond\" == item.first)\n        {\n            unsigned long long n = std::stoull(item.second);\n            ext->SetIOReadLimitInKBPerSecond(n);\n        }\n        else if (\"ioReadThrottling\" == item.first)\n        {\n            bool b = MdsdUtil::to_bool(item.second);\n            ext->SetIsIOReadThrottling(b);\n        }\n        else if (\"ioWriteLimitInKBPerSecond\" == item.first)\n        {\n            unsigned long long n = std::stoull(item.second);\n            ext->SetIOWriteLimitInKBPerSecond(n);\n        }\n        else if (\"ioWriteThrottling\" == item.first)\n        {\n            bool b = MdsdUtil::to_bool(item.second);\n            ext->SetIsIOWriteThrottling(b);\n        }\n        else\n        {\n            WARNING(\"Ignoring unexpected attribute \" + item.second);\n        }\n    }\n}\n\nsubelementmap_t CfgCtxExtResourceUsage::_subelements;\nstd::string CfgCtxExtResourceUsage::_name = \"ResourceUsage\";\n\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxExtensions.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXEXTENSIONS_HH_\n#define _CFGCTXEXTENSIONS_HH_\n\n#include \"CfgContext.hh\"\n#include \"CfgCtxError.hh\"\n#include <map>\n\nclass MdsdExtension;\n\n/// <summmary>\n/// Extensions define all the monitoring agent's extensions.\n/// </summary>\nclass CfgCtxExtensions : public CfgContext\n{\npublic:\n    CfgCtxExtensions(CfgContext *config) : CfgContext(config) {}\n    virtual ~CfgCtxExtensions() { }\n\n    virtual const std::string& Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n/// <summmary>\n/// Extension specifies one monitoring agent extension. The Name and CommandLine of an\n/// extension are required. Other properperties are optional.\n/// </summary>\nclass CfgCtxExtension : public CfgContext\n{\npublic:\n    CfgCtxExtension(CfgContext * config) : \n    CfgContext(config), \n    _extension(nullptr)\n    {}\n\n    virtual ~CfgCtxExtension() { }\n\n    virtual const std::string& Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const\n    {\n        return (_extension_name.empty())? (CfgCtxError::subelements) : (_subelements);\n    }\n\n    void Enter(const xmlattr_t& properties);\n    CfgContext* Leave();\n    MdsdExtension * GetExtension() const { return _extension; }\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n    std::string _extension_name;\n    MdsdExtension * _extension;\n};\n\n/// <summmary>\n/// This specifies an extension's command line. It is required.\n/// </summary>\nclass CfgCtxExtCmdLine : public CfgContext\n{\npublic:\n    CfgCtxExtCmdLine(CfgContext * config) : CfgContext(config) {}\n    virtual ~CfgCtxExtCmdLine() { }\n\n    virtual const std::string & Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n    CfgContext* Leave();\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n/// <summary>\n/// Body: optional XML element. It specifies an extension's config body to be passed to the\n/// extension via environment variable \"MON_EXTENSION_BODY\".\n/// </summary>\nclass CfgCtxExtBody : public CfgContext\n{\npublic:\n    CfgCtxExtBody(CfgContext * config) : CfgContext(config) {}\n    virtual ~CfgCtxExtBody() { }\n\n    virtual const std::string & Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n    CfgContext* Leave();\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n/// <summmary>\n/// This specifies the extension home directory. 
It is optional.\n/// </summary>\nclass CfgCtxExtAlterLocation : public CfgContext\n{\npublic:\n    CfgCtxExtAlterLocation(CfgContext * config) : CfgContext(config) {}\n    virtual ~CfgCtxExtAlterLocation() { }\n\n    virtual const std::string & Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n    CfgContext* Leave();\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n/// <summary>\n/// This specifies the CPU, memory, and IO throttling limits. They overwrite\n/// the default values defined in Management\\AgentResourceUsage\\ExtensionResourceUsage.\n/// </summary>\nclass CfgCtxExtResourceUsage : public CfgContext\n{\npublic:\n    CfgCtxExtResourceUsage(CfgContext * config) : CfgContext(config) { }\n    virtual ~CfgCtxExtResourceUsage() { }\n\n    virtual const std::string & Name() const { return _name; }\n    static const std::string& XmlName() { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties);\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n\n#endif // _CFGCTXEXTENSIONS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxHeartBeats.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxHeartBeats.hh\"\n\n////////////////// CfgCtxHeartBeats\n\nsubelementmap_t CfgCtxHeartBeats::_subelements = {\n\t{ \"HeartBeat\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxHeartBeat(parent); } }\n};\n\nstd::string CfgCtxHeartBeats::_name = \"HeartBeats\";\n\n////////////////// CfgCtxHeartBeat\n\nsubelementmap_t CfgCtxHeartBeat::_subelements;\n\nstd::string CfgCtxHeartBeat::_name = \"HeartBeat\";\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxHeartBeats.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#include \"CfgContext.hh\"\n\n\nclass CfgCtxHeartBeats : public CfgContext\n{\npublic:\n\tCfgCtxHeartBeats(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxHeartBeats() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxHeartBeat : public CfgContext\n{\npublic:\n\tCfgCtxHeartBeat(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxHeartBeat() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { log_entry(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxImports.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxImports.hh\"\n#include \"MdsdConfig.hh\"\n\n////////////// CfgCtxImports\n\nsubelementmap_t CfgCtxImports::_subelements = {\n\t{ \"Import\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxImport(parent); } }\n};\n\nstd::string CfgCtxImports::_name = \"Imports\";\n\n////////////// CfgCtxImport\n\nsubelementmap_t CfgCtxImport::_subelements;\n\nstd::string CfgCtxImport::_name = \"Import\";\n\nvoid\nCfgCtxImport::Enter(const xmlattr_t& properties)\n{\n\tstd::string filename;\n\n\t// Find the file attribute; invoke Config->LoadFromConfigFile() on the value thereof.\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"file\") {\n\t\t\tfilename = item.second;\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::warning, \"Ignoring unknown attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\tif (filename.empty()) {\n\t\tConfig->AddMessage(MdsdConfig::error, \"<Import>:  \\\"file\\\" attribute is missing or empty\");\n\t}\n\telse {\n\t\tConfig->LoadFromConfigFile(filename);\n\t}\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxImports.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXIMPORTS_HH_\n#define _CFGCTXIMPORTS_HH_\n\n#include \"CfgContext.hh\"\n\n\nclass CfgCtxImports : public CfgContext\n{\npublic:\n\tCfgCtxImports(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxImports() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxImport : public CfgContext\n{\npublic:\n\tCfgCtxImport(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxImport() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXIMPORTS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxManagement.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxManagement.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Listener.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include <cstdlib>\n\n/////// CfgCtxManagement\n\nsubelementmap_t CfgCtxManagement::_subelements = {\n\t{ \"Identity\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxIdentity(parent); } },\n\t{ \"AgentResourceUsage\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxAgentResourceUsage(parent); } },\n\t{ \"OboDirectPartitionField\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxOboDirectPartitionField(parent); } }\n};\n\nstd::string CfgCtxManagement::_name = \"Management\";\n\nstd::map<std::string, unsigned int> CfgCtxManagement::_eventVolumes = {\n\t{ \"Small\", 1 },\n\t{ \"small\", 1 },\n\t{ \"Medium\", 10 },\n\t{ \"medium\", 10 },\n\t{ \"Large\", 100 },\n\t{ \"large\", 100 }\n};\n\nvoid CfgCtxManagement::Enter(const xmlattr_t& properties)\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxManagement::Enter\");\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"eventVolume\") {\n\t\t\tauto numPart = _eventVolumes.find(item.second);\n\t\t\tif (numPart != _eventVolumes.end()) {\n\t\t\t\tConfig->PartitionCount(numPart->second);\n\t\t\t} else {\n\t\t\t\tERROR(\"Unknown eventVolume \\\"\" + item.second + \"\\\"\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"defaultRetentionInDays\") {\n\t\t\tunsigned long retention = std::stoul(item.second);\n\t\t\tif (retention < 1) {\n\t\t\t\tERROR(\"Invalid value for defaultRetentionInDays\");\n\t\t\t} else {\n\t\t\t\tConfig->DefaultRetention(retention);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::warning, \"<Management> ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n}\n\n////// CfgCtxIdentity\n\nsubelementmap_t CfgCtxIdentity::_subelements = {\n\t{ \"IdentityComponent\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxIdentityComponent(parent); } }\n};\n\nstd::string CfgCtxIdentity::_name = \"Identity\";\n\nvoid CfgCtxIdentity::Enter(const xmlattr_t& properties)\n{\n    Config->SetTenantAlias(\"Tenant\");\n    Config->SetRoleAlias(\"Role\");\n    Config->SetRoleInstanceAlias(\"RoleInstance\");\n\n    for (const auto& item : properties)\n    {\n        if (item.first == \"type\") {\n            if (item.second == \"TenantRole\") {\n                // Add three identity components based on envariables\n                AddEnvariable(\"Tenant\", \"MONITORING_TENANT\");\n                AddEnvariable(\"Role\", \"MONITORING_ROLE\");\n                AddEnvariable(\"RoleInstance\", \"MONITORING_ROLE_INSTANCE\");\n                IdentityWasSet = true;\n            } else if (item.second == \"ComputerName\") {\n                // Add a single identity component containing the hostname\n                (void)Config->AddIdentityColumn(\"ComputerName\", Config->AgentIdentity());\n                IdentityWasSet = true;\n            } else {\n                WARNING(\"Ignoring unknown type \" + item.second);\n            }\n        } else if (item.first == \"tenantNameAlias\") {\n\t    Config->SetTenantAlias(item.second);\n        } else if (item.first == \"roleNameAlias\") {\n\t    Config->SetRoleAlias(item.second);\n        } else if (item.first == \"roleInstanceNameAlias\") {\n\t    Config->SetRoleInstanceAlias(item.second);\n        } else {\n            WARNING(\"Ignoring unknown attribute \" + item.first);\n        }\n    
}\n}\n\nvoid\nCfgCtxIdentity::AddString(const std::string& name, const std::string& value)\n{\n\tif (IdentityWasSet) {\n\t\tWARNING(\"Ignoring extra identity column \" + name);\n\t\treturn;\n\t}\n\n\tif (!(Config->AddIdentityColumn(name, value))) {\n\t\tERROR(\"Duplicate IdentityComponent \" + name);\n\t}\n}\n\nvoid\nCfgCtxIdentity::AddEnvariable(const std::string& name, const std::string& varname)\n{\n\tif (IdentityWasSet) {\n\t\tWARNING(\"Ignoring extra identity column \" + name);\n\t\treturn;\n\t}\n\n\ttry {\n\t\tstd::string Value = MdsdUtil::GetEnvironmentVariable(varname);\n\t\tif (!(Config->AddIdentityColumn(name, Value))) {\n\t\t\tERROR(\"Duplicate IdentityComponent \" + name);\n\t\t}\n\t}\n\tcatch (std::exception & ex) {\n\t\tWARNING(std::string(ex.what()) + \"; \" + name + \" not added to identity columns\");\n\t}\n}\n\n////// CfgCtxIdentityComponent\n\nsubelementmap_t CfgCtxIdentityComponent::_subelements;\n\nstd::string CfgCtxIdentityComponent::_name = \"IdentityComponent\";\n\nvoid CfgCtxIdentityComponent::Enter(const xmlattr_t& properties)\n{\n\tIsValid = true;\t\t// Assume this will be a valid definition\n\tIgnoreBody = ExtraBody = false;\n\n\tstd::string Envariable;\n\tbool useHostname = false;\n\n\t_ctxidentity = dynamic_cast<CfgCtxIdentity*>(ParentContext);\n\tif (!_ctxidentity) {\n\t\tFATAL(\"Found <IdentityComponent> in <\" + ParentContext->Name() + \">; that can't happen\");\n\t\tIsValid = false;\n\t\treturn;\n\t}\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"name\") {\n\t\t\tComponentName = item.second;\n\t\t} else if (item.first == \"envariable\") {\n\t\t\tEnvariable = item.second;\n\t\t} else if (item.first == \"useComputerName\") {\n\t\t\tuseHostname = MdsdUtil::to_bool(item.second);\n\t\t} else {\n\t\t\tERROR(\"<IdentityComponent> ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (ComponentName.empty()) {\n\t\tERROR(\"<IdentityComponent> requires attribute \\\"name\\\"\");\n\t\tIsValid = false;\n\t} else if (!Envariable.empty() && useHostname) {\n\t\tERROR(\"Cannot specify both useComputerName and envariable for the same <IdentityComponent>\");\n\t\tIsValid = false;\n\t} else if (!Envariable.empty() || useHostname) {\n\t\tIgnoreBody = true;\n\t\tif (useHostname) {\n\t\t\t_ctxidentity->AddString(ComponentName, Config->AgentIdentity());\n\t\t} else {\n\t\t\t_ctxidentity->AddEnvariable(ComponentName, Envariable);\n\t\t}\n\t}\n\t// If !IgnoreBody && IsValid, then the Leave() method will add the accumulated\n\t// string to the Identity column set\n\tif (!IsValid) {\n\t\tIgnoreBody = true;\n\t}\n}\n\nvoid\nCfgCtxIdentityComponent::HandleBody(const std::string& body)\n{\n\tif (IgnoreBody) {\n\t\tExtraBody = true;\t// We'll ignore it and warn about it\n\t} else {\n\t\tBody += body;\n\t}\n}\n\nCfgContext* CfgCtxIdentityComponent::Leave()\n{\n\tif (!IsValid) {\n\t\tWARNING(\"Skipping invalid IdentityComponent\");\n\t} else if (ExtraBody) {\n\t\tWARNING(\"Ignoring extra content for IdentityComponent; hope that's okay\");\n\t} else if (!IgnoreBody) {\n\t\tif (empty_or_whitespace()) {\n\t\t\tWARNING(\"Empty value for IdentityComponent; hope that's okay\");\n\t\t}\n\t\t_ctxidentity->AddString(ComponentName, Body);\n\t}\n\treturn ParentContext;\n}\n\n////// CfgCtxAgentResourceUsage\n\nsubelementmap_t CfgCtxAgentResourceUsage::_subelements;\n\nstd::string CfgCtxAgentResourceUsage::_name = \"AgentResourceUsage\";\n\nvoid CfgCtxAgentResourceUsage::Enter(const xmlattr_t& properties)\n{\n\tfor (const auto& item : properties)\n\t{\n\t\tif 
(item.first == \"diskQuotaInMB\") {\n\t\t\tunsigned long diskQuota = std::stoul(item.second);\n\t\t\tif (diskQuota < 1) {\n\t\t\t\tERROR(\"diskQuotaInMB must be greater than zero\");\n\t\t\t} else {\n\t\t\t\tConfig->AddQuota(\"disk\", diskQuota);\n\t\t\t}\n\t\t} else if (item.first == \"dupeWindowSeconds\") {\n\t\t\tunsigned long dupeWindow = std::stoul(item.second);\n\t\t\tif (dupeWindow < 60) {\n\t\t\t\tWARNING(\"dupeWindowSeconds must be >= 60\");\n\t\t\t\tdupeWindow = 60;\n\t\t\t} else if (dupeWindow > 3600) {\n\t\t\t\tWARNING(\"dupeWindowSeconds must be <= 3600\");\n\t\t\t\tdupeWindow = 3600;\n\t\t\t}\n\t\t\tListener::setDupeWindow(dupeWindow);\n\t\t} else {\n\t\t\tERROR(\"<AgentResourceUsage> ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n}\n\n////// CfgCtxOboDirectPartitionField\n\nsubelementmap_t CfgCtxOboDirectPartitionField::_subelements;\n\nstd::string CfgCtxOboDirectPartitionField::_name = \"OboDirectPartitionField\";\n\nvoid CfgCtxOboDirectPartitionField::Enter(const xmlattr_t& properties)\n{\n\tstd::string name, value;\n\n\tfor (const auto& item : properties) {\n\t\tif (item.first == \"name\") {\n\t\t\tname = item.second;\n\t\t}\n\t\telse if (item.first == \"value\") {\n\t\t\tvalue = item.second;\n\t\t}\n\t\telse {\n\t\t\tWARNING(\"Ignoring unknown attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (name.empty() || value.empty()) {\n\t\tERROR(\"<OboDirectPartitionField> requires both 'name' and 'value' attributes.\");\n\t\treturn;\n\t}\n\n\tConfig->SetOboDirectPartitionFieldNameValue(std::move(name), std::move(value));\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxManagement.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXMANAGEMENT_HH_\n#define _CFGCTXMANAGEMENT_HH_\n\n#include \"CfgContext.hh\"\n#include <list>\n\nclass TableSchema;\n\nclass CfgCtxManagement : public CfgContext\n{\npublic:\n\tCfgCtxManagement(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxManagement() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\tstatic std::map<std::string, unsigned int> _eventVolumes;\n};\n\nclass CfgCtxIdentity : public CfgContext\n{\npublic:\n\tCfgCtxIdentity(CfgContext* config) : CfgContext(config), IdentityWasSet(false) {}\n\tvirtual ~CfgCtxIdentity() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\n\tvoid AddString(const std::string& n, const std::string& str);\n\tvoid AddEnvariable(const std::string& n, const std::string& varname);\n\nprivate:\n\tbool IdentityWasSet;\n\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxIdentityComponent : public CfgContext\n{\npublic:\n\tCfgCtxIdentityComponent(CfgContext* config) : CfgContext(config), _ctxidentity(nullptr) {}\n\tvirtual ~CfgCtxIdentityComponent() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tvirtual void HandleBody(const std::string& body);\n\tCfgContext* Leave();\n\nprivate:\n\tstd::string ComponentName;\n\tbool IsValid;\n\t//bool GotBody;\n\tbool ExtraBody;\n\tbool IgnoreBody;\n\tCfgCtxIdentity* _ctxidentity;\n\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxAgentResourceUsage : public CfgContext\n{\npublic:\n\tCfgCtxAgentResourceUsage(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxAgentResourceUsage() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\nclass CfgCtxOboDirectPartitionField : public CfgContext\n{\npublic:\n\tCfgCtxOboDirectPartitionField(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxOboDirectPartitionField() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXMANAGEMENT_HH_\n\n// :vim set ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxMdsdEvents.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxMdsdEvents.hh\"\n#include \"MdsdConfig.hh\"\n#include \"MdsEntityName.hh\"\n#include \"Subscription.hh\"\n#include \"Utility.hh\"\n#include \"Priority.hh\"\n#include \"PipeStages.hh\"\n#include \"LocalSink.hh\"\n#include <iterator>\n#include \"CfgCtxParser.hh\"\n#include \"EventType.hh\"\n\n////////////////// CfgCtxMdsdEvents\n\nsubelementmap_t CfgCtxMdsdEvents::_subelements = {\n\t{ \"MdsdEventSource\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxMdsdEventSource(parent); } }\n};\n\nstd::string CfgCtxMdsdEvents::_name = \"MdsdEvents\";\n\n////////////////// CfgCtxMdsdEventSource\n\nvoid\nCfgCtxMdsdEventSource::Enter(const xmlattr_t& properties)\n{\n\tfor (const auto& item : properties) {\n\t\tif (item.first == \"source\") {\n\t\t\t_source = item.second;\n\t\t} else {\n\t\t\tWARNING(\"Ignoring unexpected attribute \" + item.first);\n\t\t}\n\t}\n\n\tif (_source.empty()) {\n\t\tERROR(\"Missing required source attribute\");\n\t\treturn;\n\t}\n\n\tif (!Config->IsValidSource(_source) && !Config->IsValidDynamicSchemaSource(_source)) {\n\t\tERROR(\"Undefined source \\\"\" + _source + \"\\\"\");\n\t\t_source.clear();\t// Puts the entire element in error state\n\t}\n\telse {\n\t\t// The LocalSink object should be already created\n\t\t_sink = LocalSink::Lookup(_source);\n\t\tif (!_sink) {\n\t\t\tERROR(\"Failed to find LocalSink for MdsdEventSource \\\"\" + _source + \"\\\"\");\n\t\t}\n\t}\n}\n\nsubelementmap_t CfgCtxMdsdEventSource::_subelements = {\n\t{ \"RouteEvent\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxRouteEvent(parent); } }\n};\n\nstd::string CfgCtxMdsdEventSource::_name = \"MdsdEventSource\";\n\n////////////////// CfgCtxRouteEvent\n\n// Construct a Subscription object to query the event sink. Build the front of the pipeline to\n// process entities fetched from the sink.\n// The duration attribute is optional. 
If it's not set, a duration based on priority is used.\n// If priority is not explicitly set, there's a default for that, which then governs the duration.\nvoid\nCfgCtxRouteEvent::Enter(const xmlattr_t& properties)\n{\n\t_subscription = 0;\n\t_storeType = StoreType::XTable;\n\t_doSchemaGeneration = true;\n\tbool addIdentity = true;\n\n\t_ctxEventSource = dynamic_cast<CfgCtxMdsdEventSource*>(ParentContext);\n\tif (!_ctxEventSource) {\n\t\tFATAL(\"Found <RouteEvent> in <\" + ParentContext->Name() + \">; that can't happen\");\n\t\treturn;\n\t}\n\n\tCfgCtx::CfgCtxParser parser(this);\n\tif (!parser.ParseEvent(properties, CfgCtx::EventType::RouteEvent)) {\n\t\treturn;\n\t}\n\n\tstd::string eventName = parser.GetEventName();\n\tPriority priority = parser.GetPriority();\n\tstd::string account = parser.GetAccount();\n\tbool NoPerNDay = parser.IsNoPerNDay();\n\ttime_t interval = parser.GetInterval();\n\n\tif (parser.HasStoreType()) {\n\t\t_storeType = parser.GetStoreType();\n\t\t_doSchemaGeneration = StoreType::DoSchemaGeneration(_storeType);\n\t\taddIdentity = StoreType::DoAddIdentityColumns(_storeType);\n\t}\n\n\ttry {\n\t\t// Build target on the stack, move it into the Subscription task\n\t\tauto target = MdsEntityName { eventName, NoPerNDay, Config, account, _storeType };\n\t\tassert(interval != 0);\n\t\t_subscription = new Subscription( _ctxEventSource->Sink(), std::move(target), priority, MdsTime(interval) );\n\t\tif (addIdentity) {\n\t\t\t// When we add custom identity columns per-subscription, sub them in here\n\t\t\t_subscription->AddStage(new Pipe::Identity(Config->GetIdentityVector()));\n\t\t}\n\t\tConfig->AddMonikerEventInfo(account, eventName, _storeType, _ctxEventSource->Source(), mdsd::EventType::RouteEvent);\n\t}\n\tcatch (const std::invalid_argument& ex) {\n\t\tERROR(ex.what());\n\t\treturn;\n\t}\n\tcatch (...) {\n\t\tFATAL(\"Unknown exception; skipping\");\n\t\treturn;\n\t}\n}\n\nCfgContext*\nCfgCtxRouteEvent::Leave()\n{\n\tif (! _subscription) {\n\t\treturn ParentContext;\n\t}\n\n\t// Non-local/file targets need to have a schema constructed and pushed. The schema for\n\t// events from a given external source is fixed, so it only needs to be computed once\n\t// and pushed once per Nday period\n\tif (_doSchemaGeneration) {\n\t\t_subscription->AddStage(new Pipe::BuildSchema(Config, _subscription->target(), true));\n\t}\n\n\t// Find/make the batch for this task; add a final pipeline stage to write to that batch;\n\t// add the subscription to the config.\n\tBatch *batch = Config->GetBatch(_subscription->target(), _subscription->Duration());\n\tif (batch) {\n\t\t_subscription->AddStage(new Pipe::BatchWriter(batch, Config->GetIdentityVector(),\n\t\t                                              Config->PartitionCount(), _storeType));\n\n\t\t// Config->AddSubscription(_ctxEventSource->Source(), _subscription);\n\t\tConfig->AddTask(_subscription);\n\t} else {\n\t\tERROR(\"Unable to create routing for this event\");\n\t}\n\treturn ParentContext;\n}\n\nsubelementmap_t CfgCtxRouteEvent::_subelements = {\n\t{ \"Filter\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxFilter(parent); } }\n};\n\nstd::string CfgCtxRouteEvent::_name = \"RouteEvent\";\n\n////////////////// CfgCtxFilter\n\nsubelementmap_t CfgCtxFilter::_subelements;\n\nstd::string CfgCtxFilter::_name = \"Filter\";\n\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxMdsdEvents.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXMDSDEVENTS_HH_\n#define _CFGCTXMDSDEVENTS_HH_\n\n#include \"CfgContext.hh\"\n#include \"CfgCtxError.hh\"\n#include <map>\n#include \"Subscription.hh\"\n\nclass LocalSink;\n\nclass CfgCtxMdsdEvents : public CfgContext\n{\npublic:\n\tCfgCtxMdsdEvents(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxMdsdEvents() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxMdsdEventSource : public CfgContext\n{\npublic:\n\tCfgCtxMdsdEventSource(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxMdsdEventSource() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t&\n\t\tGetSubelementMap() const { return (_source.empty())?(CfgCtxError::subelements):(_subelements); }\n\n\tvoid Enter(const xmlattr_t& properties);\n\n\tconst std::string& Source() { return _source; }\n\tLocalSink * Sink() { return _sink; }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n\tstd::string _source;\n\tLocalSink *_sink;\n};\n\nclass CfgCtxRouteEvent : public CfgContext\n{\npublic:\n\tCfgCtxRouteEvent(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxRouteEvent() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n\tSubscription* _subscription;\n\tStoreType::Type _storeType;\n\tCfgCtxMdsdEventSource* _ctxEventSource;\n\tbool _doSchemaGeneration;\n};\n\nclass CfgCtxFilter : public CfgContext\n{\npublic:\n\tCfgCtxFilter(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxFilter() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { log_entry(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXMDSDEVENTS_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxMonMgmt.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxMonMgmt.hh\"\n#include \"CfgCtxImports.hh\"\n#include \"CfgCtxAccounts.hh\"\n#include \"CfgCtxManagement.hh\"\n#include \"CfgCtxSchemas.hh\"\n#include \"CfgCtxEnvelope.hh\"\n#include \"CfgCtxSources.hh\"\n#include \"CfgCtxEvents.hh\"\n#include \"CfgCtxSvcBusAccts.hh\"\n#include \"CfgCtxEventAnnotations.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Trace.hh\"\n\nsubelementmap_t CfgCtxMonMgmt::_subelements = {\n\t{ \"Imports\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxImports(parent); } },\n\t{ \"Accounts\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxAccounts(parent); } },\n\t{ \"Management\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxManagement(parent); } },\n\t{ \"Schemas\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxSchemas(parent); } },\n\t{ \"EnvelopeSchema\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxEnvelope(parent); } },\n\t{ \"Sources\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxSources(parent); } },\n\t{ \"Events\",\t[](CfgContext* parent) -> CfgContext* { return new CfgCtxEvents(parent); } },\n\t{ \"ServiceBusAccountInfos\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxSvcBusAccts(parent); } },\n\t{ \"EventStreamingAnnotations\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEventAnnotations(parent); } }\n};\n\nstd::string CfgCtxMonMgmt::_name = \"MonitoringManagement\";\n\nvoid\nCfgCtxMonMgmt::Enter(const xmlattr_t& properties)\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxMonMgmt::Enter\");\n\tif (Config->MonitoringManagementSeen()) {\n\t\treturn;\n\t}\n\n\tbool versionChecked = false;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"namespace\") {\n\t\t\tConfig->Namespace(item.second);\n\t\t}\n\t\telse if (item.first == \"eventVersion\") {\n\t\t\tint ver = std::stoi(item.second);\n\t\t\tif (ver > 0) {\n\t\t\t\tConfig->EventVersion(ver);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tConfig->AddMessage(MdsdConfig::error, \"eventVersion, when present, must be a positive integer\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"version\") {\n\t\t\tversionChecked = true;\n\t\t\tif (item.second != \"1.0\") {\n\t\t\t\tConfig->AddMessage(MdsdConfig::fatal, \"Only config file version 1.0 is supported\");\n\t\t\t}\n\t\t}\n\t\telse if (item.first == \"timestamp\") {\n\t\t\tConfig->Timestamp(item.second);\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::warning,\n\t\t\t\t\"<MonitoringManagement> ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\tif (!versionChecked) {\n\t\tConfig->AddMessage(MdsdConfig::fatal, \"Must specify \\\"version\\\" attribute\");\n\t}\n\n\tConfig->MonitoringManagementSeen(true);\n}\n\nCfgContext*\nCfgCtxMonMgmt::Leave()\n{\n\tConfig->ValidateEvents();\n\treturn ParentContext;\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxMonMgmt.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXMONMGMT_HH_\n#define _CFGCTXMONMGMT_HH_\n\n#include \"CfgContext.hh\"\n\nclass CfgCtxMonMgmt : public CfgContext\n{\npublic:\n\tCfgCtxMonMgmt(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxMonMgmt() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXMONMGMT_HH_\n"
  },
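  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxMonMgmt.example.xml",
    "content": "<!-- Illustrative sample added for documentation only; this file and its path are hypothetical and not part of the original source tree. It sketches the <MonitoringManagement> root element as parsed by CfgCtxMonMgmt::Enter(): \"version\" is required and must be \"1.0\"; \"eventVersion\", when present, must be a positive integer; \"namespace\" and \"timestamp\" are optional; any other attribute draws a warning. -->\n<MonitoringManagement version=\"1.0\" namespace=\"MyService\" eventVersion=\"2\" timestamp=\"2017-01-01T00:00:00.000Z\">\n  <!-- Imports, Accounts, Management, Schemas, Sources, Events, etc. go here. -->\n</MonitoringManagement>\n"
  },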
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxOMI.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxOMI.hh\"\n#include \"CfgCtxError.hh\"\n#include \"OmiTask.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n#include \"StoreType.hh\"\n#include \"PipeStages.hh\"\n#include \"Trace.hh\"\n#include \"EventType.hh\"\n\n#include <cstdlib>\n#include <climits>\n\n////////////////// CfgCtxOMI\n\nsubelementmap_t CfgCtxOMI::_subelements = {\n\t{ \"OMIQuery\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxOMIQuery(parent); } }\n};\n\nstd::string CfgCtxOMI::_name = \"OMI\";\n\n////////////////// CfgCtxOMIQuery\n\nvoid\nCfgCtxOMIQuery::Enter(const xmlattr_t& properties)\n{\n    Trace trace(Trace::ConfigLoad, \"CfgCtxOMIQuery::Enter\");\n    std::string eventName, account, omiNamespace, cqlQuery;\n    Priority priority;\n    time_t sampleRate = 0;\n    bool NoPerNDay = false;\n\n    _task = nullptr;\n    _isOK = true;\n    _storeType = StoreType::XTable;\n    _doSchemaGeneration = true;\n\n    for (const auto& item : properties) {\n        if (item.first == \"eventName\") {\n            if (MdsdUtil::NotValidName(item.second)) {\n                ERROR(\"Invalid eventName attribute\");\n            } else {\n                eventName = item.second;\n            }\n        } else if (item.first == \"priority\") {\n            if (! priority.Set(item.second)) {\n                WARNING(\"Ignoring unknown priority \\\"\" + item.second + \"\\\"\");\n            }\n        } else if (item.first == \"account\") {\n            if (MdsdUtil::NotValidName(item.second)) {\n                ERROR(\"Invalid account attribute\");\n            } else {\n                account = item.second;\n            }\n        } else if (item.first == \"dontUsePerNDayTable\") {\n            NoPerNDay = MdsdUtil::to_bool(item.second);\n        } else if (item.first == \"omiNamespace\") {\n            omiNamespace = item.second;\n        } else if (item.first == \"cqlQuery\") {\n            cqlQuery = MdsdUtil::UnquoteXmlAttribute(item.second);\n        } else if (item.first == \"sampleRateInSeconds\") {\n            time_t requestedRate = std::stoul(item.second);\n            if (requestedRate == 0) {\n                ERROR(\"Invalid sampleRateInSeconds attribute - using default\");\n            } else {\n                sampleRate = requestedRate;\n            }\n        } else if (item.first == \"storeType\") {\n            _storeType = StoreType::from_string(item.second);\n            _doSchemaGeneration = StoreType::DoSchemaGeneration(_storeType);\n        } else {\n            WARNING(\"Ignoring unexpected attribute \" + item.first);\n        }\n    }\n\n\ttry {\n\t\t// Build target on the stack, move it into the OmiTask\n\t\tauto target = MdsEntityName { eventName, NoPerNDay, Config, account, _storeType };\n\t\t_task = new OmiTask(Config, std::move(target), priority, omiNamespace, cqlQuery, sampleRate);\n\t\t// Centrally-stored events implicitly have Identity columns added to them as\n\t\t// defined in the <Management> element. Add them first thing so they're available\n\t\t// to subsequent stages (if any).\n\t\tif (_storeType != StoreType::Local) {\n\t\t\t_task->AddStage(new Pipe::Identity(Config->GetIdentityVector()));\n\t\t}\n\t\tConfig->AddMonikerEventInfo(account, eventName, _storeType, \"\", mdsd::EventType::OMIQuery);\n\t}\n\tcatch (const std::invalid_argument& ex) {\n\t\tERROR(ex.what());\n\t\t_isOK = false;\n\t\treturn;\n\t}\n\tcatch (...) 
{\n\t\tFATAL(\"Unknown exception; skipping\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n}\n\nCfgContext*\nCfgCtxOMIQuery::Leave()\n{\n\tTrace trace(Trace::ConfigLoad, \"CfgCtxOMIQuery::Leave\");\n\tif(_task) {\n\t\t// If not local/file, add a stage to push metadata into MDS. OMI queries should produce results with\n\t\t// the same schema each time. Doing an <Unpivot> doesn't change that.\n\t\tif (_doSchemaGeneration) {\n\t\t\t_task->AddStage(new Pipe::BuildSchema(Config, _task->Target(), true));\n\t\t}\n\n\t\t// Find/make the batch for this task; add a final pipeline stage to write to that batch;\n\t\t// add the task to the set of tasks in this config.\n\t\tBatch *batch = Config->GetBatch(_task->Target(), _task->FlushInterval());\n        if (batch) {\n            _task->AddStage(new Pipe::BatchWriter(batch, Config->GetIdentityVector(), Config->PartitionCount(), _storeType));\n            Config->AddOmiTask(_task);\n        } else {\n            ERROR(\"Configuration error(s) detected; dropping this OMIQuery.\");\n            delete _task;\n        }\n\n\t}\n\treturn ParentContext;\n}\n\nconst subelementmap_t&\nCfgCtxOMIQuery::GetSubelementMap() const\n{\n        if (_isOK) { return _subelements; }\n        else { return CfgCtxError::subelements; }\n}\n\n\nsubelementmap_t CfgCtxOMIQuery::_subelements {\n\t{ \"Unpivot\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxUnpivot(parent); } }\n};\n\nstd::string CfgCtxOMIQuery::_name = \"OMIQuery\";\n\n////////////////// CfgCtxUnpivot\n\nvoid\nCfgCtxUnpivot::Enter(const xmlattr_t& properties)\n{\n\t_query = dynamic_cast<CfgCtxOMIQuery*>(ParentContext);\n\tif (!_query) {\n\t\tERROR(\"<Unpivot> is not a valid subelement of <\" + ParentContext->Name() + \">\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n\n\t// Bail if parent didn't parse right or didn't build an OmiTask instance\n\tif (! 
(_query->isOK() && _query->GetTask())) {\n\t\t_isOK = false;\n\t\treturn;\n\t}\n\n\tfor (const auto &iter : properties) {\n\t\tif (iter.first == \"columnValue\") {\n\t\t\t_valueAttrName = iter.second;\n\t\t} else if (iter.first == \"columnName\") {\n\t\t\t_nameAttrName = iter.second;\n\t\t} else if (iter.first == \"columns\") {\n\t\t\t_unpivotColumns = iter.second;\n\t\t} else {\n\t\t\tWARNING(\"Ignoring unexpected attribute \" + iter.first);\n\t\t}\n\t}\n\n\tif (_valueAttrName.empty() || _nameAttrName.empty() || _unpivotColumns.empty()) {\n\t\tERROR(\"Missing one or more required attributes (columnValue, columnName, columns)\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n}\n\nCfgContext*\nCfgCtxUnpivot::Leave()\n{\n\tif (_isOK) {\n\t\tauto unpivoter = new Pipe::Unpivot(_valueAttrName, _nameAttrName, _unpivotColumns, std::move(_transforms));\n\t\t_query->GetTask()->AddStage(unpivoter);\n\t}\n\n\treturn ParentContext;\n}\n\nvoid\nCfgCtxUnpivot::addTransform(const std::string& from, const std::string& to, double scale)\n{\n\t_transforms.emplace(std::piecewise_construct, std::forward_as_tuple(from), std::forward_as_tuple(to, scale));\n}\n\nsubelementmap_t CfgCtxUnpivot::_subelements {\n\t{ \"MapName\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxMapName(parent); } }\n};\n\nstd::string CfgCtxUnpivot::_name = \"Unpivot\";\n\n////////////////// CfgCtxMapName\n\nvoid\nCfgCtxMapName::Enter(const xmlattr_t& properties)\n{\n\t_unpivot = dynamic_cast<CfgCtxUnpivot*>(ParentContext);\n\tif (!_unpivot) {\n\t\tERROR(\"<MapName> is not a valid subelement of <\" + ParentContext->Name() + \">\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n\n\tfor (const auto &iter : properties) {\n\t\tif (iter.first == \"name\") {\n\t\t\t_from = iter.second;\n\t\t} else if (iter.first == \"scaleUp\") {\n\t\t\t_scale *= std::stod(iter.second);\n\t\t} else if (iter.first == \"scaleDown\") {\n\t\t\t_scale /= std::stod(iter.second);\n\t\t} else {\n\t\t\tWARNING(\"Ignoring unexpected attribute \" + iter.first);\n\t\t}\n\t}\n\n\tif (_from.empty()) {\n\t\tERROR(\"Missing required \\\"from\\\" attribute\");\n\t\t_isOK = false;\n\t\treturn;\n\t}\n}\n\n// Process XML body; accumulate it as the value of the _to instance var\nvoid\nCfgCtxMapName::HandleBody(const std::string& body)\n{\n\tif (_isOK) {\n\t\t_to += body;\n\t}\n}\n\n// Now that we have the target name for the translation, let's save it.\nCfgContext*\nCfgCtxMapName::Leave()\n{\n\tif (_isOK) {\n\t\tif (_to.empty()) {\n\t\t\t_to = _from;\n\t\t}\n\t\t_unpivot->addTransform(_from, _to, _scale);\n\t} else {\n\t\tERROR(\"Error(s) detected; ignoring this element\");\n\t}\n\n\treturn ParentContext;\n}\n\nsubelementmap_t CfgCtxMapName::_subelements;\n\nstd::string CfgCtxMapName::_name = \"MapName\";\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxOMI.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXOMI_HH_\n#define _CFGCTXOMI_HH_\n\n#include \"CfgContext.hh\"\n#include \"StoreType.hh\"\n#include <unordered_map>\n#include \"PipeStages.hh\"\n\nclass OmiTask;\n\nclass CfgCtxOMI : public CfgContext\n{\npublic:\n\tCfgCtxOMI(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxOMI() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxOMIQuery : public CfgContext\n{\npublic:\n\tCfgCtxOMIQuery(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxOMIQuery() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const;\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\n\tOmiTask * GetTask() const { return _task; }\n\tbool isOK() const { return _isOK; }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\tOmiTask *_task;\n\tbool _isOK;\n\tStoreType::Type _storeType;\n\tbool _doSchemaGeneration;\n};\n\nclass CfgCtxUnpivot : public CfgContext\n{\npublic:\n\tCfgCtxUnpivot(CfgContext* config) : CfgContext(config), _isOK(true) {}\n\tvirtual ~CfgCtxUnpivot() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\n\tvoid addTransform(const std::string& from, const std::string& to, double scale = 1.0 );\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n\tCfgCtxOMIQuery* _query;\n\tbool _isOK;\n\tstd::string _valueAttrName;\n\tstd::string _nameAttrName;\n\tstd::string _unpivotColumns;\n\tstd::unordered_map<std::string, ColumnTransform> _transforms;\n};\n\nclass CfgCtxMapName : public CfgContext\n{\npublic:\n\tCfgCtxMapName(CfgContext* config) : CfgContext(config), _isOK(true), _scale(1.0) {}\n\tvirtual ~CfgCtxMapName() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\tvoid HandleBody(const std::string& body);\n\tCfgContext* Leave();\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n\n\tbool _isOK;\n\tCfgCtxUnpivot* _unpivot;\n\tstd::string _from;\n\tstd::string _to;\n\tdouble _scale;\n};\n\n#endif //_CFGCTXOMI_HH_\n\n// vim: se sw=8 :\n"
  },
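  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxOMI.example.xml",
    "content": "<!-- Illustrative sample added for documentation only; this file and its path are hypothetical, and the query shown is just a plausible placeholder. Element and attribute names come from the parsing logic in CfgCtxOMI.cc: <OMIQuery> accepts eventName, account, priority, dontUsePerNDayTable, omiNamespace, cqlQuery, sampleRateInSeconds and storeType; <Unpivot> requires columnValue, columnName and columns; <MapName> matches a source column by its \"name\" attribute, optionally rescales it via scaleUp/scaleDown, and takes the renamed column as its element body. -->\n<OMI>\n  <OMIQuery eventName=\"ProcessorStats\" omiNamespace=\"root/scx\" cqlQuery=\"SELECT * FROM SCX_ProcessorStatisticalInformation\" sampleRateInSeconds=\"60\" storeType=\"XTable\">\n    <Unpivot columnValue=\"Value\" columnName=\"CounterName\" columns=\"PercentIdleTime,PercentUserTime\">\n      <MapName name=\"PercentIdleTime\">IdleTimePercent</MapName>\n    </Unpivot>\n  </OMIQuery>\n</OMI>\n"
  },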
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxParser.hh\"\n#include \"Utility.hh\"\n#include \"MdsTime.hh\"\n\nusing namespace CfgCtx;\n\nstd::map<std::string, CfgCtxParser::typeparser_t> CfgCtxParser::s_evtParsers = {\n    { \"account\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseName(iter->first, iter->second, p->m_account); }\n    },\n    { \"dontUsePerNDayTable\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { p->m_isNoPerNDay = MdsdUtil::to_bool(iter->second); return true; }\n    },\n    { \"duration\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseDuration(iter->first, iter->second); }\n    },\n    { \"eventName\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseName(iter->first, iter->second, p->m_eventName); }\n    },\n    { \"priority\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParsePriority(iter->first, iter->second); }\n    },\n    { \"storeType\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseStoreType(iter->first, iter->second); }\n    }\n};\n\nstd::map<std::string, CfgCtxParser::typeparser_t>\nCfgCtxParser::s_etwEvtParsers = BuildEtwParsersTable();\n\nstd::map<std::string, CfgCtxParser::typeparser_t>\nCfgCtxParser::BuildEtwParsersTable()\n{\n    std::map<std::string, typeparser_t> tmp = s_evtParsers;\n    tmp[\"id\"] = [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseId(iter->first, iter->second); };\n    return tmp;\n}\n\nstd::map<std::string, CfgCtxParser::typeparser_t>\nCfgCtxParser::s_etwProviderParsers = {\n    { \"format\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseName(iter->first, iter->second, p->m_format); }\n    },\n    { \"guid\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseName(iter->first, iter->second, p->m_guid); }\n    },\n    { \"priority\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParsePriority(iter->first, iter->second); }\n    },\n    { \"storeType\", [] (CfgCtxParser* p, const xmlattr_iter_t & iter) -> bool\n        { return p->ParseStoreType(iter->first, iter->second); }\n    }\n};\n\n\nbool\nCfgCtxParser::ParseEvent(\n    const xmlattr_t& properties,\n    EventType eventType\n    )\n{\n    if (!m_context) {\n        return false;\n    }\n\n    bool resultOK = true;\n    auto & parsersTable = (EventType::RouteEvent == eventType) ? 
s_evtParsers : s_etwEvtParsers;\n\n    for (xmlattr_iter_t iter = properties.begin(); iter != properties.end(); ++iter) {\n        auto parserIter = parsersTable.find(iter->first);\n        if (parserIter != parsersTable.end()) {\n            resultOK = resultOK && parserIter->second(this, iter);\n        }\n        else {\n            LogUnexpectedAttrNameWarn(iter->first);\n        }\n    }\n\n    // validate required attributes\n    if (m_eventName.empty()) {\n        LogRequiredAttrError(\"eventName\");\n        resultOK = false;\n    }\n\n    if (EventType::EtwEvent == eventType && m_eventId < 0) {\n        LogRequiredAttrError(\"id\");\n        resultOK = false;\n    }\n\n    return resultOK;\n}\n\nbool\nCfgCtxParser::ParseEtwProvider(\n    const xmlattr_t& properties\n    )\n{\n    if (!m_context) {\n        return false;\n    }\n\n    bool resultOK = true;\n    const char* supportedFormat = \"EventSource\"; // only this is supported for now\n\n    for (xmlattr_iter_t iter = properties.begin(); iter != properties.end(); ++iter) {\n        auto parserIter = s_etwProviderParsers.find(iter->first);\n        if (parserIter != s_etwProviderParsers.end()) {\n            resultOK = resultOK && parserIter->second(this, iter);\n        }\n        else {\n            LogUnexpectedAttrNameWarn(iter->first);\n        }\n    }\n\n    if (m_guid.empty()) {\n        LogRequiredAttrError(\"guid\");\n        resultOK = false;\n    }\n\n    if (!m_format.empty() && supportedFormat != m_format) {\n        LogInvalidValueError(\"format\", m_format);\n        resultOK = false;\n    }\n\n    return resultOK;\n}\n\nbool\nCfgCtxParser::ParseName(\n    const std::string & attrName,\n    const std::string & attrValue,\n    std::string & result)\n{\n    result = attrValue;\n    if (MdsdUtil::NotValidName(result)) {\n        result.clear();\n        LogInvalidValueError(attrName, attrValue);\n        return false;\n    }\n    return true;\n}\n\nbool\nCfgCtxParser::ParseStoreType(const std::string & attrName, const std::string & attrValue)\n{\n    bool resultOK = true;\n\n    m_storeType = StoreType::from_string(attrValue);\n    if (StoreType::None == m_storeType) {\n        LogInvalidValueError(attrName, attrValue);\n        resultOK = false;\n    }\n    else {\n        m_hasStoreType = true;\n    }\n    return resultOK;\n}\n\nbool\nCfgCtxParser::ParsePriority(const std::string & attrName, const std::string & attrValue)\n{\n    m_hasPriority = true;\n    if (!m_priority.Set(attrValue)) {\n        LogUnknownAttrValueWarn(attrName, attrValue);\n        m_hasPriority = false;\n    }\n    else if (0 == m_interval) {\n        m_interval = m_priority.Duration();\n    }\n    return true;\n}\n\nbool\nCfgCtxParser::ParseDuration(const std::string & attrName, const std::string & attrValue)\n{\n    m_interval = MdsTime::FromIS8601Duration(attrValue).to_time_t();\n\n    if (0 == m_interval) {\n        LogInvalidValueError(attrName, attrValue);\n        return false;\n    }\n    else if (10 > m_interval) {\n        m_context->WARNING(\"Minimum supported duration is ten (10) seconds; using minimum\");\n        m_interval = 10;\n    }\n    return true;\n}\n\nbool\nCfgCtxParser::ParseId(const std::string & attrName, const std::string & attrValue)\n{\n    int tmp = atoi(attrValue.c_str());\n    if (tmp < 0 || tmp > INT_MAX) {\n        LogInvalidValueError(attrName, attrValue);\n        return false;\n    }\n    else {\n        m_eventId = tmp;\n    }\n    return true;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _CFGCTXPARSER_HH_\n#define _CFGCTXPARSER_HH_\n\n#include \"ConfigParser.hh\"\n#include <map>\n#include <functional>\n\n#include \"StoreType.hh\"\n#include \"Priority.hh\"\n#include \"CfgContext.hh\"\n\nextern \"C\" {\n#include <time.h>\n}\n\nnamespace CfgCtx {\n\n/// mdsd Event types\nenum class EventType {\n    RouteEvent,\n    EtwEvent\n};\n\n\n\n/// A utility class to parse mdsd configuration XML.\n/// It implements parsing routines for common XML properties like\n/// priority, storeType, etc.\nclass CfgCtxParser {\n    using typeparser_t = std::function<bool (CfgCtxParser*, xmlattr_iter_t&)>;\npublic:\n    /// <summary>\n    /// Create a new parser instance.\n    /// </summary>\n    /// <param name='context'>Context where the parser is called. </param>\n    CfgCtxParser(CfgContext * context) :\n        m_context(context)\n        {\n        }\n\n    ~CfgCtxParser() {}\n\n    /// <summary>\n    /// Parse properties of an EventType. After parsing, the results will\n    /// be available from GetXXX() functions.\n    /// </summary>\n    /// <param name='properties'> properties to parse </param>\n    /// <param name='eventType'> EventType </param>\n    /// Return true if no error, false if any error.\n    bool ParseEvent(const xmlattr_t& properties,\n                    EventType eventType);\n\n    /// <summary>\n    /// Parse <EtwProvider ...> XML configuration.\n    /// </summary>\n    /// <param name='properties'> properties to parse </param>\n    /// Return true if no error, false if any error.\n    bool ParseEtwProvider(const xmlattr_t& properties);\n\n    std::string GetAccount() const { return m_account; }\n    bool IsNoPerNDay() const { return m_isNoPerNDay; }\n    time_t GetInterval() const { return (0 == m_interval)? 
m_priority.Duration() : m_interval; }\n\n    std::string GetEventName() const { return m_eventName; }\n    std::string GetFormat() const { return m_format; }\n    std::string GetGuid() const { return m_guid; }\n\n    int GetEventId() const { return m_eventId; }\n\n    Priority GetPriority() const { return m_priority; }\n    bool HasPriority() const { return m_hasPriority; }\n\n    StoreType::Type GetStoreType() const { return m_storeType; }\n    bool HasStoreType() const { return m_hasStoreType; }\n\n\nprivate:\n    bool ParseName(const std::string & attrName,\n                   const std::string & attrValue,\n                   std::string & result);\n\n    bool ParsePriority(const std::string & attrName,\n                       const std::string & attrValue);\n\n    bool ParseStoreType(const std::string & attrName,\n                        const std::string & attrValue);\n\n    bool ParseDuration(const std::string & attrName,\n                       const std::string & attrValue);\n\n    bool ParseId(const std::string & attrName,\n                 const std::string & attrValue);\n\n    void LogInvalidValueError(const std::string & attrName,\n                              const std::string & attrValue)\n    {\n        m_context->ERROR(\"<\" + m_context->Name() + \"> attribute '\" + attrName +\n            \"' has invalid value '\" + attrValue + \"'.\");\n    }\n\n    void LogUnknownAttrValueWarn(const std::string & attrName,\n                                 const std::string & attrValue)\n    {\n        m_context->WARNING(\"<\" + m_context->Name() + \">: ignoring unknown '\" +\n            attrName + \"'' value '\" + attrValue + \"'\");\n    }\n\n    void LogRequiredAttrError(const std::string & attrName)\n    {\n        m_context->ERROR(\"<\" + m_context->Name() + \"> requires attribute '\" + attrName + \"'\");\n    }\n\n    void LogUnexpectedAttrNameWarn(const std::string & attrName)\n    {\n        m_context->WARNING(\"<\" + m_context->Name() +\n            \"> ignoring unexpected attribute '\" + attrName + \"'.\");\n    }\n\n    static std::map<std::string, typeparser_t> BuildEtwParsersTable();\n\nprivate:\n    CfgContext * const m_context;\n\n    std::string m_account;\n    bool m_isNoPerNDay = false;\n    time_t m_interval = 0;\n\n    std::string m_eventName;\n    std::string m_format;\n    std::string m_guid;\n\n    int m_eventId = -1;\n\n    Priority m_priority;\n    bool m_hasPriority = false;\n\n    StoreType::Type m_storeType = StoreType::None;\n    bool m_hasStoreType = false;\n\n    static std::map<std::string, typeparser_t> s_evtParsers;\n    static std::map<std::string, typeparser_t> s_etwEvtParsers;\n    static std::map<std::string, typeparser_t> s_etwProviderParsers;\n};\n\n} // namespace CfgCtx\n\n#endif // _CFGCTXPARSER_HH_\n"
  },
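  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxParser.example.cc",
    "content": "// Illustrative sketch added for documentation only; this file and its path are\n// hypothetical and not part of the original source tree. Using nothing beyond\n// the public API declared in CfgCtxParser.hh, it shows how a CfgContext\n// subclass's Enter() handler might drive CfgCtxParser for a RouteEvent-style\n// element.\n\n#include \"CfgCtxParser.hh\"\n\nstatic void ExampleEnter(CfgContext* ctx, const xmlattr_t& properties)\n{\n    CfgCtx::CfgCtxParser parser(ctx);\n\n    // Parses account, dontUsePerNDayTable, duration, eventName, priority and\n    // storeType; errors and warnings are logged against the supplied context.\n    if (!parser.ParseEvent(properties, CfgCtx::EventType::RouteEvent)) {\n        return;     // a required attribute was missing or invalid\n    }\n\n    auto eventName = parser.GetEventName();     // required; already validated\n    auto interval  = parser.GetInterval();      // explicit duration, or the priority's default\n    if (parser.HasStoreType()) {\n        StoreType::Type st = parser.GetStoreType();\n        (void)st;\n    }\n    (void)eventName;\n    (void)interval;\n}\n"
  },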
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxRoot.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxRoot.hh\"\n#include \"CfgCtxMonMgmt.hh\"\n\nsubelementmap_t CfgCtxRoot::_subelements = {\n\t{ \"MonitoringManagement\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxMonMgmt(parent); } }\n};\n\nstd::string CfgCtxRoot::_name = \"(Document Root)\";\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxRoot.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXROOT_HH_\n#define _CFGCTXROOT_HH_\n\n#include \"CfgContext.hh\"\n#include \"MdsdConfig.hh\"\n#include <map>\n#include <functional>\n\nclass CfgCtxRoot :\n\tpublic CfgContext\n{\npublic:\n\t/// <summary>\n\t/// The root context for a document. Tracks no information from prior context. Is neither entered\n\t/// nor left, in the sense of document parsing.\n\tCfgCtxRoot(MdsdConfig* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxRoot() {};\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { log_entry(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXROOT_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSchemas.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxSchemas.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Engine.hh\"\n#include \"TableSchema.hh\"\n#include <sstream>\n\nsubelementmap_t CfgCtxSchemas::_subelements = {\n\t{ \"Schema\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxSchema(parent); } }\n};\n\nstd::string CfgCtxSchemas::_name = \"Schemas\";\n\n///////////////////////\n\nsubelementmap_t CfgCtxSchema::_subelements = {\n\t{ \"Column\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxColumn(parent); } }\n};\n\nstd::string CfgCtxSchema::_name = \"Schema\";\n\nconst subelementmap_t&\nCfgCtxSchema::GetSubelementMap() const\n{\n\tif (_schema) { return _subelements; }\n\telse { return CfgCtxError::subelements; }\n}\n\nvoid\nCfgCtxSchema::Enter(const xmlattr_t& properties)\n{\n\t_schema = 0;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"name\") {\n\t\t\tif (_schema == 0) {\n\t\t\t\t_schema = new TableSchema(item.second);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tConfig->AddMessage(MdsdConfig::error, \"\\\"name\\\" can appear in <Schema> only once\");\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::warning, \"Ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\n\tif (_schema == 0) {\n\t\tConfig->AddMessage(MdsdConfig::fatal, \"<Schema> requires \\\"name\\\" attribute\");\n\t}\n}\n\n// Called from CfgCtxColumn::Enter()\nvoid\nCfgCtxSchema::AddColumn(const std::string& n, const std::string& srctype, const std::string& mdstype)\n{\n\t// If we have no valid schema, or we've seen the column before, skip it.\n\tif (!_schema) return;\n\n\tauto result = _schema->AddColumn(n, srctype, mdstype);\n\tif (!result) {\n\t\treturn;\n\t}\n\n\tstd::ostringstream msg;\n\tswitch (result) {\n\t\tcase TableSchema::Ok:\n\t\t\treturn;\t\t// !!! Return, not break\n\t\tcase TableSchema::NoConverter:\n\t\t\tmsg << \"Can't convert \" << srctype << \" to \" << mdstype << \" - ignoring column \" << n;\n\t\t\tmsg << \". Known converters: \" << Engine::ListConverters();\n\t\t\tbreak;\n\t\tcase TableSchema::DupeColumn:\n\t\t\tmsg << \"Column \" << n << \" already added to Schema \" << _schema->Name();\n\t\t\tdelete _schema;\n\t\t\t_schema = 0;\t\t// Throw away the schema, we're broken\n\t\t\tbreak;\n\t\tcase TableSchema::BadSrcType:\n\t\t\tmsg << \"Unknown source type \" << srctype << \" - ignoring column \" << n;\n\t\t\tmsg << \". Known converters: \" << Engine::ListConverters();\n\t\t\tbreak;\n\t\tcase TableSchema::BadMdsType:\n\t\t\tmsg << \"Unknown MDS type \" << mdstype << \" - ignoring column \" << n;\n\t\t\tmsg << \". 
Known converters: \" << Engine::ListConverters();\n\t\t\tbreak;\n\t}\n\tConfig->AddMessage(MdsdConfig::error, msg.str());\n}\n\nCfgContext*\nCfgCtxSchema::Leave()\n{\n\tif (_schema) {\n\t\tConfig->AddSchema(_schema);\t\t// All the way through without a fatal error - add it to the config\n\t}\n\telse {\n\t\tConfig->AddMessage(MdsdConfig::error, \"Schema dropped from active configuration due to errors\");\n\t}\n\treturn ParentContext;\n}\n\n///////////////////////\n\nsubelementmap_t CfgCtxColumn::_subelements;\n\nstd::string CfgCtxColumn::_name = \"Column\";\n\nvoid\nCfgCtxColumn::Enter(const xmlattr_t& properties)\n{\n\tstd::string colname;\n\tstd::string srctype, mdstype;\n\n\tfor (const auto& item : properties)\n\t{\n\t\tif (item.first == \"name\") {\n\t\t\tcolname = item.second;\n\t\t}\n\t\telse if (item.first == \"type\") {\n\t\t\tsrctype = item.second;\n\t\t}\n\t\telse if (item.first == \"mdstype\") {\n\t\t\tmdstype = item.second;\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::warning, \"Ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n\t\t}\n\t}\n\tif (colname.empty() || srctype.empty() || mdstype.empty()) {\n\t\tConfig->AddMessage(MdsdConfig::error, \"Missing required attributes (name, type, mdstype)\");\n\t}\n\telse {\n\t\tCfgCtxSchema* ctxschema = dynamic_cast<CfgCtxSchema*>(ParentContext);\n\t\tif (ctxschema) {\n\t\t\tctxschema->AddColumn(colname, srctype, mdstype);\n\t\t}\n\t\telse {\n\t\t\tConfig->AddMessage(MdsdConfig::fatal,\n\t\t\t\t\t\"Found <Column> in <\" + ParentContext->Name() + \">; that can't happen\");\n\t\t}\n\t}\n}\n\n// vim: se sw=8 : \n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSchemas.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXSCHEMAS_HH_\n#define _CFGCTXSCHEMAS_HH_\n\n#include \"CfgContext.hh\"\n#include \"CfgCtxError.hh\"\n#include <set>\n\nclass TableSchema;\n\nclass CfgCtxSchemas : public CfgContext\n{\npublic:\n\tCfgCtxSchemas(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxSchemas() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxSchema : public CfgContext\n{\npublic:\n\tCfgCtxSchema(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxSchema() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const;\n\n\tvoid Enter(const xmlattr_t& properties);\n\tCfgContext* Leave();\n\n\tvoid AddColumn(const std::string& n, const std::string& srctype, const std::string& mdstype);\n\nprivate:\n\tTableSchema* _schema;\n\tstd::set<std::string> _columnNames;\n\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxColumn : public CfgContext\n{\npublic:\n\tCfgCtxColumn(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxColumn() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXSCHEMAS_HH_\n"
  },
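  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSchemas.example.xml",
    "content": "<!-- Illustrative sample added for documentation only; this file and its path are hypothetical. Per CfgCtxSchemas.cc, each <Schema> requires a \"name\" attribute (given at most once), and each <Column> requires name, type (the source type) and mdstype (the MDS type). The type names below follow common mdsd config samples; the authoritative list of conversions comes from Engine::ListConverters(). -->\n<Schemas>\n  <Schema name=\"SyslogSchema\">\n    <Column name=\"Facility\" type=\"str\" mdstype=\"mt:wstr\" />\n    <Column name=\"Severity\" type=\"str\" mdstype=\"mt:wstr\" />\n    <Column name=\"Msg\" type=\"str\" mdstype=\"mt:wstr\" />\n  </Schema>\n</Schemas>\n"
  },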
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSources.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxSources.hh\"\n#include \"MdsdConfig.hh\"\n#include \"LocalSink.hh\"\n#include \"EventType.hh\"\n#include \"Utility.hh\"\n\nsubelementmap_t CfgCtxSources::_subelements = {\n\t{ \"Source\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxSource(parent); } }\n};\n\nstd::string CfgCtxSources::_name = \"Sources\";\n\n////////////\n\nvoid\nCfgCtxSource::Enter(const xmlattr_t& properties)\n{\n    std::string name, schema, dynamic_schema;\n\n    for (const auto& item : properties)\n    {\n        if (item.first == \"name\") {\n            name = item.second;\n        }\n        else if (item.first == \"schema\") {\n            schema = item.second;\n        }\n        else if (item.first == \"dynamic_schema\") {\n            dynamic_schema = item.second;\n        }\n        else {\n            Config->AddMessage(MdsdConfig::warning, \"Ignoring unexpected attribute \\\"\" + item.first + \"\\\"\");\n        }\n    }\n\n    auto isOK = true;\n    if (name.empty()) {\n        Config->AddMessage(MdsdConfig::fatal, \"<Source> requires a \\\"name\\\" attribute\");\n        isOK = false;\n    }\n\n    auto isDynamicSchema = MdsdUtil::to_bool(dynamic_schema);\n\n    if ((!schema.empty() && isDynamicSchema) ||\n        (schema.empty() && (dynamic_schema.empty() || !isDynamicSchema))) {\n        Config->AddMessage(MdsdConfig::fatal, \"<Source> requires either a valid \\\"schema\\\" attribute or that the \\\"dynamic_schema\\\" attribute be set to \\\"true\\\", but not both.\");\n    }\n\n    if (!isOK) {\n        return;\n    }\n\n    auto sink = LocalSink::Lookup(name);\n    if (!sink) {\n        sink = new LocalSink(name);\n    }\n\n    if (!isDynamicSchema) {\n        Config->AddSource(name, schema);\n        sink->AllocateSchemaId();\n    }\n    else {\n        Config->AddDynamicSchemaSource(name);\n    }\n\n    Config->AddMonikerEventInfo(\"\", \"\", StoreType::Local, name, mdsd::EventType::None);\n}\n\nsubelementmap_t CfgCtxSource::_subelements;\n\nstd::string CfgCtxSource::_name = \"Source\";\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSources.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXSOURCES_HH_\n#define _CFGCTXSOURCES_HH_\n\n#include \"CfgContext.hh\"\n\nclass CfgCtxSources : public CfgContext\n{\npublic:\n\tCfgCtxSources(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxSources() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n\nclass CfgCtxSource : public CfgContext\n{\npublic:\n\tCfgCtxSource(CfgContext* config) : CfgContext(config) {}\n\tvirtual ~CfgCtxSource() { }\n\n\tvirtual const std::string& Name() const { return _name; }\n\tvirtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n\tvoid Enter(const xmlattr_t& properties);\n\nprivate:\n\tstatic subelementmap_t _subelements;\n\tstatic std::string _name;\n};\n\n#endif //_CFGCTXSOURCES_HH_\n"
  },
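  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSources.example.xml",
    "content": "<!-- Illustrative sample added for documentation only; this file and its path are hypothetical. CfgCtxSource::Enter() requires a \"name\" attribute plus exactly one of: a \"schema\" attribute naming a declared <Schema>, or dynamic_schema=\"true\". Supplying both, or neither, is reported as a fatal configuration error. -->\n<Sources>\n  <Source name=\"syslog\" schema=\"SyslogSchema\" />\n  <Source name=\"jsonSource\" dynamic_schema=\"true\" />\n</Sources>\n"
  },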
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSvcBusAccts.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CfgCtxSvcBusAccts.hh\"\n#include \"MdsdConfig.hh\"\n#include \"EventPubCfg.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n#include \"cryptutil.hh\"\n\n///////// CfgCtxSvcBusAccts\n\nsubelementmap_t CfgCtxSvcBusAccts::_subelements = {\n    { \"ServiceBusAccountInfo\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxSvcBusAcct(parent); } }\n};\n\nstd::string CfgCtxSvcBusAccts::_name = \"ServiceBusAccountInfos\";\n\n///////// CfgCtxSvcBusAcct\n\nsubelementmap_t CfgCtxSvcBusAcct::_subelements = {\n    { \"EventPublisher\", [](CfgContext* parent) -> CfgContext* { return new CfgCtxEventPublisher(parent); } }\n};\n\nstd::string CfgCtxSvcBusAcct::_name = \"ServiceBusAccountInfo\";\n\nvoid\nCfgCtxSvcBusAcct::Enter(const xmlattr_t& properties)\n{\n    Trace trace(Trace::ConfigLoad, \"CfgCtxSvcBusAcct::Enter\");\n    const std::string & attrMoniker = \"name\";\n\n    for (const auto& item : properties)\n    {\n        if (attrMoniker == item.first) {\n            parse_singleton_attribute(item.first, item.second, attrMoniker, _moniker);\n        }\n        else {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n\n    fatal_if_no_attributes(attrMoniker, _moniker);\n}\n\n///////// CfgCtxEventPublisher\n\nsubelementmap_t CfgCtxEventPublisher::_subelements;\nstd::string CfgCtxEventPublisher::_name = \"EventPublisher\";\n\nvoid\nCfgCtxEventPublisher::Enter(const xmlattr_t& properties)\n{\n    std::string valConnStr;\n    std::string valDecryptKeyPath;\n\n    const std::string & attrConnStr = \"connectionString\";\n    const std::string & attrDecryptKeyPath = \"decryptKeyPath\";\n\n    for (const auto & item : properties)\n    {\n        if (attrConnStr == item.first) {\n            parse_singleton_attribute(item.first, item.second, attrConnStr, valConnStr);\n        }\n        else if (attrDecryptKeyPath == item.first) {\n            parse_singleton_attribute(item.first, item.second, attrDecryptKeyPath, valDecryptKeyPath);\n        }\n        else {\n            warn_if_attribute_unexpected(item.first);\n        }\n    }\n    fatal_if_no_attributes(attrConnStr, valConnStr);\n\n    auto sbObj = dynamic_cast<CfgCtxSvcBusAcct*>(ParentContext);\n    if (!sbObj) {\n        fatal_if_impossible_subelement();\n        return;\n    }\n    auto sbmoniker = sbObj->GetMoniker();\n    try {\n        if (valDecryptKeyPath.empty()) {\n            auto escapedConnStr = MdsdUtil::UnquoteXmlAttribute(valConnStr);\n            Config->GetEventPubCfg()->AddServiceBusAccount(sbmoniker, std::move(escapedConnStr));\n        }\n        else {\n            if (!MdsdUtil::IsRegFileExists(valDecryptKeyPath)) {\n                ERROR(\"Cannot find decrypt key path \" + valDecryptKeyPath);\n            }\n            else {\n                auto decryptedSas = cryptutil::DecodeAndDecryptString(valDecryptKeyPath, std::move(valConnStr));\n                Config->GetEventPubCfg()->AddServiceBusAccount(sbmoniker, std::move(decryptedSas));\n            }\n        }\n    }\n    catch(const std::exception & ex) {\n        ERROR(\"<\" + Name() + \"> exception: \" + ex.what());\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSvcBusAccts.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGCTXSVCBUSACCTS_HH_\n#define _CFGCTXSVCBUSACCTS_HH_\n\n#include \"CfgContext.hh\"\n\nclass CfgCtxSvcBusAccts : public CfgContext\n{\npublic:\n    CfgCtxSvcBusAccts(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxSvcBusAccts() {}\n\n    virtual const std::string & Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n    void Enter(const xmlattr_t& properties) { warn_if_attributes(properties); }\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\nclass CfgCtxSvcBusAcct : public CfgContext\n{\npublic:\n    CfgCtxSvcBusAcct(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxSvcBusAcct() {}\n    virtual const std::string& Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties);\n\n    std::string GetMoniker() const { return _moniker; }\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n    std::string _moniker;\n};\n\nclass CfgCtxEventPublisher : public CfgContext\n{\npublic:\n    CfgCtxEventPublisher(CfgContext* config) : CfgContext(config) {}\n    virtual ~CfgCtxEventPublisher() {}\n    virtual const std::string& Name() const { return _name; }\n    virtual const subelementmap_t& GetSubelementMap() const { return _subelements; }\n\n    void Enter(const xmlattr_t& properties);\n\nprivate:\n    static subelementmap_t _subelements;\n    static std::string _name;\n};\n\n\n#endif // _CFGCTXSVCBUSACCTS_HH_\n"
  },
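  {
    "path": "Diagnostic/mdsd/mdsd/CfgCtxSvcBusAccts.example.xml",
    "content": "<!-- Illustrative sample added for documentation only; this file, its path, and the connection string are hypothetical placeholders. Per CfgCtxSvcBusAccts.cc, <ServiceBusAccountInfo> requires a \"name\" (moniker) attribute, and <EventPublisher> requires \"connectionString\"; when \"decryptKeyPath\" is also present, the connection string is treated as encrypted and is decoded with the key file at that path. -->\n<ServiceBusAccountInfos>\n  <ServiceBusAccountInfo name=\"sbMoniker1\">\n    <EventPublisher connectionString=\"Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=policy;SharedAccessKey=PLACEHOLDER\" />\n  </ServiceBusAccountInfo>\n</ServiceBusAccountInfos>\n"
  },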
  {
    "path": "Diagnostic/mdsd/mdsd/CfgEventAnnotationType.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CFGEVENTANNOTATIONTYPE_HH_\n#define _CFGEVENTANNOTATIONTYPE_HH_\n\nnamespace EventAnnotationType\n{\n    // Because one event can be multiple types,\n    // each type should be a power of 2.\n    enum Type\n    {\n        None = 0,\n        EventPublisher = 1 << 0,\n        OnBehalf = 1 << 1\n    };\n};\n\n#endif // _CFGEVENTANNOTATIONTYPE_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CfgOboDirectConfig.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _CFGOBODIRECTCONFIG_HH_\n#define _CFGOBODIRECTCONFIG_HH_\n\n#include <string>\n\n// struct to hold OBO direct upload config data\n\nnamespace mdsd {\n\nstruct OboDirectConfig\n{\n    // Currently all fields are as is from the XML CDATA config (e.g., \"ProviderName,AnsiString\").\n    // Parse out as desired.\n    std::string onBehalfFields;\n    std::string containerSuffix;\n    std::string primaryPartitionField;\n    std::string partitionFields;\n    std::string onBehalfReplaceFields;\n    std::string excludeFields;\n    std::string timePeriods = \"PT1H\";   // timePeriods is optional and \"PT1H\" by default if not given.\n    std::string priority;\n};\n\n}\n\n#endif // _CFGOBODIRECTCONFIG_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CmdLineConverter.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"CmdLineConverter.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include <vector>\n#include <exception>\n#include <cstring>\n\nstd::vector<std::string> CmdLineConverter::Tokenize(const std::string& cmdline, std::function<void(const std::string&)> ctxLogOnWarning)\n{\n    Trace trace(Trace::Extensions, \"CmdLineConverter::Tokenize\");\n\n    auto current = cmdline.begin();\n    size_t pos = 1;\n    std::vector<std::string> argv;\n    std::string element;\n\n    enum TokenizerState { outside, within, escape, singlequote, doublequote, doubleescape };\n    TokenizerState state = outside;\n    while (current != cmdline.end()) {\n\n        // Generally, state transitions consume the character that causes the transition. (See bottom of loop.)\n        // Any exceptions to this rule are clearly noted (by using \"continue\").\n        switch (state) {\n        case outside:\n            // Advance past whitespace, else transition to state=within\n            switch (*current) {\n            case ' ':\n            case '\\n':\n                break;\n            default:\n                state = within;         // NOTE: This state transition does NOT consume the character\n                continue;\n            }\n            break;\n\n        case within:\n            switch (*current) {\n            case '\\\\':                  // escape character - change to matching state\n                state = escape;\n                break;\n            case '\\'':                  // start single quote - change to matching state\n                state  = singlequote;\n                break;\n            case '\"':                   // start double quote - change to matching state\n                state = doublequote;\n                break;\n            case ' ':                   // whitespace terminates the element, which we can push\n            case '\\n':                  // into the vector; change to \"outside\" state\n                argv.emplace_back(std::move(element));\n                element.clear();\n                state = outside;\n                break;\n            default:\n                element.push_back(*current);\n                break;\n            }\n            break;\n\n        case escape:\n            // Only blank, newline, backslash, singlequote, and doublequote can be escaped; if the\n            // character isn't one of those, put the backslash into the element along with the\n            // shouldn't-have-been-escaped character.\n            if (std::string(\" \\n\\\\'\\\"\").find_first_of(*current) == std::string::npos) {\n                element.push_back('\\\\');\n            }\n            element.push_back(*current);\n            state = within;\n            break;\n\n        case singlequote:\n            if (*current != '\\'') {\n                element.push_back(*current);\n            } else {\n                state = within;\n            }\n            break;\n\n        case doublequote:\n            switch (*current) {\n            case '\"':\n                state = within;\n                break;\n            case '\\\\':\n                state = doubleescape;\n                break;\n            default:\n                element.push_back(*current);\n                break;\n            }\n            break;\n\n        case doubleescape:\n            // If it's not a backslash or a doublequote, it can't be escaped, so flow the 
escape char through\n            if (std::string(\"\\\\\\\"\").find_first_of(*current) == std::string::npos) {\n                element.push_back('\\\\');\n            }\n            element.push_back(*current);\n            state = doublequote;\n            break;\n        }\n\n        current++;\n        pos++;\n    }\n\n    std::string warnMsg;\n    switch (state) {\n    case outside:\n        break;\n    case within:\n        if (element.size()) {\n            argv.emplace_back(std::move(element));\n        }\n        break;\n    case singlequote:\n    case doublequote:\n        // Issue config-file parsing warning about an unterminated quote at the end of a cmdline\n        warnMsg = \"Unterminated quote at the end of the command line\";\n        trace.NOTEWARN(warnMsg);\n        ctxLogOnWarning(warnMsg);\n        // Auto-close it and add it, even it if's an empty string\n        argv.emplace_back(std::move(element));\n        break;\n    case escape:\n    case doubleescape:\n        // Issue config-file warning about incomplete escape at the end of the cmdline\n        warnMsg = \"Incomplete escape at the end of the command line\";\n        trace.NOTEWARN(warnMsg);\n        ctxLogOnWarning(warnMsg);\n        // Add what we have\n        argv.emplace_back(std::move(element));\n        break;\n    }\n\n    return argv;\n}\n\nCmdLineConverter::CmdLineConverter(const std::string & cmdline)\n{\n    Trace trace(Trace::Extensions, \"CmdLineConverter::CmdLineConverter\");\n\n    try {\n        std::vector<std::string> strarray = Tokenize(cmdline);\n\n        execvp_nargs = strarray.size();\n\n        execvp_args  = new char*[execvp_nargs+1];\n        size_t i = 0;\n        for (const auto& x : strarray)\n        {\n            size_t len = x.length();\n            execvp_args[i] = static_cast<char*>(malloc(len+1));\n            strncpy(execvp_args[i], x.c_str(), len);\n            execvp_args[i][len] = '\\0';\n            i++;\n        }\n        execvp_args[execvp_nargs] = NULL;\n    }\n\tcatch (const std::exception& e) {\n\t\ttrace.NOTEERR(\"Failed to parse cmdline: '\" + cmdline + \"'. Error=\" + e.what());\n\t}\n}\n\nCmdLineConverter::~CmdLineConverter()\n{\n    if (execvp_args)\n    {\n        for (size_t i = 0; i < execvp_nargs; i++)\n        {\n            free(execvp_args[i]);\n            execvp_args[i] = NULL;\n        }\n        delete [] execvp_args;\n        execvp_args = NULL;\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/CmdLineConverter.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CMDLINECONVERTER_HH_\n#define _CMDLINECONVERTER_HH_\n\n#include <string>\n#include <vector>\n#include <functional>\n#include \"CfgContext.hh\"\n\nclass CmdLineConverter\n{\npublic:\n    CmdLineConverter(const std::string & cmdline);\n    \n    virtual ~CmdLineConverter();\n\n    static std::vector<std::string> Tokenize(const std::string& cmdline,\n                                             std::function<void(const std::string&)> ctxLogOnWarning = [](const std::string&){} // Don't do any warning logging by default\n                                            );\n\n    /// <summary>\n    /// Returns the char* array that can be used for execvp() directly.\n    /// The caller shouldn't free the memory from this function.\n    /// NOTE: the last item of the array is always NULL.\n    /// </summary>\n    char** argv() const { return execvp_args; }\n\n    /// <summary>\n    /// Returns the number of items in execvp args. This doesn't include\n    /// the last NULL element.\n    /// </summary>\n    size_t argc() const { return execvp_nargs; }\n\nprivate:\n    size_t execvp_nargs = 0;\n    char** execvp_args = NULL;\n};\n\n\n#endif // _CMDLINECONVERTER_HH_\n"
  },
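  {
    "path": "Diagnostic/mdsd/mdsd/CmdLineConverter.example.cc",
    "content": "// Illustrative sketch added for documentation only; this file and its path are\n// hypothetical and not part of the original source tree. It exercises the\n// public API declared in CmdLineConverter.hh: the constructor tokenizes a\n// command line with shell-like quoting, and argv()/argc() expose an\n// execvp()-ready, NULL-terminated argument vector owned by the converter.\n\n#include \"CmdLineConverter.hh\"\n\n#include <unistd.h>\n\nstatic void ExampleExec()\n{\n    // Tokenizes to {\"/bin/echo\", \"hello\", \"two words\"}.\n    CmdLineConverter conv(\"/bin/echo hello 'two words'\");\n\n    if (conv.argc() > 0) {\n        // argv() is owned by conv and already NULL-terminated;\n        // the caller must not free it.\n        execvp(conv.argv()[0], conv.argv());\n    }\n}\n"
  },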
  {
    "path": "Diagnostic/mdsd/mdsd/ConfigParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ConfigParser.hh\"\n\nConfigParser::~ConfigParser()\n{\n}\n\n\nvoid\nConfigParser::OnStartElement(const std::string& name, const xmlattr_t& properties)\n{\n\tcurrentContext = currentContext->SubContextFactory(name);\n\tcurrentContext->Enter(properties);\n}\n\nvoid\nConfigParser::OnEndElement(const std::string&)\n{\n\tCfgContext* tmp = currentContext;\n\tcurrentContext = currentContext->Leave();\n\tdelete tmp;\n}\n\n\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ConfigParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CONFIGPARSER_HH_\n#define _CONFIGPARSER_HH_\n\n#include \"SaxParserBase.hh\"\n#include \"CfgContext.hh\"\n#include \"MdsdConfig.hh\"\n#include <list>\n\nclass ConfigParser : public SaxParserBase\n{\npublic:\n\t/// <summary>\n\t/// Initialize a parser to handle a config file.\n\t/// </summary>\n\t/// <param name=\"Root\">A CfgContext class whose factory can construct contexts for the expected root element</param>\n\t/// <param name=\"Config\">The MdsdConfig to which this parse should log any warnings or errors</param>\n\tConfigParser(CfgContext* Root, MdsdConfig* Config) : currentContext(Root), config(Config) {};\n\n\tvirtual ~ConfigParser();\n\nprivate:\n\tCfgContext* currentContext;\n\tMdsdConfig* const config;\n\nprotected:\n\tvirtual void OnStartDocument() override {};\n\tvirtual void OnEndDocument() override {};\n\tvirtual void OnComment(const std::string&) override {};\n\tvirtual void OnStartElement(const std::string& name, const xmlattr_t& properties) override;\n\tvirtual void OnCharacters(const std::string& characters) override { currentContext->HandleBody(characters); };\n\tvirtual void OnEndElement(const std::string& name) override;\n\n\tvirtual void OnWarning(const std::string& text) override { config->AddMessage(MdsdConfig::warning, text); }\n\tvirtual void OnError(const std::string& text) override { config->AddMessage(MdsdConfig::error, text); }\n\tvirtual void OnFatalError(const std::string& text) override { config->AddMessage(MdsdConfig::fatal, text); }\n\n\tvirtual void OnCDataBlock(const std::string& text) override { currentContext->HandleCdata(text); }\n};\n#endif //_CONFIGPARSER_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Constants.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Constants.hh\"\n#include <fstream>\n#include <string>\n\n#define DEFINE_STRING(name, value) const std::string name { value }; const std::wstring name ## W { L ## value };\n\nuint64_t Constants::_UniqueId { 0 };\n\nnamespace Constants {\n\nconst std::string TIMESTAMP { \"TIMESTAMP\" };\nconst std::string PreciseTimeStamp { \"PreciseTimeStamp\" };\n\nnamespace Compression {\n\tconst std::string lz4hc { \"lz4hc\" };\n} // namespace Compression\n\nnamespace EventCategory {\n\tconst std::string Counter { \"counter\" };\n\tconst std::string Trace { \"trace\" };\n} // namespace EventCategory\n\nnamespace AzurePropertyNames {\n\tDEFINE_STRING(Namespace, \"namespace\")\n\tDEFINE_STRING(EventName, \"eventname\")\n\tDEFINE_STRING(EventVersion, \"eventversion\")\n\tDEFINE_STRING(EventCategory, \"eventcategory\")\n\tDEFINE_STRING(BlobVersion, \"version\")\n\tDEFINE_STRING(BlobFormat, \"format\")\n\tDEFINE_STRING(DataSize, \"datasizeinbytes\")\n\tDEFINE_STRING(BlobSize, \"blobsizeinbytes\")\n\tDEFINE_STRING(MonAgentVersion, \"monagentversion\")\n\tDEFINE_STRING(CompressionType, \"compressiontype\")\n\tDEFINE_STRING(MinLevel, \"minlevel\")\n\tDEFINE_STRING(AccountMoniker, \"accountmoniker\")\n\tDEFINE_STRING(Endpoint, \"endpoint\")\n\tDEFINE_STRING(OnbehalfFields, \"onbehalffields\")\n\tDEFINE_STRING(OnbehalfServiceId, \"onbehalfid\")\n\tDEFINE_STRING(OnbehalfAnnotations, \"onbehalfannotations\")\n} // namespace AzurePropertyNames\n\n\nuint64_t\nUniqueId()\n{\n\tstatic std::string digits { \"0123456789ABCDEFabcdef\" };\n\n\tif (!Constants::_UniqueId) {\n\t\tstd::ifstream bootid(\"/proc/sys/kernel/random/boot_id\", std::ifstream::in);\n\t\tif (bootid.is_open()) {\n\t\t\tuint64_t id = 0;\n\t\t\tint nybbles = 16;\n\t\t\twhile (nybbles && bootid.good()) {\n\t\t\t\tchar c = bootid.get();\n\t\t\t\tsize_t pos = digits.find(c);\n\t\t\t\tif (pos != std::string::npos) {\n\t\t\t\t\tif (pos > 15) {\n\t\t\t\t\t\tpos -= 6;\n\t\t\t\t\t}\n\t\t\t\t\tid <<= 4;\n\t\t\t\t\tid += pos;\n\t\t\t\t\tnybbles--;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (id == 0) {\n\t\t\t\tid = 1;\t\t// Backstop in case something got weird\n\t\t\t}\n\t\t\tConstants::_UniqueId = id;\n\t\t} else {\n\t\t\tConstants::_UniqueId = 1;\t\t// Backstop in case something got weird\n\t\t}\n\t}\n\n\treturn Constants::_UniqueId;\n}\n\n} // namespace Constants\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Constants.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _CONSTANTS_HH_\n#define _CONSTANTS_HH_\n#pragma once\n\n#define DECLARE_STRING(name) extern const std::string name; extern const std::wstring name ## W;\n\n#include <string>\n\nnamespace Constants\n{\n\textern const std::string TIMESTAMP;\n\textern const std::string PreciseTimeStamp;\n\tenum class ETWlevel : unsigned char { LogAlways = 0, Critical = 1, Error = 2, Warning = 3, Information = 4, Verbose = 5 };\n\n\tstatic constexpr uint32_t MDS_blob_version { 1 };\n\tstatic constexpr uint32_t MDS_blob_format  { 2 };\n\n\textern uint64_t _UniqueId;\n\tuint64_t UniqueId();\n\n\tnamespace Compression {\n\t\textern const std::string lz4hc;\n\t}\n\n\tnamespace EventCategory {\n\t\textern const std::string Counter;\n\t\textern const std::string Trace;\n\t} // namespace EventCategory\n\n\tnamespace AzurePropertyNames {\n\t\tDECLARE_STRING(Namespace)\n\t\tDECLARE_STRING(EventName)\n\t\tDECLARE_STRING(EventVersion)\n\t\tDECLARE_STRING(EventCategory)\n\t\tDECLARE_STRING(BlobVersion)\n\t\tDECLARE_STRING(BlobFormat)\n\t\tDECLARE_STRING(DataSize)\n\t\tDECLARE_STRING(BlobSize)\n\t\tDECLARE_STRING(MonAgentVersion)\n\t\tDECLARE_STRING(CompressionType)\n\t\tDECLARE_STRING(MinLevel)\n\t\tDECLARE_STRING(AccountMoniker)\n\t\tDECLARE_STRING(Endpoint)\n\t\tDECLARE_STRING(OnbehalfFields)\n\t\tDECLARE_STRING(OnbehalfServiceId)\n\t\tDECLARE_STRING(OnbehalfAnnotations)\n\t} // namespace AzurePropertyNames\n\n};\n\n#undef DECLARE_STRING\n\n#endif // _CONSTANTS_HH_\n\n// vim: se sw=8\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Credentials.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Credentials.hh\"\n#include <sstream>\n#include <cstdlib>\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n#include \"AzureUtility.hh\"\n\nusing std::string;\n\nstd::ostream&\noperator<<(std::ostream &os, const Credentials& creds)\n{\n    os << &creds << \"=(Moniker \" << creds.Moniker() << \" type \" << creds.TypeName() << \")\";\n    return os;\n}\n\nconst std::string&\nCredentials::ServiceType_to_string(ServiceType svcType)\n{\n    static std::map<Credentials::ServiceType, std::string> stmap =\n    {\n        { Credentials::ServiceType::XTable, \"XTable\" },\n        { Credentials::ServiceType::Blob, \"Blob\" },\n        { Credentials::ServiceType::EventPublish, \"EventPublish\" }\n    };\n    static std::string UnknownType { \"Unknown ServiceType\" };\n    auto iter = stmap.find(svcType);\n    if (iter == stmap.end()) {\n        return UnknownType;\n    } else {\n        return iter->second;\n    }\n}\n\nstd::ostream&\noperator<<(std::ostream &os, Credentials::ServiceType svcType)\n{\n    os << Credentials::ServiceType_to_string(svcType);\n    return os;\n}\n\n\n// Extract the \"se\" part of the query string and expire 30-60 minutes before then\nMdsTime\nCredentialType::AutoKey::GetExpireTimeFromSasSE(const std::string & sas)\n{\n    std::map<string, string> qry;\n    MdsdUtil::ParseQueryString(sas, qry);\n    auto exp = qry.find(\"se\");\n    if (exp == qry.end()) {\n        // Shouldn't happen, but if it does, the URI should be good for 11-12 hours.\n        return (MdsTime::Now() + MdsTime(11 * 3600 + random()%3600));\n    } else {\n        return (MdsTime(exp->second) - MdsTime(1800 + random()%1800));\n    }\n}\n\n// Three output parameters are set by the ConnectionString() methods\n//\n// For XTable, fullSvcName will be set to the actual XStore table name to be used. The namespace prefix and the version\n//      and perNDay suffixes will be applied as appropriate. The perNDay selected is \"right now\".\n// connstr will be set to the connection string.\n// expires will be set to the expiration time of the connection string (i.e. 
the time at which a new\n//      connection string should be requested).\n//\n// Returns true if a connection string could be constructed; false if not.\n\n\nbool\nCredentialType::Local::ConnectionString(const MdsEntityName &target, ServiceType svcType,\n        string &fullSvcName, string &connstr, MdsTime &expires) const\n{\n    Trace trace(Trace::Credentials, \"ConnectionString Local\");\n    Logger::LogError(\"Can't make connection string for Local moniker \" + Moniker());\n    return false;\n}\n\nbool\nCredentialType::SharedKey::ConnectionString(const MdsEntityName &target, ServiceType svcType,\n        string &fullSvcName, string &connstr, MdsTime &expires) const\n{\n    Trace trace(Trace::Credentials, \"ConnectionString SharedKey\");\n\n    try {\n        connstr = GetConnectionStringOnly(svcType);\n    } catch (std::invalid_argument& e) {\n        trace.NOTE(e.what());\n        Logger::LogError(e.what());\n        return false;\n    }\n\n    fullSvcName = target.Name();\n\n    if (target.IsConstant()) {\n        expires = MdsTime::Max();\n    } else {\n        // Rebuild connection string at next ten-day interval\n        expires = (MdsTime::Now() + 10*24*3600).RoundTenDay();\n    }\n\n    return true;\n}\n\n\nstd::string\nCredentialType::SharedKey::GetConnectionStringOnly(ServiceType svcType) const\n{\n    std::ostringstream conn;\n\n    if (ServiceType::Blob == svcType) {\n        conn << \"BlobEndpoint=\" << _blobUri;\n    }\n    else if (ServiceType::XTable == svcType) {\n        conn << \"TableEndpoint=\" << _tableUri;\n    }\n    else {\n        throw invalid_type(svcType);\n    }\n\n    conn << \";AccountName=\" << _accountName << \";AccountKey=\" << _secret;\n\n    return conn.str();\n}\n\n\nbool\nCredentialType::AutoKey::ConnectionString(\n        const MdsEntityName &target,\n        ServiceType svcType,\n        string &fullSvcName,\n        string &connstr,\n        MdsTime &expires) const\n{\n    Trace trace(Trace::Credentials, \"ConnectionString AutoKey\");\n\n    std::ostringstream conn;\n\n    string autokey;\n\n    switch (svcType) {\n    case ServiceType::EventPublish:\n        fullSvcName = target.EventName();\n        autokey = _config->GetEventPublishCmdXmlItems(Moniker(), fullSvcName).sas;\n        break;\n    case ServiceType::Blob:\n    case ServiceType::XTable:\n        fullSvcName = target.Name();\n        autokey = _config->GetAutokey(Moniker(), fullSvcName);\n        break;\n    default:\n        std::ostringstream strm;\n        strm << \"Error: AutoKey credential doesn't support service \" << svcType;\n        trace.NOTE(strm.str());\n        Logger::LogError(strm.str());\n        return false;\n    }\n\n    if (autokey.empty()) {\n        std::ostringstream strm;\n        strm << \"Can't find autokey for moniker \" << Moniker() << \", \" << svcType << \" \" << fullSvcName;\n        trace.NOTE(strm.str());\n        Logger::LogError(strm.str());\n        return false;\n    }\n\n    string endpointName;\n    string endpointSep;\n    if (ServiceType::XTable == svcType) {\n        endpointName = \"TableEndpoint\";\n        endpointSep = \"/$batch?\";\n    }\n    else if (ServiceType::Blob == svcType) {\n        endpointName = \"BlobEndpoint\";\n        endpointSep = \"/\" + fullSvcName + \"?\";\n    }\n\n    size_t pos = autokey.find(endpointSep);\t// Separates endpoint from SAS\n    if (pos == string::npos) {\n        std::ostringstream msg;\n        msg << \"Improperly formatted autokey for \" << Moniker() << \", \" << svcType << \" \" << fullSvcName;\n  
      msg << \": \\\"\" << autokey << \"\\\"\";\n        trace.NOTE(msg.str());\n        Logger::LogError(msg.str());\n        return false;\n    }\n    conn << endpointName << \"=\" << autokey.substr(0, pos);\n    conn << \";SharedAccessSignature=\" << autokey.substr(pos+endpointSep.size());\n\n    if (!autokey.empty()) {\n        expires = GetExpireTimeFromSasSE(autokey);\n    }\n\n    // If the tablename can change, rebuild at the change time, if that's sooner\n    if (!target.IsConstant()) {\n        MdsTime proposed = (MdsTime::Now() + 10*24*3600).RoundTenDay();\n        if (proposed < expires) {\n            expires = proposed;\n        }\n    }\n\n    connstr = conn.str();\n    trace.NOTE(\"AutoKey ConnectionString='\" + connstr + \"'.\");\n    return true;\n}\n\nmdsd::EhCmdXmlItems\nCredentialType::AutoKey::GetEhParameters(const std::string& eventName,\n\tCredentials::ServiceType eventType\n\t) const\n{\n    if (Credentials::ServiceType::EventPublish == eventType) {\n        return _config->GetEventPublishCmdXmlItems(Moniker(), eventName);\n    }\n    throw invalid_type(eventType);\n}\n\nCredentialType::SAS::SAS(const std::string& moniker, const std::string& acct, const std::string &token)\n\t: Credentials(moniker, SecretType::SAS), _secret(token), _accountName(acct),\n\t  _blobUri(MakePublicCloudEndpoint(acct, ServiceType::Blob)),\n\t  _tableUri(MakePublicCloudEndpoint(acct, ServiceType::XTable))\n{\n    MdsdUtil::ValidateSAS(token, _isAccountSas);\n}\n\nstd::string\nCredentialType::SAS::GetConnectionStringOnly(ServiceType svcType) const\n{\n    std::ostringstream conn;\n\n    if (ServiceType::XTable == svcType) {\n        conn << \"TableEndpoint=\" << _tableUri;\n    }\n    else if (ServiceType::Blob == svcType) {\n        conn << \"BlobEndpoint=\" << _blobUri;\n    }\n    else {\n        throw invalid_type(svcType);\n    }\n    conn << \";SharedAccessSignature=\" << _secret;\n\n    return conn.str();\n}\n\nbool\nCredentialType::SAS::ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\tstring &fullSvcName, string &connstr, MdsTime &expires) const\n{\n    Trace trace(Trace::Credentials, \"ConnectionString SAS\");\n\n    try {\n        connstr = GetConnectionStringOnly(svcType);\n    } catch (std::invalid_argument& e) {\n        trace.NOTE(e.what());\n        Logger::LogError(e.what());\n        return false;\n    }\n\n    std::map<string, string> qry;\n    MdsdUtil::ParseQueryString(_secret, qry);\n\n    if (IsAccountSas()) {\n        // The SAS is an account SAS, replacing the storage shared key, and the svc name should be a name with\n        // the 10-day suffix, not the base name.\n        fullSvcName = target.Name();\n    }\n    else if (ServiceType::XTable == svcType) {\n        // SAS (non-account SAS) includes the tablename; update to match, otherwise the SAS won't work.\n        auto item = qry.find(\"tn\");\n        if (item != qry.end()) {\n            fullSvcName = item->second;\n        } else {\n            Logger::LogError(\"SAS for MDS moniker \" + Moniker() + \" missing tn= component\");\n            fullSvcName = target.Basename();\n        }\n    }\n\n    auto exp = qry.find(\"se\");\n    if (exp == qry.end()) {\n        expires = MdsTime::Max();\t// No expiration in SAS\n    } else {\n        expires = MdsTime(exp->second);\n        if (MdsTime::Now() > expires) {\n            Logger::LogError(\"Expired SAS for MDS moniker \" + Moniker());\n        }\n    }\n\n    if (IsAccountSas()) {\n        // Set expires for next ten-day interval, 
following the storage shared key credential logic.\n        // (Note: The account SAS itself will/should never expire, like a storage shared key)\n        expires = std::min(expires, (MdsTime::Now() + 10*24*3600).RoundTenDay());\n    }\n\n    return true;\n}\n\n// vim: se sw=4 expandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Credentials.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CREDENTIALS_HH_\n#define _CREDENTIALS_HH_\n\n#include <string>\n#include <iostream>\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n#include \"EventHubCmd.hh\"\n\nclass MdsdConfig;\n\nclass Credentials {\n\n\tfriend std::ostream& operator<<(std::ostream &os, const Credentials& creds);\n\npublic:\n\tenum SecretType { None, Key, SAS };\n\t// EventPublish: Event data directly publishing to EventHub.\n\tenum class ServiceType { XTable, Blob, EventPublish };\n\tstatic const std::string& ServiceType_to_string(ServiceType svcType);\n\n\tCredentials(const std::string& moniker, SecretType type) : _moniker(moniker), _secretType(type) {}\n\tvirtual ~Credentials() {}\n\n\tconst std::string Moniker() const { return _moniker; }\n\tSecretType Type() const { return _secretType; }\n\n\tvirtual bool useAutoKey() const { return false; }\n\n\tvirtual std::string AccountName() const = 0;\n\n\tvirtual bool ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\t\tstd::string &fullSvcName, std::string &connstr,\n\t\tMdsTime &expires) const = 0;\n\n\tvirtual const std::string TypeName() const = 0;\n\n\tvirtual bool accessAnyTable() const  { return (Type() == Key || useAutoKey() ); }\n\nprivate:\n\tconst std::string _moniker;\n\tSecretType _secretType;\n\n\tCredentials() = delete;\n};\n\nstd::ostream& operator<<(std::ostream &os, const Credentials& creds);\nstd::ostream& operator<<(std::ostream &os, Credentials::ServiceType svcType);\n\nnamespace CredentialType {\n\n\nclass invalid_type : public std::logic_error\n{\npublic:\n\tinvalid_type(Credentials::ServiceType svcType)\n\t\t: std::logic_error(\"Service type [\"\n\t\t                 + Credentials::ServiceType_to_string(svcType)\n\t\t\t\t + \"] not supported by this operation\")\n\t\t{ }\n};\n\nstatic inline std::string MakePublicCloudEndpoint(const std::string& acct, Credentials::ServiceType svcType)\n{\n\tstd::string result;\n\tresult.reserve(33 + acct.size());\n\n\tresult.append(\"https://\").append(acct);\n\tif (svcType == Credentials::ServiceType::Blob) {\n\t\tresult.append(\".blob.core.windows.net\");\n\t} else if (svcType == Credentials::ServiceType::XTable) {\n\t\tresult.append(\".table.core.windows.net\");\n\t} else {\n\t\tthrow invalid_type(svcType);\n\t}\n\n\treturn result;\n}\n\nclass SharedKey : public Credentials {\npublic:\n\tSharedKey(const std::string& moniker, const std::string &name, const std::string &key)\n\t\t: Credentials(moniker, SecretType::Key), _accountName(name), _secret(key),\n\t\t  _blobUri(MakePublicCloudEndpoint(name, ServiceType::Blob)),\n\t\t  _tableUri(MakePublicCloudEndpoint(name, ServiceType::XTable)) {}\n\n\tstd::string AccountName() const { return _accountName; }\n\tbool ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\t\tstd::string &fullSvcName, std::string &connstr, MdsTime &expires) const;\n\tconst std::string TypeName() const { return std::string{\"SharedKey\"}; }\n\n\tvoid TableUri(const std::string& uri) { _tableUri = uri; }\n\tvoid BlobUri(const std::string& uri) { _blobUri = uri; }\n\n\t// To get the connection string only, without passing target. 
Will throw if svcType is neither blob nor table.\n\tstd::string GetConnectionStringOnly(ServiceType svcType) const;\n\nprivate:\n\tstd::string _accountName;\n\tstd::string _secret;\n\tstd::string _blobUri;\n\tstd::string _tableUri;\n};\n\nclass AutoKey : public Credentials {\npublic:\n\tAutoKey(const std::string& moniker, MdsdConfig *config) : Credentials(moniker, SecretType::SAS), _config(config) {}\n\n\tstd::string AccountName() const { return std::string{\"AutoKey\"}; }\n\tbool ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\t\tstd::string &fullSvcName, std::string &connstr, MdsTime &expires) const;\n\n\tconst std::string TypeName() const { return std::string{\"AutoKey\"}; }\n\tbool useAutoKey() const { return true; }\n\tstatic MdsTime GetExpireTimeFromSasSE(const std::string & sas);\n\n\tmdsd::EhCmdXmlItems GetEhParameters(const std::string& eventName, Credentials::ServiceType eventType) const;\n\nprivate:\n\tMdsdConfig *_config;\n};\n\nclass SAS : public Credentials {\npublic:\n\tSAS(const std::string& moniker, const std::string& acct, const std::string &token);\n\n\tstd::string AccountName() const { return _accountName; }\n\tbool ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\t\tstd::string &fullSvcName, std::string &connstr, MdsTime &expires) const;\n\tconst std::string TypeName() const { return std::string{\"SAS\"}; }\n\tconst std::string Token() const { return _secret; }\n\tbool IsAccountSas() const { return _isAccountSas; }\n\tbool accessAnyTable() const  { return _isAccountSas; }\n\n\tvoid BlobUri(const std::string& uri) { _blobUri = uri; }\n\tvoid TableUri(const std::string& uri) { _tableUri = uri; }\n\n\t// To get the connection string only, without passing target. Will throw if svcType is neither blob nor table.\n\tstd::string GetConnectionStringOnly(ServiceType svcType) const;\n\nprivate:\n\tstd::string _secret;\n\tstd::string _accountName;\n\tstd::string _blobUri;\n\tstd::string _tableUri;\n\tbool _isAccountSas;\n};\n\nclass Local : public Credentials {\npublic:\n\tLocal() : Credentials(std::string{\"(LOCAL)\"}, SecretType::None) {}\n\n\tstd::string AccountName() const { return std::string{\"Local\"}; }\n\tbool ConnectionString(const MdsEntityName &target, ServiceType svcType,\n\t\tstd::string &fullSvcName, std::string &connstr, MdsTime &expires) const;\n\tconst std::string TypeName() const { return std::string{\"Local\"}; }\n};\n\n}\n\n#endif // _CREDENTIALS_HH_\n// vim: set ai sw=8 :\n"
  },
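  {
    "path": "Diagnostic/mdsd/mdsd/examples/SasExpirySketch.cc",
    "content": "// Illustrative sketch, not part of the original mdsd sources: the idea behind\n// CredentialType::AutoKey::GetExpireTimeFromSasSE -- pull the \"se\" (signed\n// expiry) field out of a SAS query string and schedule a renewal 30-60 minutes\n// early. ParseQueryString below is a simplified stand-in for\n// MdsdUtil::ParseQueryString, and plain time_t stands in for MdsTime; for the\n// sketch, \"se\" carries epoch seconds rather than an RFC 3339 timestamp.\n#include <cstdlib>\n#include <ctime>\n#include <iostream>\n#include <map>\n#include <sstream>\n#include <string>\n\nstatic std::map<std::string, std::string> ParseQueryString(const std::string& qs)\n{\n    std::map<std::string, std::string> out;\n    std::istringstream in(qs);\n    std::string pair;\n    while (std::getline(in, pair, '&')) {        // split on '&', then on '='\n        auto eq = pair.find('=');\n        if (eq != std::string::npos) {\n            out[pair.substr(0, eq)] = pair.substr(eq + 1);\n        }\n    }\n    return out;\n}\n\n// Renew 30-60 minutes before the SAS expires; when no \"se\" is present, assume\n// the token is good for roughly 11-12 hours, as the real code does.\nstatic time_t GetRenewTime(const std::string& sas)\n{\n    auto qry = ParseQueryString(sas);\n    auto se = qry.find(\"se\");\n    if (se == qry.end()) {\n        return time(nullptr) + 11 * 3600 + random() % 3600;\n    }\n    return static_cast<time_t>(strtoll(se->second.c_str(), nullptr, 10)) - (1800 + random() % 1800);\n}\n\nint main()\n{\n    std::string sas = \"sv=2019-02-02&ss=bt&se=1900000000&sig=abc\";\n    std::cout << \"renew at (epoch seconds): \" << GetRenewTime(sas) << \"\\n\";\n    return 0;\n}\n"
  },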
  {
    "path": "Diagnostic/mdsd/mdsd/DaemonConf.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"DaemonConf.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n#include \"Version.hh\"\n\n#include <string>\n#include <sstream>\n\nextern \"C\" {\n#include <stdlib.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <errno.h>\n#include <pwd.h>\n#include <grp.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n}\n\n\nuid_t DaemonConf::GetUidFromName(const char* username)\n{\n    Trace trace(Trace::Daemon, \"GetUidFromName\");\n\n    uid_t uid = 0;    \n    if (!username) {\n        Logger::LogError(\"Error: GetUidFromName(): unexpected NULL pointer for username.\");\n        return uid;\n    }\n    \n    struct passwd *resultObj;\n    struct passwd wrkObj;\n    char buf[2048];\n    \n    getpwnam_r(username, &wrkObj, buf, sizeof(buf), &resultObj);\n    if (resultObj == NULL) {\n        Logger::LogWarn(\"WARN: GetUidFromName(): No user called '\" + std::string(username) + \"' is found.\");\n    }\n    else {\n        uid = resultObj->pw_uid;\n    }\n    trace.NOTE(\"Name='\" + std::string(username) + \"'. UID=\" + std::to_string(uid));\n    return uid;\n}\n\n\ngid_t DaemonConf::GetGidFromName(const char* groupname)\n{\n    Trace trace(Trace::Daemon, \"GetGidFromName\");\n    gid_t gid = 0;    \n    if (!groupname) {\n        Logger::LogError(\"Error: GetGidFromName(): unexpected NULL for groupname\");\n        return gid;\n    }\n\n    struct group *resultObj;\n    struct group wrkObj;\n    char buf[2048];\n    \n    getgrnam_r(groupname, &wrkObj, buf, sizeof(buf), &resultObj);\n    if (resultObj == NULL) {\n        Logger::LogWarn(\"WARN: GetGidFromName(): No group called '\" + std::string(groupname) + \"' is found.\");\n    }\n    else {\n        gid = resultObj->gr_gid;\n    }\n    \n    trace.NOTE(\"GetGidFromName() returned: Group='\" + std::string(groupname) + \"'. GID=\" + std::to_string(gid));\n    return gid;\n}\n\n\nvoid DaemonConf::SetPriv(uid_t uid, gid_t gid)\n{\n    Trace trace(Trace::Daemon, \"SetPriv\");\n    std::string uidstr = std::to_string(uid);\n    std::string gidstr = std::to_string(gid);\n\n    if (0 == uid) {\n        Logger::LogError(\"Error: unexpected user id \" + uidstr + \". Do nothing.\");\n        return;\n    }\n    if (0 == gid) {\n        Logger::LogError(\"Error: unexpected group id \" + gidstr + \". Do nothing.\");\n        return;\n    }    \n\n    int r2 = setgid(gid);\n    if (r2) {\n        int errnum = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(\"Error: fatal error. setgid() failed to set id \" + gidstr + \". error: \" + errstr);\n        exit(1);\n    }\n    trace.NOTE(\"mdsd's groupid changed to \" + gidstr);\n\n    int r1 = setuid(uid);\n    if (r1) {\n        int errnum = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(\"Error: fatal error. setuid() failed to set id \" + uidstr + \". 
error: \" + errstr);\n        exit(1); \n    }\n    else {        \n        trace.NOTE(\"mdsd's userid changed to id \" + uidstr);\n    }\n}\n\n/*\n  Run mdsd in daemon mode by forking the child process.\n */\nvoid DaemonConf::RunAsDaemon(const std::string & pidfile)\n{\n    Trace trace(Trace::Daemon, \"RunAsDaemon\");\n    pid_t ppid = getpid();\n    pid_t pid = fork();\n    if (-1 == pid) {\n        int errnum = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(\"Fork child process failed with -1. error: \" + errstr);\n        exit(1);\n    }\n    if (pid > 0) {\n        Logger::LogError(\"Parent process \" + std::to_string(ppid) + \" exit. child process id=\" + std::to_string(pid));\n        exit(0);\n    }\n    \n    if (WritePid(pidfile) == false) {\n        exit(1);\n    }\n    \n    umask(0);\n    // Create a new session for the child process\n    pid_t sid = setsid();\n    if (sid < 0) {\n        int errnum = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(\"child process setsid() returned \" + std::to_string(sid) + \". error: \" + errstr);\n        exit(1);\n    }\n    if ((chdir(\"/\")) < 0) {\n        int errnum = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(\"Chdir() to root directory failed: \" + errstr);\n        exit(1);\n    }\n    \n    close(STDIN_FILENO);\n    close(STDOUT_FILENO);\n    close(STDERR_FILENO);\n\n    int uid = GetUidFromName(runAsUser);\n    int gid = GetGidFromName(runAsGroup);\n    if (uid >= 0 && gid >= 0) {\n        SetPriv(uid, gid);\n    }\n\n    std::ostringstream msg;\n    msg << \"START mdsd daemon ver(\" << Version::Version << \") pid(\" << getpid() << \") uid(\" << uid << \") gid (\" << gid << \")\" << std::endl;\n    Logger::LogError(msg.str());\n    Logger::LogWarn(msg.str());\n    Logger::LogInfo(msg.str());\n}\n\nbool DaemonConf::WritePid(const std::string & pidfile)\n{\n    Trace trace(Trace::Daemon, \"WritePid\");\n    int fd = open(pidfile.c_str(), O_WRONLY|O_CREAT|O_CLOEXEC, 0644);\n    MdsdUtil::FdCloser fdCloser(fd);\n\n    if (fd < 0) {\n        int errnum = errno;\n        std::ostringstream buf;\n        buf << \"Error: failed to open or create Pid file: \" << pidfile << \". \" << MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(buf.str());\n        return false;\n    }\n    \n    bool status = true;\n    try{\n        MdsdUtil::WriteBufferAndNewline(fd, std::to_string(getpid()));\n    }\n    catch (const std::runtime_error & e) {\n        Logger::LogError(std::string(\"Error writing pid file: \") + e.what());\n        status = false;\n    }\n\n    return status;\n}\n\nbool DaemonConf::Chown(const std::string& filepath)\n{   \n    bool isOK = true;\n\n    uid_t uid = GetUidFromName(runAsUser);\n    gid_t gid = GetGidFromName(runAsGroup);\n    if (uid > 0 && gid > 0) {\n        int r = chown(filepath.c_str(), uid, gid);\n        if (r) {\n            int errnum = errno;\n            std::string errstr = MdsdUtil::GetErrnoStr(errnum);\n            Logger::LogError(\"Error: Chown() failed. logfile='\" + filepath + \"' user='\" + runAsUser\n                             + \"' group='\" + runAsGroup + \"' . error: \" + errstr);\n            isOK = false;\n        }\n    }\n    return isOK;\n}\n\n// vim: se ai sw=4 expandtab tabstop=4 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/DaemonConf.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _DAEMONCONF_HH_\n#define _DAEMONCONF_HH_\n      \n#include <sys/types.h>\n#include <string>\n\nclass DaemonConf\n{\npublic:\n    /*\n      Run mdsd in daemon mode by forking the child process.\n    */\n    static void RunAsDaemon(const std::string& pidfile);\n\n    /*\n      Change a file's user and group to the daemon runtime user/group.\n     */\n    static bool Chown(const std::string& filepath);\n\nprivate:\n    /*\n      Get a given username's user id. If user is not found, return 0.\n    */\n    static uid_t GetUidFromName(const char* username);\n\n    /*\n      Get a given groupname's groupid. If group is not found, return 0.\n    */\n    static gid_t GetGidFromName(const char* groupname);\n\n    /*\n      Set daemon userid and groupid to given Ids. If uid or gid are 0, do nothing.\n    */\n    static void SetPriv(uid_t uid, gid_t gid);\n\n    /*\n      Write final daemon process's process Id to pid file.\n     */\n    static bool WritePid(const std::string & pidfile);\n\n\nprivate:\n    constexpr static const char * runAsUser = \"syslog\";\n    constexpr static const char * runAsGroup = \"syslog\";\n};\n\n#endif\n"
  },
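  {
    "path": "Diagnostic/mdsd/mdsd/examples/DaemonizeSketch.cc",
    "content": "// Illustrative sketch, not part of the original mdsd sources: the classic\n// daemonization sequence that DaemonConf::RunAsDaemon follows -- fork, let the\n// parent exit, write the pid file, start a new session, chdir to /, and close\n// the stdio descriptors. Error handling is trimmed to keep the sketch short;\n// the pid-file path in main() is hypothetical.\n#include <cstdio>\n#include <cstdlib>\nextern \"C\" {\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <unistd.h>\n}\n\nstatic void WritePid(const char* pidfile)\n{\n    int fd = open(pidfile, O_WRONLY | O_CREAT | O_CLOEXEC, 0644);\n    if (fd >= 0) {\n        dprintf(fd, \"%d\\n\", static_cast<int>(getpid()));\n        close(fd);\n    }\n}\n\nstatic void RunAsDaemon(const char* pidfile)\n{\n    pid_t pid = fork();\n    if (pid < 0) exit(1);        // fork failed\n    if (pid > 0) exit(0);        // parent exits; the child carries on\n\n    WritePid(pidfile);\n    umask(0);\n    if (setsid() < 0) exit(1);   // detach from the controlling terminal\n    if (chdir(\"/\") < 0) exit(1); // don't pin any mount point\n    close(STDIN_FILENO);\n    close(STDOUT_FILENO);\n    close(STDERR_FILENO);\n}\n\nint main()\n{\n    RunAsDaemon(\"/tmp/sketch-daemon.pid\");\n    // Daemon work would start here; sleep briefly so the process is observable.\n    sleep(5);\n    return 0;\n}\n"
  },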
  {
    "path": "Diagnostic/mdsd/mdsd/DerivedEvent.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"DerivedEvent.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Pipeline.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"LocalSink.hh\"\n\nDerivedEvent::DerivedEvent(MdsdConfig * config, const MdsEntityName &target, Priority prio, const MdsTime &interval,\n                std::string source)\n\t: ITask(interval), _config(config), _target(target), _prio(prio), _head(nullptr), _tail(nullptr)\n{\n\tTrace trace(Trace::DerivedEvent, \"DerivedEvent constructor\");\n\n\t// Find the source; make sure it exists\n\n\t_localSink = LocalSink::Lookup(source);\n\tif (! _localSink) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"DerivedEvent \" << target << \" references undefined source \" << source;\n\t\tLogger::LogError(msg.str());\n\t\tthrow std::runtime_error(msg.str());\n\t}\n\t_localSink->SetRetentionPeriod(interval);\n}\n\nDerivedEvent::~DerivedEvent()\n{\n}\n\n// Initial start time is a few seconds past the end of the current interval\nMdsTime\nDerivedEvent::initial_start()\n{\n\tTrace trace(Trace::DerivedEvent, \"DerivedEvent::initial_start\");\n\n\tMdsTime start;\t// Default constructor sets it to \"now\"\n\n\tstart += interval();\n\tstart = start.Round(interval().to_time_t());\n\tstart += MdsTime(2 + random()%5, random()%1000000);\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Initial time for event: \" << start;\n\t\ttrace.NOTE(msg.str());\n\t}\n\n\treturn start;\n}\n\nvoid\nDerivedEvent::AddStage(PipeStage *stage)\n{       \n\tTrace trace(Trace::DerivedEvent, \"DerivedEvent::AddStage\");\n\n        if (trace.IsActive()) {\n                std::ostringstream msg;\n                msg << \"DerivedEvent \" << this << \" adding stage \" << stage->Name();\n                trace.NOTE(msg.str());\n        }\n\n        if (! _tail) {\n                // This is the first stage in the pipeline; set the head to point here\n                _head = stage;\n        } else {\n                // There's already a pipeline; make the old tail point to the newly-added stage\n                _tail->AddSuccessor(stage);\n        }\n        // Either way, we have a new tail in the pipeline\n        _tail = stage;\n}\n\n// Pull all the CanonicalEntity instances from the source that match the interval and send a dupe\n// into the processing pipeline; signal \"done\" after the last instance.\nvoid\nDerivedEvent::execute(const MdsTime& startTime)\n{\n\tTrace trace(Trace::DerivedEvent, \"DerivedEvent::execute\");\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Start time \" << startTime << \", end time \" << startTime + interval();\n\t\ttrace.NOTE(msg.str());\n\t}\n\n\tauto head = _head;\n\n\t_head->Start(startTime);\n\t_localSink->Foreach(startTime, interval(), [head](const CanonicalEntity& ce){ head->Process(new CanonicalEntity(ce)); });\n\t_localSink->Flush();\t\t// Tell the sink to do its housekeeping\n\t_head->Done();\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/DerivedEvent.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _DERIVEDEVENT_HH_\n#define _DERIVEDEVENT_HH_\n\n#include \"ITask.hh\"\n#include \"MdsEntityName.hh\"\n#include \"Priority.hh\"\n\nclass MdsdConfig;\nclass PipeStage;\nclass LocalSink;\n\nclass DerivedEvent : public ITask\n{\npublic:\n\tDerivedEvent(MdsdConfig * config, const MdsEntityName &target, Priority prio, const MdsTime &interval,\n\t\tstd::string source);\n\t// I want a move constructor...\n\tDerivedEvent(DerivedEvent &&orig);\n\t// But do not want a copy constructor nor a default constructor\n\tDerivedEvent(DerivedEvent &) = delete;\n\tDerivedEvent() = delete;\n\n\tvirtual ~DerivedEvent();\n\n\tconst MdsEntityName & Target() const { return _target; }\n\tint FlushInterval() const { return _prio.Duration(); }\n\tvoid AddStage(PipeStage *);\n\nprotected:\n\t// Subclasses *must* override the execute() method, which is called to perform the actual\n\t// time-scheduled class.\n\tvirtual void execute(const MdsTime&);\n\n#if 0\n// Dunno if I need these....\n\n\t// Subclass gets notified via this callout when start() is called. If the subclass returns false,\n\t// the start operation aborts. In this case, start() can be called again; a failed startup is different\n\t// from a successful start followed by a cancel().\n\tvirtual bool on_start() { return true; }\n\n\t// Subclass gets notified when cancel() is called.\n\tvirtual void on_cancel() { }\n#endif\n\n\t// We'll want the initial start time to be shortly after the end of the next \"interval\".\n\t// We'll add some hysteresis to that start time.\n\tvirtual MdsTime initial_start();\n\nprivate:\n\tMdsdConfig *_config;\n\tMdsEntityName _target;\n\tPriority _prio;\n\tLocalSink *_localSink;\n\n\tPipeStage *_head;\n\tPipeStage *_tail;\n};\n\n#endif // _DERIVEDEVENT_HH_\n\n// vim: se sw=8 :\n"
  },
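  {
    "path": "Diagnostic/mdsd/mdsd/examples/PipelineStageSketch.cc",
    "content": "// Illustrative sketch, not part of the original mdsd sources: the head/tail\n// bookkeeping DerivedEvent::AddStage uses to grow a singly linked pipeline of\n// stages. Stage, Upper, and Print are hypothetical stand-ins for PipeStage and\n// its concrete stages; items are plain strings instead of CanonicalEntity.\n#include <cctype>\n#include <iostream>\n#include <string>\n#include <utility>\n\nclass Stage {\npublic:\n    virtual ~Stage() { delete _next; }           // deleting the head frees the chain\n    void AddSuccessor(Stage* next) { _next = next; }\n    virtual void Process(std::string item) { if (_next) _next->Process(std::move(item)); }\nprotected:\n    Stage* _next = nullptr;\n};\n\nclass Upper : public Stage {\npublic:\n    void Process(std::string item) override {\n        for (auto& c : item) c = std::toupper(static_cast<unsigned char>(c));\n        Stage::Process(std::move(item));         // hand off to the next stage\n    }\n};\n\nclass Print : public Stage {\npublic:\n    void Process(std::string item) override { std::cout << item << \"\\n\"; }\n};\n\nclass Pipeline {\npublic:\n    ~Pipeline() { delete _head; }\n    // First stage becomes the head; later stages are linked after the old tail.\n    void AddStage(Stage* s) {\n        if (!_tail) _head = s; else _tail->AddSuccessor(s);\n        _tail = s;\n    }\n    void Process(std::string item) { if (_head) _head->Process(std::move(item)); }\nprivate:\n    Stage* _head = nullptr;\n    Stage* _tail = nullptr;\n};\n\nint main()\n{\n    Pipeline p;\n    p.AddStage(new Upper);\n    p.AddStage(new Print);\n    p.Process(\"derived event row\");   // prints DERIVED EVENT ROW\n    return 0;\n}\n"
  },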
  {
    "path": "Diagnostic/mdsd/mdsd/Engine.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Engine.hh\"\n#include <iostream>\n#include <cstdlib>\n#include <ctime>\n#include <cmath>\n#include <functional>\n#include <sstream>\n\n#include \"MdsValue.hh\"\n#include \"TableSchema.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"EventJSON.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"OmiTask.hh\"\n#include \"Trace.hh\"\n#include \"LocalSink.hh\"\n#include \"EtwEvent.hh\"\n#include \"Utility.hh\"\n#include \"EventHubUploaderMgr.hh\"\n\nusing std::string;\n\nEngine::Engine() : blackholeEvents(false), _startTime(time(0)), current_config(nullptr)\n{\n}\n\nEngine::~Engine() {\n}\n\nEngine* Engine::engine = 0;\n\nvoid\nEngine::SetConfiguration(MdsdConfig* newconfig)\n{\n\tTrace trace(Trace::ConfigLoad, \"Engine::SetConfiguration\");\n\t\n\tEngine *current = Engine::GetEngine();\n\n\tstatic std::mutex mtx;\n\tstd::unique_lock<std::mutex> lock(mtx);\n\tMdsdConfig *prev_config = current->current_config;\n\tcurrent->current_config = newconfig;\n\tlock.unlock();\n\n\t//current->PushSchemas(newconfig);\n\tnewconfig->Initialize();\n\n\tnewconfig->StartScheduledTasks();\n\n\tif (prev_config) {\n\t\tprev_config->SelfDestruct(900);\t\t// Old config will delete itself in 900 seconds\n\t}\n}\n\n#ifdef DOING_MEMCHECK\nvoid\nEngine::ClearConfiguration()\n{\n\tcurrent_config->StopScheduledTasks();\n\tdelete current_config;\n\tcurrent_config = nullptr;\n}\n#endif\n\nvoid\nEngine::ProcessEvent(EventJSON& event)\n{\n\tTrace trace(Trace::EventIngest, \"Engine::ProcessEvent\");\n\n\t// Grab the config pointer at the beginning of processing; if the config gets\n\t// swapped while we're working, we won't care. The engine is careful to hold on\n\t// to previous MdsdConfig objects for a lengthy period of time after they're\n\t// swapped out.\n\tMdsdConfig* Config = GetConfig();\n\n\tif (blackholeEvents) {\n\t\treturn;\n\t}\n\n\t// Actual processing goes here\n\t// Listener() did basic validation before calling ProcessEvent()\n\tstring Source(event.GetSource());\n\n\tauto sink = LocalSink::Lookup(Source);\n\tif (!sink) {\n\t\tLogger::LogWarn(\"Received an event from source \\\"\" + Source + \"\\\" not used elsewhere in the active configuration\");\n\t\treturn;\n\t}\n\n\tif (event.IsEtwEvent()) {\n\t\tEtwEvent etwevt(event);\n\t\tetwevt.Process(sink);\n\t\treturn;\n\t}\n\n\tTableSchema* Schema = Config->GetSchema(Source);\n\tif (!Schema) {\n\t\tLogger::LogWarn(\"Received an event from source \\\"\" + Source + \"\\\" with no defined schema.\");\n\t\treturn;\n\t}\n\n\t// Build the CanonicalEntity to hold this event by running through the elements of the input event\n\t// and using the metadata in the schema to add columns\n\tauto ce = std::make_shared<CanonicalEntity>( Schema->Size() );\n\tce->SetPreciseTime(event.GetTimestamp());\n\tce->SetSchemaId(sink->SchemaId());\n\tauto datum = event.data_begin();\n\tTableSchema::const_iterator iter = Schema->begin();\n\twhile (datum != event.data_end() && iter != Schema->end()) {\n\t\tauto value = (*iter)->Convert(&(*datum));\n\t\tif (!value) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Bad event (source \" << Source << \", schema \" << Schema->Name() << \"): couldn't convert value for \";\n\t\t\tmsg << (*iter)->Name(); msg << \" to \" << (*iter)->MdsType();\n\t\t\tmsg << \". 
Raw event: \" << event;\n\t\t\tLogger::LogError(msg.str());\n\t\t\treturn;\n\t\t}\n\t\tce->AddColumn((*iter)->Name(), value);\n\t\t++datum;\n\t\t++iter;\n\t}\n\tif (datum != event.data_end() || iter != Schema->end()) {\n\t\tstd::stringstream msg;\n\t\tmsg << \"Event from source '\" << Source << \"' contained unexpected number of columns. \";\n\t\tmsg << Source << \" has \" << event.data_count() << \"; \";\n\t\tmsg << \"Schema '\" << Schema->Name() << \"' has \" << Schema->Size() << \".\";\n\t\tLogger::LogError(msg.str());\n\t} else {\n\t\t// Add the CanonicalEntity object to the sink we found (above).\n\t\tsink->AddRow(ce);\n\t}\n}\n\nEngine*\nEngine::GetEngine()\n{\n\tif (!engine) {\n\t\tengine = new Engine();\n\t}\n\treturn engine;\n}\n\nbool\nEngine::GetConverter(const string& sourcetype, const string& targettype, typeconverter_t& converter)\n{\n\tstd::string inOutType;\n\tinOutType.reserve(sourcetype.size() + 1 + targettype.size());\n\tinOutType.append(sourcetype);\n\tinOutType.append(1, '/');\n\tinOutType.append(targettype);\n\treturn GetConverter(inOutType, converter);\n}\n\nbool\nEngine::GetConverter(const std::string & inOutType, typeconverter_t& converter)\n{\n\tauto iter = convertermap.find(inOutType);\n\tif (iter == convertermap.end()) {\n\t\treturn false;\n\t}\n\tconverter = iter->second;\n\treturn true;\n}\n\nstd::string\nEngine::ListConverters()\n{\n\tstd::ostringstream msg;\n\tbool isFirst = true;\n\n\tfor (const auto& item : convertermap) {\n\t\tif (isFirst) {\n\t\t\tisFirst = false;\n\t\t} else {\n\t\t\tmsg << \" \";\n\t\t}\n\t\tmsg << \"'\" << item.first << \"'\";\n\t}\n\n\treturn msg.str();\n}\n\nstd::map<std::string, typeconverter_t > Engine::convertermap = {\n\t{ \"bool/mt:bool\", [](cJSON * src) -> MdsValue* {\n\t\t\tif (src->type == cJSON_False) return new MdsValue(false);\n\t\t\tif (src->type == cJSON_True) return new MdsValue(true);\n\t\t\treturn 0;\n\t\t}\n\t},\n\t{ \"str/mt:bool\", [](cJSON * src) -> MdsValue* {\n\t\tif (cJSON_String == src->type && src->valuestring) {\n\t\t\tbool b = MdsdUtil::to_bool(src->valuestring);\n\t\t\treturn new MdsValue(b);\n\t\t}\n\t\treturn nullptr;\n\t}\n\t},\n\t{ \"str/mt:wstr\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_String) ? ( new MdsValue(src->valuestring)) : 0;\n\t}\n\t}, \n\t{ \"double/mt:float64\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_Number) ? ( new MdsValue(src->valuedouble)) : 0;\n\t}\n\t},\n\t{ \"str/mt:float64\", [](cJSON * src) -> MdsValue* {\n\t\tif (cJSON_String == src->type && src->valuestring) {\n\t\t\treturn new MdsValue(atof(src->valuestring));\n\t\t}\n\t\treturn nullptr;\n\t}\n\t},\n\t{ \"int/mt:int32\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_Number) ? ( new MdsValue(long(src->valueint))) : 0;\n\t}\n\t},\n\t{ \"str/mt:int32\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_String) ? ( new MdsValue(atol(src->valuestring))) : 0;\n\t}\n\t},\n\t{ \"int/mt:int64\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_Number) ? ( new MdsValue(src->valueint)) : 0;\n\t}\n\t},\n\t{ \"str/mt:int64\", [](cJSON * src) -> MdsValue* {\n\t\treturn (src->type == cJSON_String) ? 
( new MdsValue(strtoll(src->valuestring, NULL, 10))) : 0;\n\t}\n\t},\n\t{ \"int-timet/mt:utc\", [](cJSON * src) -> MdsValue* {\n\t\treturn MdsValue::time_t_to_utc(src); \n\t}\n\t},\n\t{ \"double-timet/mt:utc\", [](cJSON * src) -> MdsValue* {\n\t\treturn MdsValue::double_time_t_to_utc(src);\n\t}\n\t},\n\t{ \"str-rfc3339/mt:utc\", [](cJSON * src) -> MdsValue* {\n\t\treturn MdsValue::rfc3339_to_utc(src);\n\t}\n\t}\n\n};\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Engine.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _ENGINE_HH_\n#define _ENGINE_HH_\n\n#include <map>\n#include <string>\n#include <functional>\n#include <set>\n#include <utility>\n#include <mutex>\n#include \"MdsValue.hh\"\n#include \"EventJSON.hh\"\n#include \"MdsSchemaMetadata.hh\"\n#include \"MdsEntityName.hh\"\n\nclass MdsdConfig;\nclass Credentials;\n\nclass Engine\n{\npublic:\n\t~Engine();\n\n\t/// <summary>\n\t/// Get the singleton Engine instance. Not thread-safe for creation.\n\t/// </summary>\n\tstatic Engine* GetEngine();\n\n\t/// <summary>Cause incoming events to be blackholed instead of being sent to MDS</summary>\n\tvoid BlackholeEvents() { blackholeEvents = true; }\n\n\t/// <summary>Process an event</summary>\n\t/// <param name=\"event\">The event to be processed</param>\n\tvoid ProcessEvent(EventJSON& event);\n\n\t/// <summary>\n\t/// Transfer a configuration into the active engine. The previous configuration will remain undeleted\n\t/// for a time; when the engine believes it's no longer in use, the engine will delete it.\n\t/// </summary>\n\t/// <param name=\"newconfig\">The new configuration object.</param>\n\tstatic void SetConfiguration(MdsdConfig* newconfig);\n\n\t/// <summary>Fetch type converter. Returns false if sourcetype can't be converted to targettype</summary>\n\t/// <param name=\"sourcetype\">Name of the original (JSON) type (e.g. \"str\", \"int-timet\")</param>\n\t/// <param name=\"targettype\">Name of the destination (MDS) type (e.g. \"mt_bool\")</param>\n\t/// <param name=\"converter\">The type converter function, if one was found</param>\n\tbool GetConverter(const std::string& sourcetype, const std::string& targettype, typeconverter_t& converter);\n\n\t/// <summary>Fetch type converter. Return false if inOutType cannot be found.</summary>\n\t/// <param name=\"inOutType\">Name pairs in the format of \"jsonType/mdsType\". (e.g. \"bool/mt:bool\") </param>\n\t/// <param name=\"converter\">The type converter function, if one was found</param>\n\tbool GetConverter(const std::string & inOutType, typeconverter_t& converter);\n\n\t/// <summary>Get a list of all configured type converters, suitable for display in error messages.</summary>\n\tstatic std::string ListConverters();\n\n\tMdsdConfig* GetConfig() { return current_config; }\n\n\t/// <summary>Determines if the schema has been pushed for this account and tablename. Calling this\n\t/// method updates the cache of which schemas have been pushed.</summary>\n\t/// <return>True if this is the first time NeedsPush has been called with these args.</return>\n\t//bool NeedsPush(Credentials* creds, const MdsEntityName& target, const MdsSchemaMetadata*);\n\n#ifdef DOING_MEMCHECK\n\tvoid ClearPushedCache() { std::unique_lock<std::mutex> lock(_schemaCacheMutex);_pushedEvents.clear(); }\n\tvoid ClearConfiguration();\n#endif\n\nprivate:\n\tEngine();\n\n\tstatic Engine* engine;\n\tbool blackholeEvents;\n\ttime_t _startTime;\n\n\tMdsdConfig* current_config;\n\n\tstatic std::map<std::string, typeconverter_t > convertermap;\n\n\tstd::set<std::pair<const std::string, const MdsSchemaMetadata *> > _pushedEvents;\n\tstd::mutex _schemaCacheMutex;\n};\n\n#endif //_ENGINE_HH_\n\n// vim: se sw=8 :\n"
  },
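  {
    "path": "Diagnostic/mdsd/mdsd/examples/ConverterMapSketch.cc",
    "content": "// Illustrative sketch, not part of the original mdsd sources: the lookup\n// scheme Engine uses for type converters -- a map keyed by the combined\n// \"sourceType/targetType\" string, holding std::function converters. cJSON\n// input is replaced by plain std::string and the value type by long so the\n// sketch stays self-contained; the two sample keys mirror real ones.\n#include <functional>\n#include <iostream>\n#include <map>\n#include <string>\n\nusing converter_t = std::function<long(const std::string&)>;\n\nstatic const std::map<std::string, converter_t> convertermap = {\n    { \"str/mt:int64\",  [](const std::string& s) { return std::stol(s); } },\n    { \"bool/mt:int64\", [](const std::string& s) { return s == \"true\" ? 1L : 0L; } },\n};\n\n// Mirrors the shape of Engine::GetConverter(sourcetype, targettype, converter):\n// build the \"in/out\" key, look it up, and report failure via the return value.\nstatic bool GetConverter(const std::string& src, const std::string& dst, converter_t& out)\n{\n    auto iter = convertermap.find(src + \"/\" + dst);\n    if (iter == convertermap.end()) {\n        return false;\n    }\n    out = iter->second;\n    return true;\n}\n\nint main()\n{\n    converter_t conv;\n    if (GetConverter(\"str\", \"mt:int64\", conv)) {\n        std::cout << conv(\"42\") + 1 << \"\\n\";   // prints 43\n    }\n    if (!GetConverter(\"str\", \"mt:guid\", conv)) {\n        std::cout << \"no converter for str/mt:guid\\n\";\n    }\n    return 0;\n}\n"
  },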
  {
    "path": "Diagnostic/mdsd/mdsd/EtwEvent.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EtwEvent.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"LocalSink.hh\"\n#include \"MdsValue.hh\"\n#include \"Engine.hh\"\n\n#include <sstream>\n\nstd::unordered_map<std::string, SchemaCache::IdType> EtwEvent::m_schemaIdMap;\n\nvoid\nEtwEvent::Process(LocalSink *sink)\n{\n    Trace trace(Trace::EventIngest, \"EtwEvent::Process\");\n    if (!sink) {\n        Logger::LogError(\"Error: unexpected NULL pointer for LocalSink.\");\n        return;\n    }\n\n    if (!m_event.IsEtwEvent()) {\n        Logger::LogError(\"Error: the input event is not an ETW event. Do nothing.\");\n        return;\n    }\n\n    std::string guidstr = ParseGuid();\n    if (guidstr.empty()) {\n        return;\n    }\n\n    int eventId = ParseEventId();\n    if (eventId < 0) {\n        return;\n    }\n\n    unsigned int ncolumns = m_event.data_count() + 2;\n    CanonicalEntity ce(ncolumns);\n    ce.SetPreciseTime(m_event.GetTimestamp());\n\n    auto schemaId = GetSchemaId(guidstr, eventId);\n    ce.SetSchemaId(schemaId);\n\n    bool hasError = false;\n\n    auto datum = m_event.data_begin();\n    while(datum != m_event.data_end()) {\n        std::string name;\n        auto mdsValue = ConvertData(&(*datum), name);\n        if (!mdsValue) {\n            hasError = true;\n            break;\n        }\n        ce.AddColumn(name, mdsValue);\n        ++datum;\n    }\n    if (!hasError) {\n        sink->AddRow(ce, 0);\n    }\n}\n\n// input cJSON is an array with 3 elements [\"Name\", \"Value\", \"srctype/mdstype\"]\nMdsValue*\nEtwEvent::ConvertData(cJSON* tuple, std::string & name)\n{\n    if (!ValidateJSON(tuple, cJSON_Array)) {\n        return nullptr;\n    }\n\n    const int ETW_TUPLE_SIZE = 3;\n    int arraySize = cJSON_GetArraySize(tuple);\n\n    if (ETW_TUPLE_SIZE != arraySize) {\n        std::ostringstream ss;\n        ss << \"Error: invalid data format: expected ETW tuple size=\" << ETW_TUPLE_SIZE << \"; actual size=\" << arraySize;\n        Logger::LogError(ss.str());\n        return nullptr;\n    }\n\n    cJSON* head = tuple->child;\n    if (!head || !GetJSONString(head, name)) {\n        return nullptr;\n    }\n    head = head->next;\n\n    cJSON * jvalue = head;\n    if (!jvalue) {\n        return nullptr;\n    }\n\n    head = head->next;\n    std::string inOutType;\n    if (!head || !GetJSONString(head, inOutType)) {\n        return nullptr;\n    }\n\n    typeconverter_t converter;\n    if (! Engine::GetEngine()->GetConverter(inOutType, converter)) {\n        std::ostringstream ss;\n        ss << \"Error: failed to get type converter '\" << inOutType << \"'. Supported converters: \" << Engine::ListConverters();\n        Logger::LogError(ss.str());\n        return nullptr;\n    }\n\n    return converter(jvalue);\n}\n\n\nstd::string\nEtwEvent::ParseGuid()\n{\n    std::string guidstr;\n    if (!m_event.GetGuid(guidstr)) {\n        std::ostringstream ss;\n        ss << \"Error: invalid event format: no expected '\" << s_GUIDName << \"' found. Do nothing.\";\n        Logger::LogError(ss.str());\n        return std::string();\n    }\n    return guidstr;\n}\n\nint\nEtwEvent::ParseEventId()\n{\n    int eventId = -1;\n    if (!m_event.GetEventId(eventId)) {\n        std::ostringstream ss;\n        ss << \"Error: invalid event format: no expected '\" << s_EventIdName << \"' found. 
Do nothing.\";\n        Logger::LogError(ss.str());\n        return -1;\n    }\n    return eventId;\n}\n\nbool\nEtwEvent::GetJSONString(cJSON* obj, std::string& value)\n{\n    if (!ValidateJSON(obj, cJSON_String)) {\n        return false;\n    }\n    value.assign(obj->valuestring);\n    return true;\n}\n\nbool\nEtwEvent::ValidateJSON(cJSON* obj, int expectedType)\n{\n    if (!obj) {\n        Logger::LogError(\"Error: unexpected NULL pointer for cJSON object.\");\n        return false;\n    }\n    if (expectedType != obj->type) {\n        std::ostringstream ss;\n        ss << \"Error: cJSON type: expected=\" << expectedType << \"; actual=\" << obj->type << \".\";\n        Logger::LogError(ss.str());\n        return false;\n    }\n    return true;\n}\n\nSchemaCache::IdType\nEtwEvent::GetSchemaId(const std::string & guidstr, int eventid)\n{\n    auto key = guidstr + std::to_string(eventid);\n\n    const auto & iter = m_schemaIdMap.find(key);\n    if (iter == m_schemaIdMap.end()) {\n        auto id = SchemaCache::Get().GetId();\n        m_schemaIdMap[key] = id;\n        return id;\n    }\n    else {\n        return iter->second;\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/EtwEvent.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef __ETWEVENT_HH__\n#define __ETWEVENT_HH__\n\n#include <string>\n#include <unordered_map>\n#include \"EventJSON.hh\"\n#include \"SchemaCache.hh\"\n\nclass LocalSink;\nclass MdsValue;\n\n/// This class implements functions to parse ETW JSON events. Each JSON message will\n/// follow a format like below:\n/// {\"TAG\":\"<tag>\",\n/// \"SOURCE\":\"ETW\",\n/// \"EVENTID\" : <id>,\n/// \"GUID\" : \"<guid>\", // NOTE: there is no {} around <guid>\n/// \"DATA\":[[\"name1\",\"val1\", \"jsonType/mdsType\"],[\"name2\", \"val2\", \"jsonType/mdsType\"]]}\n\nclass EtwEvent\n{\npublic:\n    EtwEvent(EventJSON& event) : m_event(event) {}\n    ~EtwEvent() {}\n\n    /// <summary>\n    /// Process current event. Create a new CanonicalEntity object with the event\n    /// data. Then save the CanonicalEntity into the given sink.\n    /// If there is any error with the event data, nothing will be saved to sink.\n    /// </summary>\n    /// <param name='sink'> Sink to save CanonicalEntity </param>\n    void Process(LocalSink* sink);\n\n    static const char* ETWName() { return s_ETWName; }\n    static const char* GUIDName() { return s_GUIDName; }\n    static const char* EventIDName() { return s_EventIdName; }\n\n    /// <summary>\n    /// Build and return a local table name given ETW GUID and EventID.\n    /// </summary>\n    static std::string BuildLocalTableName(const std::string & guid, int eventId)\n    {\n        return (std::string(s_ETWName) + \"_\" + guid + \"_\" + std::to_string(eventId));\n    }\n\nprivate:\n    std::string ParseGuid();\n    int ParseEventId();\n    MdsValue* ConvertData(cJSON* item, std::string & name);\n    bool GetJSONString(cJSON* obj, std::string& value);\n    bool ValidateJSON(cJSON* obj, int expectedType);\n\n    static SchemaCache::IdType GetSchemaId(const std::string & guidstr, int eventid);\n\nprivate:\n    EventJSON& m_event;\n\n    // Each ETW guid/eventid should correspond to a specific schema\n    static std::unordered_map<std::string, SchemaCache::IdType> m_schemaIdMap;\n\n    constexpr static const char* s_ETWName = \"ETW\";\n    constexpr static const char* s_GUIDName = \"GUID\";\n    constexpr static const char* s_EventIdName = \"EVENTID\";\n};\n\n\n#endif // __ETWEVENT_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/EventJSON.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventJSON.hh\"\n#include <cstdio>\nextern \"C\" {\n#include <sys/time.h>\n}\n#include \"EtwEvent.hh\"\n#include \"Logger.hh\"\n\nusing std::string;\n\nbool\nEventJSON::GetSource(string& value)\n{\n\tcJSON* item = cJSON_GetObjectItem(_event, \"SOURCE\");\n\tif (!ValidateJSON(\"SOURCE\", item, cJSON_String)) {\n\t\treturn false;\n\t} else {\n\t\tvalue.assign(item->valuestring);\n\n\t\tif (value == EtwEvent::ETWName()) {\n\t\t\tif (!GetEtwEventSource(value)) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t_isEtwEvent = true;\n\t\t}\n\t\treturn true;\n\t}\n}\n\nstring\nEventJSON::GetSource()\n{\n\tcJSON* item = cJSON_GetObjectItem(_event, \"SOURCE\");\n\tif (!ValidateJSON(\"SOURCE\", item, cJSON_String)) {\n\t\treturn std::string(\"\");\n\t} else {\n\t\tstd::string source = std::string(item->valuestring);\n\t\tif (source == EtwEvent::ETWName()) {\n\t\t\tif (!GetEtwEventSource(source)) {\n\t\t\t\treturn source;\n\t\t\t}\n\t\t\t_isEtwEvent = true;\n\t\t}\n\t\treturn source;\n\t}\n}\n\nbool\nEventJSON::GetGuid(std::string& value)\n{\n\tcJSON* guid = cJSON_GetObjectItem(_event, EtwEvent::GUIDName());\n\tif (!ValidateJSON(EtwEvent::GUIDName(), guid, cJSON_String)) {\n\t\treturn false;\n\t}\n\tvalue.assign(guid->valuestring);\n\treturn true;\n}\n\nbool\nEventJSON::GetEventId(int & eventId)\n{\n\tcJSON* obj = cJSON_GetObjectItem(_event, EtwEvent::EventIDName());\n\tif (!ValidateJSON(EtwEvent::EventIDName(), obj, cJSON_Number)) {\n\t\treturn false;\n\t}\n\teventId = obj->valueint;\n\treturn true;\n}\n\nbool\nEventJSON::GetEtwEventSource(std::string& value)\n{\n\tstd::string guidstr;\n\tint eventId = -1;\n\tif (!GetGuid(guidstr) || !GetEventId(eventId)) {\n\t\treturn false;\n\t}\n\tvalue = EtwEvent::BuildLocalTableName(guidstr, eventId);\n\treturn true;\n}\n\nbool\nEventJSON::GetTag(string& value)\n{\n\tcJSON* item = cJSON_GetObjectItem(_event, \"TAG\");\n\tif (!ValidateJSON(\"TAG\", item, cJSON_String)) {\n\t\treturn false;\n\t} else {\n\t\tvalue.assign(item->valuestring);\n\t\treturn true;\n\t}\n}\n\nbool\nEventJSON::ValidateJSON(const char* name, cJSON* obj, int expectedType)\n{\n    if (!obj) {\n        Logger::LogError(\"Error: unexpected NULL pointer for cJSON object.\");\n        return false;\n    }\n    if (expectedType != obj->type) {\n        std::ostringstream ss;\n        ss << \"Error: \";\n        if (name) {\n            ss << \"'\" << name << \"' \";\n        }\n        ss << \"JSON type: expected=\" << expectedType << \"; actual=\" << obj->type << \".\";\n        Logger::LogError(ss.str());\n        return false;\n    }\n    return true;\n}\n\n\nEventJSON::DataIterator\nEventJSON::data_begin()\n{\n\tcJSON* array = cJSON_GetObjectItem(_event, \"DATA\");\n\tif (!array || !(array->child)) {\n\t\treturn EventJSON::DataIterator((cJSON*)0);\n\t} else {\n\t\treturn EventJSON::DataIterator(array->child);\n\t}\n}\n\nunsigned int\nEventJSON::data_count()\n{\n\tcJSON* array = cJSON_GetObjectItem(_event, \"DATA\");\n\tif (array) {\n\t\treturn cJSON_GetArraySize(array);\n\t} else {\n\t\treturn 0;\n\t}\n}\n\nstd::ostream&\noperator<<(std::ostream& os, const EventJSON& ev)\n{\n\tchar *buf = cJSON_Print(ev._event);\n\tos << (const char*)buf;\n\tfree(buf);\n\treturn os;\n}\n\n// vim: se sw=8:\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/EventJSON.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _EVENTCJSON_HH_\n#define _EVENTCJSON_HH_\n\n#include <iterator>\n#include <string>\n#include \"Logger.hh\"\n#include \"MdsTime.hh\"\n\nextern \"C\" {\n#include \"cJSON.h\"\n}\n#include <cstdlib>\n\nclass EventJSON\n{\npublic:\n\tEventJSON(cJSON* event) : _event(event), _isEtwEvent(false) {}\n\tvoid PrintEvent() { char *buf = cJSON_Print(_event); Logger::LogInfo(buf); free(buf); }\n\tbool GetSource(std::string& source);\n\tstd::string GetSource();\n\tbool GetTag(std::string& tag);\n\tconst MdsTime& GetTimestamp() const { return _timestamp; }\n\n\tbool GetGuid(std::string& guid);\n\tbool GetEventId(int & eventId);\n\tbool IsEtwEvent() const { return _isEtwEvent; }\n\n\tclass DataIterator : public std::iterator<std::input_iterator_tag, cJSON*>\n\t{\n\tprivate:\n\t\tcJSON* _current;\n\n\tpublic:\n\t\tDataIterator(cJSON* item) : _current(item) {}\n\t\tDataIterator(const DataIterator& other) : _current(other._current) {}\n\t\tDataIterator& operator++() { _current = _current->next;\treturn *this; }\n\t\tDataIterator operator++(int) { DataIterator tmp(*this); operator++(); return tmp; }\n\t\tbool operator==(const DataIterator& other) { return _current == other._current; }\n\t\tbool operator!=(const DataIterator& other) { return _current != other._current; }\n\t\tcJSON& operator*() { return *_current; }\n\t\tcJSON* operator->() { return _current; }\n\t};\n\n\tDataIterator data_begin();\n\tDataIterator data_end() { return DataIterator((cJSON*)0); }\n\tunsigned int data_count();\n\n\tfriend std::ostream& operator<<(std::ostream& os, const EventJSON& ev);\n\nprivate:\n\tEventJSON();\n\n\tbool GetEtwEventSource(std::string& value);\n\tbool ValidateJSON(const char* name, cJSON* obj, int expectedType);\n\n\tcJSON* _event;\n\tMdsTime _timestamp;\n\tbool _isEtwEvent;\n};\n\n#endif //_EVENTCJSON_HH_\n\n// vim: se sw=8:\n"
  },
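  {
    "path": "Diagnostic/mdsd/mdsd/examples/SiblingIteratorSketch.cc",
    "content": "// Illustrative sketch, not part of the original mdsd sources: the pattern\n// behind EventJSON::DataIterator -- an input iterator that walks a cJSON-style\n// sibling list through its ->next pointer, with a null iterator as the end\n// marker. Node is a hypothetical stand-in for cJSON; the real class derives\n// from std::iterator, while this sketch spells out the member type aliases.\n#include <cstddef>\n#include <iostream>\n#include <iterator>\n\nstruct Node {\n    int value;\n    Node* next;\n};\n\nclass NodeIterator {\npublic:\n    using iterator_category = std::input_iterator_tag;\n    using value_type = Node;\n    using difference_type = std::ptrdiff_t;\n    using pointer = Node*;\n    using reference = Node&;\n\n    explicit NodeIterator(Node* n) : _current(n) {}\n    NodeIterator& operator++() { _current = _current->next; return *this; }\n    NodeIterator operator++(int) { NodeIterator tmp(*this); ++(*this); return tmp; }\n    bool operator==(const NodeIterator& o) const { return _current == o._current; }\n    bool operator!=(const NodeIterator& o) const { return _current != o._current; }\n    Node& operator*() { return *_current; }\n    Node* operator->() { return _current; }\n\nprivate:\n    Node* _current;\n};\n\nint main()\n{\n    Node c{3, nullptr}, b{2, &c}, a{1, &b};\n    // End is a null iterator, just like EventJSON::data_end().\n    for (NodeIterator it(&a), end(nullptr); it != end; ++it) {\n        std::cout << it->value << \"\\n\";\n    }\n    return 0;\n}\n"
  },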
  {
    "path": "Diagnostic/mdsd/mdsd/ExtensionMgmt.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ExtensionMgmt.hh\"\n#include \"Logger.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include \"MdsdExtension.hh\"\n#include \"MdsdConfig.hh\"\n#include \"CmdLineConverter.hh\"\n#include <cassert>\n#include <cpprest/pplx/threadpool.h>\n#include <boost/bind.hpp>\n\nextern \"C\" {\n#include <signal.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n}\n\nstd::map<const std::string, ExtensionInfo*> ExtensionList::_extlistByName;\nstd::map<pid_t, ExtensionInfo*> ExtensionList::_extlistByPid;\nstd::unordered_set<pid_t> ExtensionList::_killList;\n\nstd::mutex ExtensionList::_listmutex;\nstd::mutex ExtensionList::_klmutex;\n\nbool ExtensionMetaData::operator==(const ExtensionMetaData & other) const\n{\n    if (Name == other.Name &&\n        CommandLine == other.CommandLine &&\n        Body == other.Body &&\n        AlterLocation == other.AlterLocation)\n    {\n        return true;\n    }\n    return false;\n}\n\nExtensionInfo::ExtensionInfo() : StopTimer(nullptr),\n    StopTimerCancelled(false)\n{\n\n}\n\nExtensionInfo::~ExtensionInfo()\n{\n    if (StopTimer) {\n        StopTimerCancelled = true;\n        delete StopTimer;\n        StopTimer = nullptr;\n    }\n}\n\nstd::string\nExtensionInfo::GetStatus() const\n{\n    return ExtensionInfo::StatusToString(Status);\n}\n\nstd::map<ExtensionInfo::ExtStatus, std::string> ExtensionInfo::_statusMap = {\n    { ExtStatus::NORMAL, \"NORMAL\" },\n    { ExtStatus::BAD, \"BAD\" },\n    { ExtStatus::KILLING, \"KILLING\" },\n    { ExtStatus::EXIT, \"EXIT\" }\n};\n\nstd::string\nExtensionInfo::StatusToString(ExtStatus s)\n{\n    const auto &iter = _statusMap.find(s);\n    if (_statusMap.end() == iter) {\n        return \"UNKNOWN\";\n    }\n    return iter->second;\n}\n\nsize_t ExtensionList::GetSize()\n{\n    std::unique_lock<std::mutex> lock(_listmutex);\n    return _extlistByName.size();\n}\n\nbool \nExtensionList::AddItem(ExtensionInfo * extObj)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::AddItem\");\n    if (!extObj)\n    {\n        Logger::LogError(\"Error: unexpected NULL value for ExtensionInfo object.\");\n        return false;\n    }\n\n    const std::string & extname = extObj->MetaData.Name;\n    if (MdsdUtil::IsEmptyOrWhiteSpace(extname))\n    {\n        Logger::LogError(\"Error: unexpected empty or whitespace value for ExtensionName\");\n        return false;\n    }\n\n    std::unique_lock<std::mutex> lock(_listmutex);\n    \n    // search for the item. 
If found, delete the old one.\n    const auto & iter = _extlistByName.find(extname);\n    if (iter != _extlistByName.end())\n    {\n        ExtensionInfo *oldExtObj = iter->second;\n        delete oldExtObj;\n        oldExtObj = nullptr;\n    }\n    _extlistByName[extname] = extObj;\n    assert(0 != extObj->Pid);\n    _extlistByPid[extObj->Pid] = extObj;\n    trace.NOTE(\"Successfully added ExtensionInfo object with Name='\" + extname + \"'\");\n    return true;    \n}\n\n\nvoid\nExtensionList::AddPid(pid_t pid)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::AddPid\");\n    std::unique_lock<std::mutex> lock(_klmutex);\n    if (0 < _killList.count(pid)) {\n        Logger::LogError(\"Error: duplicate pid found: \" + std::to_string(pid));\n    }\n    else {\n        _killList.insert(pid);\n    }\n}\n\nstd::unordered_set<pid_t>\nExtensionList::GetAndClearPids()\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::GetAndClearPids\");\n    std::unique_lock<std::mutex> lock(_klmutex);\n    std::unordered_set<pid_t> r = _killList;\n    _killList.clear();\n    return r;\n}\n\nExtensionInfo * \nExtensionList::GetItem(const std::string & extname)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::GetItem(extname)\");\n    if (MdsdUtil::IsEmptyOrWhiteSpace(extname))\n    {\n        Logger::LogError(\"Error: unexpected empty or whitespace value for ExtensionName\");\n        return nullptr;\n    }\n\n    ExtensionInfo * obj = nullptr;\n    std::unique_lock<std::mutex> lock(_listmutex);\n    const auto & iter = _extlistByName.find(extname);\n\n    if (iter != _extlistByName.end()) {\n        obj = iter->second;\n        trace.NOTE(\"Got ExtensionInfo object: '\" + extname + \"'\");\n    }\n    else {\n        trace.NOTE(\"ExtensionInfo is not found: '\" + extname + \"'.\");\n    }\n\n    return obj;\n}\n \n\nExtensionInfo * \nExtensionList::GetItem(pid_t extPid)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::GetItem(pid_t)\");\n    if (0 >= extPid)\n    {\n        Logger::LogError(\"Error: unexpected value for pid: \" + std::to_string(extPid));\n        return nullptr;\n    }\n    ExtensionInfo * obj = nullptr;\n    std::unique_lock<std::mutex> lock(_listmutex);\n    const auto & iter = _extlistByPid.find(extPid);\n\n    if (iter != _extlistByPid.end()) {\n        obj = iter->second;\n        trace.NOTE(\"Got ExtensionInfo with pid=\" + std::to_string(extPid));\n    }\n    else {\n        trace.NOTE(\"ExtensionInfo is not found with pid=\" + std::to_string(extPid));\n    }\n    return obj;\n}\n\nbool\nExtensionList::UpdateItem(pid_t oldpid, pid_t newpid)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::UpdateItem\");\n    assert(0 < oldpid);\n    assert(0 < newpid);\n\n    bool resultOK = true;\n    std::unique_lock<std::mutex> lock(_listmutex);\n    const auto & iter = _extlistByPid.find(oldpid);\n    if (iter != _extlistByPid.end()) {\n        ExtensionInfo *obj = iter->second;\n        _extlistByPid.erase(iter);\n        _extlistByPid[newpid] = obj;\n        trace.NOTE(\"Extension is updated: from pid \" + std::to_string(oldpid) + \" to pid \" + std::to_string(newpid));\n    }\n    else {\n        resultOK = false;\n        Logger::LogError(\"Extension is not found with pid=\" + std::to_string(oldpid));\n    }\n    return resultOK;\n}\n\nbool \nExtensionList::DeleteItem(const std::string & extname)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::DeleteItem\");\n    if (MdsdUtil::IsEmptyOrWhiteSpace(extname))\n    {\n        Logger::LogError(\"Error: unexpected 
empty or whitespace for ExtensionName\");\n        return false;\n    }\n\n    bool resultOK = true;\n\n    std::unique_lock<std::mutex> lock(_listmutex);\n    const auto & iter = _extlistByName.find(extname);\n    if (iter != _extlistByName.end())\n    {\n        ExtensionInfo * obj = iter->second;\n\n        _extlistByName.erase(iter);\n        _extlistByPid.erase(obj->Pid);\n\n        lock.unlock();\n        trace.NOTE(\"Deleted item: '\" + extname + \"'\");\n\n        delete obj;\n        obj = nullptr;\n        resultOK = true;\n    }\n    else\n    {\n        trace.NOTE(\"Extension is not found: '\" + extname + \"'\");\n        resultOK = false;\n    }\n\n    return resultOK;\n}\n\nbool\nExtensionList::DeleteItems(const std::set<std::string>& extnames)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::DeleteItems\");\n    if (0 == extnames.size())\n    {\n        return true;\n    }\n\n    bool resultOK = true;\n    std::unique_lock<std::mutex> lock(_listmutex);\n\n    for(const auto & extname : extnames) \n    {\n        const auto & iter = _extlistByName.find(extname);\n        if (iter != _extlistByName.end())\n        {\n            ExtensionInfo * obj = iter->second;\n            _extlistByName.erase(iter);\n            _extlistByPid.erase(obj->Pid);\n            trace.NOTE(\"Deleted item: '\" + extname + std::string(\"'\"));\n            delete obj;\n            obj = nullptr;\n        }\n        else\n        {\n            trace.NOTE(\"Extension is not found: '\" + extname + std::string(\"'\"));\n            resultOK = false;\n        }\n    }\n\n    return resultOK;\n}\n\nvoid\nExtensionList::DeleteAllItems()\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::DeleteAllItems\");\n    std::unique_lock<std::mutex> lock(_listmutex);\n    for (auto x : _extlistByPid) {\n        delete x.second;\n    }\n    _extlistByPid.clear();\n    _extlistByName.clear();\n}\n\nvoid\nExtensionList::ForeachExtension(const std::function<void(ExtensionInfo*)>& fn)\n{\n    Trace trace(Trace::Extensions, \"ExtensionList::ForeachExtension\");\n    std::unique_lock<std::mutex> lock(_listmutex);\n    for (const auto & kv : _extlistByName) {\n        trace.NOTE(std::string(\"Walking ExtensionInfo with name='\") + kv.first + \"'\");\n        fn(kv.second);\n    }\n}\n\nExtensionMgmt * ExtensionMgmt::_extInstance = nullptr;\n\nExtensionMgmt*\nExtensionMgmt::GetInstance()\n{\n    if (!_extInstance) {\n        _extInstance = new ExtensionMgmt();\n        if(!_extInstance->InitSem())\n        {\n            delete _extInstance;\n            _extInstance = nullptr;\n        }\n    }\n    return _extInstance;\n}\n\nExtensionMgmt::ExtensionMgmt() : _extsemInitOK(false)\n{\n}\n\nExtensionMgmt::~ExtensionMgmt()\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::~ExtensionMgmt\");\n    if (_extsemInitOK)\n    {\n        if (-1 == sem_destroy(&_extsem))\n        {\n            std::string errstr = MdsdUtil::GetErrnoStr(errno);\n            Logger::LogError(\"Error: sem_destroy() failed: \" + errstr);\n        }\n    }\n}\n\nbool\nExtensionMgmt::InitSem()\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::InitSem\");\n    if (-1 == sem_init(&_extsem, 0, 0)) {\n        std::string errstr = MdsdUtil::GetErrnoStr(errno);\n        Logger::LogError(\"Error: sem_init() failed: \" + errstr);\n        _extsemInitOK = false;\n        return false;\n    }\n    _extsemInitOK = true;\n    return true;\n}\n\nbool\nExtensionMgmt::StartExtensions(MdsdConfig * config)\n{\n    Trace trace(Trace::Extensions, 
\"ExtensionMgmt::StartExtensions\");\n    if (!config) {\n        trace.NOTE(\"MdsdConfig* is NULL. Do nothing.\");\n        return true;\n    }\n    bool resultOK = false;\n    try {\n        ExtensionMgmt* extmgmt = GetInstance();\n        if (extmgmt) {\n            std::set<std::string> extlistInConfig;\n            resultOK = extmgmt->StartExtensionsFromConfig(config, extlistInConfig);\n            resultOK = resultOK && extmgmt->StopObsoleteExtensions(extlistInConfig);\n        }\n    }\n    catch(const std::exception & ex) {\n        Logger::LogError(std::string(\"Error: StartExtensions failed: \") + ex.what());\n        resultOK = false;\n    }\n    return resultOK;\n}\n\nvoid\nExtensionMgmt::StartExtensionsAsync(MdsdConfig * config)\n{\n    if (!config) {\n        return;\n    }\n\n    // If there is no old and new extension, do nothing\n    if (0 == config->GetNumExtensions() &&\n        0 == ExtensionList::GetSize()) {\n        return;\n    }\n\n    static std::future<bool> lastTask;\n    static std::mutex mtx;\n\n    try {\n        // multiple threads may call this function when automatic configuration mgr\n        // and main thread starts up\n        std::lock_guard<std::mutex> lock(mtx);\n        if (lastTask.valid()) {\n            if (!lastTask.get()) {\n                Logger::LogError(\"Previous StartExtensions() failed.\");\n            }\n        }\n        lastTask = std::async(std::launch::async, StartExtensions, config);\n    }\n    catch(const std::system_error& ex) {\n        Logger::LogError(std::string(\"Error: std::async failed calling 'StartExtensions': \") + ex.what());\n    }\n}\n\nbool\nExtensionMgmt::StartExtensionsFromConfig(\n    MdsdConfig * config, \n    std::set<std::string>& extlistInConfig)\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StartExtensionsFromConfig\");\n    bool resultOK = true;\n\n    std::vector<ExtensionInfo*> changedList;\n    // key is old extension's pid.\n    std::map<pid_t, ExtensionMetaData> newDataList;\n\n    std::function<void(MdsdExtension*)> Visitor = \n        [this,&trace,&extlistInConfig,&resultOK,&changedList,&newDataList](MdsdExtension * extObj)\n    {\n        const std::string & extname = extObj->Name();\n        const std::string & cmdline = extObj->GetCmdLine();\n        const std::string & body = extObj->GetBody();\n        const std::string & alterLocation = extObj->GetAlterLocation();\n\n        assert(false == MdsdUtil::IsEmptyOrWhiteSpace(extname));\n        assert(false == MdsdUtil::IsEmptyOrWhiteSpace(cmdline));\n\n        extlistInConfig.insert(extname);\n\n        // check with ExtensionList\n        ExtensionInfo* oldExtInfo = ExtensionList::GetItem(extname);\n        if (!oldExtInfo) {\n            resultOK = resultOK && StartExtension(extname, cmdline, body, alterLocation);\n        }\n        else\n        {\n            ExtensionMetaData newMetaData(extname, cmdline, body, alterLocation);\n            bool sameMetaData = (oldExtInfo->MetaData == newMetaData);\n            if (!sameMetaData) {\n                trace.NOTE(\"Found new metadata for \" + extname);\n                changedList.push_back(oldExtInfo);\n                newDataList[oldExtInfo->Pid] = newMetaData;\n            }\n            else {\n                trace.NOTE(\"No metadata were changed for \" + extname);\n            }\n        }\n    };\n\n    config->ForeachExtension(Visitor);\n\n    if (0 < changedList.size()) {\n        resultOK = resultOK && RestartChangedExtensions(changedList, newDataList);\n    }\n    
trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n// terminate current Extension processes. each process will send SIGCHLD, which\n// will be handled in signal handler. The extension will be deleted in the signal handler.\nbool\nExtensionMgmt::RestartChangedExtensions(\n    const std::vector<ExtensionInfo*> & changedList,\n    const std::map<pid_t, ExtensionMetaData> & newDataList)\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::RestartChangedExtensions\");\n    bool resultOK = true;\n    if (0 == changedList.size()) {\n        return resultOK;\n    }\n    assert(changedList.size() == newDataList.size());\n    \n    for (const auto & ext : changedList) {\n        StopExtension(ext);\n    }\n\n    trace.NOTE(\"Wait for all changed extensions to be stopped ...\");\n    for (size_t i = 0; i < newDataList.size(); i++) {\n        bool extStopOK = WaitForAnyExtStop();\n        if (extStopOK) {\n            std::unordered_set<pid_t> changedPids = ExtensionList::GetAndClearPids();\n            for (const auto & pid : changedPids) {\n                trace.NOTE(\"GetAndClearPids(): pid=\" + std::to_string(pid));\n            }\n            resultOK = StartAllChangedExts(changedPids, newDataList);\n        }\n    }\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\nbool\nExtensionMgmt::WaitForAnyExtStop()\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::WaitForAnyExtStop\");\n    bool resultOK = true;\n    struct timespec ts;\n    if (-1 == clock_gettime(CLOCK_REALTIME, &ts)) {\n        resultOK = false;\n    }\n    else {\n        ts.tv_sec += EXT_TERMINATE_GRACE_SECONDS + 1;\n\n        int waitstatus = 0;\n        time_t semStartTime = time(0);\n        while((waitstatus = sem_timedwait(&_extsem, &ts)) == -1 && EINTR == errno) {\n            semStartTime = time(0);\n            continue;\n        }\n        int waiterrno = errno;\n        if (-1 == waitstatus) {\n            if (ETIMEDOUT == waiterrno)\n            {\n                long waitTime = (long)(time(0) - semStartTime);\n                Logger::LogError(\"Error: sem_timedwait() timed out after \" + std::to_string(waitTime) + \" seconds.\");\n            }\n            else {\n                std::string errstr = MdsdUtil::GetErrnoStr(waiterrno);\n                Logger::LogError(\"Error: sem_timedwait() failed. 
Error string: \" + errstr);\n            }\n            resultOK = false;\n        }\n        else {\n            trace.NOTE(\"sem_timedwait() succeeded.\");\n        }\n    }\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n\nbool\nExtensionMgmt::StartAllChangedExts(\n    const std::unordered_set<pid_t> changedPids,\n    const std::map<pid_t, ExtensionMetaData> & newDataList) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StartAllChangedExts\");\n    bool resultOK = true;\n\n    for (const auto & pid : changedPids) {\n        const auto & iter = newDataList.find(pid);\n\n        if (newDataList.end() == iter) {\n            Logger::LogError(\"Error: old extension pid is not found: \" + std::to_string(pid));\n            resultOK = false;\n        }\n        else {\n            ExtensionMetaData metadata = iter->second;\n            assert(metadata.Name.empty() == false);\n            resultOK = resultOK && StartOneChangedExt(pid, metadata);\n        }\n    }\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n\n\nbool\nExtensionMgmt::StartOneChangedExt(pid_t changedPid, const ExtensionMetaData & metadata) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StartOneChangedExt\");\n    bool resultOK = true;\n\n    // only start new one when old one was terminated.\n    if (-1 == waitpid(changedPid, NULL,  WNOHANG) && ECHILD == errno) {\n        trace.NOTE(metadata.Name + \" with pid \" + std::to_string(changedPid) + \" was terminated. Start new one.\");\n        resultOK = resultOK && StartExtension(metadata);\n    }\n    else {\n        Logger::LogError(\"Error: \" + metadata.Name + \" with pid \" + std::to_string(changedPid) + \" was not terminated properly.\");\n        resultOK = false;\n    }\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n\nbool\nExtensionMgmt::StopObsoleteExtensions(const std::set<std::string> & extlistInConfig) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StopObsoleteExtensions\");\n    if (0 == ExtensionList::GetSize()) {\n        return true;\n    }\n\n    std::set<std::string> obsoleteExtNames;\n    std::unordered_set<ExtensionInfo*> obsoleteExtObjs;\n\n    std::function<void(ExtensionInfo*)> Visitor = \n    [&extlistInConfig,&obsoleteExtNames,&obsoleteExtObjs](ExtensionInfo * extObj)\n    {\n        assert(nullptr != extObj);\n        if (extlistInConfig.find(extObj->MetaData.Name) == extlistInConfig.end()) {\n            obsoleteExtNames.insert(extObj->MetaData.Name);\n            obsoleteExtObjs.insert(extObj);\n        }\n    };\n\n    ExtensionList::ForeachExtension(Visitor);\n\n    // The extensions must be stopped first before being deleted\n    bool resultOK = true;\n    for (const auto & extObj : obsoleteExtObjs)\n    {\n        resultOK = resultOK && StopExtension(extObj);\n    }\n\n    resultOK = resultOK && ExtensionList::DeleteItems(obsoleteExtNames);\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\nbool\nExtensionMgmt::StopAllExtensions()\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StopAllExtensions\");\n    size_t nitems = ExtensionList::GetSize();\n    if (0 == nitems) {\n        return true;\n    }\n\n    bool resultOK = true;\n    unsigned int nexists = 0;\n    std::function<void(ExtensionInfo*)> StopExtFunc = [this,&nexists,&resultOK](ExtensionInfo * extObj)\n    
{\n        assert(nullptr != extObj);\n        if (-1 != waitpid(extObj->Pid, NULL,  WNOHANG)) {\n            nexists++;\n        }\n        resultOK = resultOK && StopExtension(extObj);\n    };\n\n    ExtensionList::ForeachExtension(StopExtFunc);\n\n    trace.NOTE(\"Found \" + std::to_string(nexists) + \" running extensions. Wait for them to finish.\");\n    for (size_t i = 0; i < nexists; i++) {\n        resultOK = resultOK & WaitForAnyExtStop();\n    }\n    ExtensionList::DeleteAllItems();\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;    \n}\n\n\n\nbool\nExtensionMgmt::MaskSignal(bool isBlock, int signum) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::MaskSignal\");\n    sigset_t ss;\n    std::string errmsg = \"\";\n    int errnum = 0;\n\n    if (-1 == sigemptyset(&ss)) {\n        errnum = errno;\n        errmsg = \"Error: sigemptyset() failed.\";\n    }\n    else\n    {\n        if (-1 == sigaddset(&ss, signum)) {\n            errnum = errno;\n            errmsg = \"Error: sigaddset() failed on signal: \" + std::to_string(signum);\n        }\n        else {\n            int how = isBlock? SIG_BLOCK : SIG_UNBLOCK;\n            if (-1 == sigprocmask(how, &ss, NULL))\n            {\n                errnum = errno;\n                errmsg = \"Error: sigprocmask() failed.\";\n            }\n        }\n    }\n\n    bool resultOK = true;\n    if (errmsg != \"\")\n    {\n        errmsg += \" Error string: \" + MdsdUtil::GetErrnoStr(errnum);\n        Logger::LogError(errmsg);\n        resultOK = false;\n    }\n    return resultOK;\n}\n\nbool\nExtensionMgmt::StartExtension(const ExtensionMetaData & metaData) const\n{\n    return StartExtension(metaData.Name, metaData.CommandLine, metaData.Body, metaData.AlterLocation);\n}\n\nbool\nExtensionMgmt::StartExtension(\n    const std::string & extName,\n    const std::string & cmdline,\n    const std::string & body,\n    const std::string & alterLocation\n) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StartExtension\");\n    bool resultOK = true;\n\n    ExtensionInfo * oldExtInfo = ExtensionList::GetItem(extName);\n    if (oldExtInfo)\n    {\n        sleep(EXT_RETRY_WAIT_SECONDS);\n    }\n\n    if (!MaskSignal(true, SIGCHLD))\n    {\n        return false;\n    }\n\n    CmdLineConverter cconverter(cmdline);\n    char** cargv = cconverter.argv();\n\n    // use pipe to send child error to parent\n    int pipefds[2];\n    if (-1 == pipe(pipefds)) {\n        Logger::LogError(\"Error: pipe() failed: Error string: \" + MdsdUtil::GetErrnoStr(errno));\n        return false;\n    }\n\n    // Use FD_CLOEXEC so that if exec() succeeds, fd will be closed automatically.\n    if (fcntl(pipefds[1], F_SETFD, fcntl(pipefds[1], F_GETFD) | FD_CLOEXEC)) {\n        Logger::LogError(\"Error: fcntl() failed: Error string: \" + MdsdUtil::GetErrnoStr(errno));\n        return false;\n    }\n\n    pid_t pid = fork();\n    int forkerr = errno;\n    if (-1 == pid) {\n        Logger::LogError(\"Error: fork() failed: Error string: '\" + MdsdUtil::GetErrnoStr(forkerr) + \"'.\");\n        return false;\n    }\n\n    if (0 == pid) {\n        // child process\n        close(pipefds[0]);\n        int childerr = 0;\n        if (!MdsdUtil::IsEmptyOrWhiteSpace(body)) {\n            if (-1 == setenv(BODYENV, body.c_str(), 1)) {\n                childerr = errno;\n            }\n        }\n        if (0 == childerr) {\n            childerr = UnblockSignals();\n            if (0 == childerr) {\n               
 std::string fullpath = alterLocation + \"/\" + cargv[0];\n                execvp(fullpath.c_str(), cargv);\n                // child has error if it reaches here\n                childerr = errno;\n            }\n        }\n        // send error code to parent\n        if (write(pipefds[1], &childerr, sizeof(int)) < 0) {\n            Logger::LogError(\"Error: write() failed: Error string: '\" + MdsdUtil::GetErrnoStr(errno) + \"'.\");\n        }\n        _exit(0);\n    }\n    // parent process\n    close(pipefds[1]);\n\n    // read child error if any.\n    int readcount = 0;\n    int childerr = 0;\n    while (-1 == (readcount = read(pipefds[0], &childerr, sizeof(int)))) {\n        if (EAGAIN != errno && EINTR != errno) {\n            break;\n        }\n    }\n    bool childFailed = false;\n    if (readcount && childerr > 0) {\n        Logger::LogError(\"Error: create \" + extName + \" process failed. pid=\" + std::to_string(pid) + \". Error: \" + MdsdUtil::GetErrnoStr(childerr));\n        childFailed = true;\n    }\n    else {\n        trace.NOTE(\"Created process \" + extName + \": cmdline=\" + cmdline + \"; pid=\" + std::to_string(pid));\n    }\n    \n    resultOK = resultOK && UpdateExtensionList(oldExtInfo, extName, cmdline, body, alterLocation, pid, childFailed);\n    resultOK = resultOK && MaskSignal(false, SIGCHLD);\n    return resultOK;\n}\n\nbool\nExtensionMgmt::UpdateExtensionList(\n    ExtensionInfo * oldExtInfo,\n    const std::string & extName,\n    const std::string & cmdline,\n    const std::string & body,\n    const std::string & alterLocation,\n    pid_t pid,\n    bool extFailed) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::UpdateExtensionList\");\n    bool resultOK = true;\n\n    if (!oldExtInfo) {\n        trace.NOTE(\"Get a new extension definition. Add it to cache.\");\n\n        ExtensionInfo * extInfo = new ExtensionInfo();\n        extInfo->MetaData.Name = extName;\n        extInfo->MetaData.CommandLine = cmdline;\n        extInfo->MetaData.Body = body;\n        extInfo->MetaData.AlterLocation = alterLocation;\n        extInfo->Pid = pid;\n        extInfo->StartTime = time(NULL);\n        extInfo->Status = ExtensionInfo::NORMAL;\n        extInfo->RetryCount = extFailed? (EXT_MAX_RETRIES+1) : 0;\n\n        resultOK = ExtensionList::AddItem(extInfo);\n        if (!resultOK) {\n            delete extInfo;\n            extInfo = nullptr;\n        }\n    }\n    else {\n        pid_t oldpid = oldExtInfo->Pid;\n        trace.NOTE(\"Get existing extension. Update its pid from \" + std::to_string(oldpid) + \" to \" + std::to_string(pid));\n        oldExtInfo->Pid = pid;\n        oldExtInfo->StartTime = time(NULL);\n        oldExtInfo->Status = ExtensionInfo::NORMAL;\n        resultOK = ExtensionList::UpdateItem(oldpid, pid);\n    }\n    return resultOK;\n}\n\n\nint\nExtensionMgmt::UnblockSignals() const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::UnblockSignals\");\n    int sigerr = 0;\n    sigset_t ss;\n    if (-1 == sigfillset(&ss)) {\n        sigerr = errno;\n        Logger::LogError(\"Error: sigfillset() failed. Error string: \" + MdsdUtil::GetErrnoStr(sigerr));\n    }\n    else {\n        if (-1 == sigprocmask(SIG_UNBLOCK, &ss, NULL)) {\n            sigerr = errno;\n            Logger::LogError(\"Error: sigprocmask() failed. 
Error string: \" + MdsdUtil::GetErrnoStr(sigerr));\n        }\n    }\n    return sigerr;\n}\n\n\nbool\nExtensionMgmt::StopExtension(ExtensionInfo * extObj) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::StopExtension\");\n\n    if (!extObj)\n    {\n        trace.NOTE(\"ExtensionInfo object is NULL. Do nothing.\");\n        return true;\n    }\n\n    pid_t extpid = extObj->Pid;\n    std::string extname = extObj->MetaData.Name;\n    trace.NOTE(\"Stopping \" + extname + \" pid=\" + std::to_string(extpid) + \" status=\" + extObj->GetStatus());\n\n    bool stopOK = false;\n    bool isPsExist = false;\n\n    ExtensionInfo::ExtStatus oldStatus = extObj->Status;\n    assert(ExtensionInfo::ExtStatus::EXIT != oldStatus);\n\n    extObj->Status = ExtensionInfo::ExtStatus::KILLING;\n    trace.NOTE(\"Set \" + extname + \"'s status to be KILLING. Pid=\" + std::to_string(extpid));\n\n    if (ExtensionInfo::ExtStatus::NORMAL == oldStatus ||\n        ExtensionInfo::ExtStatus::BAD == oldStatus)\n    {\n        stopOK = SendSignalToProcess(extpid, SIGINT, &isPsExist);\n    }\n\n    if (isPsExist)\n    {\n        trace.NOTE(\"Set timer to KillProcessByForce ...\");\n        extObj->StopTimer = new boost::asio::deadline_timer(crossplat::threadpool::shared_instance().service());\n        extObj->StopTimer->expires_from_now(boost::posix_time::seconds(EXT_TERMINATE_GRACE_SECONDS));\n        extObj->StopTimer->async_wait(boost::bind(&ExtensionMgmt::KillProcessByForce, \n            this, extpid, boost::asio::placeholders::error));\n    }\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(stopOK));\n\n    return stopOK;\n}\n\nvoid\nExtensionMgmt::CatchSigChld(int signo)\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::CatchSigChld\");\n    trace.NOTE(std::string(\"Caught signal=\")  + std::to_string(signo) + \" : \" + std::string(strsignal(signo)));\n\n    assert(SIGCHLD == signo);\n\n    pid_t chldpid = 0;\n    int waitpiderr = 0;\n    bool haschild = false;\n\n    while(true) {\n        chldpid = waitpid((pid_t)-1, NULL, WNOHANG);\n        waitpiderr = errno;\n        trace.NOTE(\"waitpid() returned id=\" + std::to_string(chldpid) + \"\\n\");\n        if (0 < chldpid) {\n            UpdateStoppedExtension(chldpid);\n            haschild = true;\n        }\n        else {\n            break;\n        }\n    }\n\n    if (-1 == chldpid && ECHILD == waitpiderr && !haschild) {\n         if (-1 == sem_post(&_extsem)) {\n             std::string errstr = MdsdUtil::GetErrnoStr(errno);\n             trace.NOTE(\"Error: CatchSigchld: sem_post() failed: \" + errstr);\n         }\n    }\n}\n\nbool\nExtensionMgmt::UpdateStoppedExtension(pid_t extpid)\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::UpdateStoppedExtension\");\n\n    ExtensionInfo * extObj = ExtensionList::GetItem(extpid);\n    if (!extObj) {\n        Logger::LogError(\"no ExtensionInfo object found in cache for pid=\" + std::to_string(extpid));\n        return false;\n    }\n\n    ExtensionInfo::ExtStatus status = extObj->Status;\n    std::string extname = extObj->MetaData.Name;\n    trace.NOTE(\"Extension pid=\" + std::to_string(extpid) + \"; Status=\" + ExtensionInfo::StatusToString(status));\n\n    bool resultOK = true;\n    assert(ExtensionInfo::ExtStatus::NORMAL == status || ExtensionInfo::ExtStatus::KILLING == status);\n\n    if (ExtensionInfo::ExtStatus::NORMAL == status) {\n        resultOK = HandleExtensionFailure(extObj);\n    }\n    else if (ExtensionInfo::ExtStatus::KILLING == status) {\n        
trace.NOTE(\"Change extension status to EXIT. Delete it from cache. Call sem_post().\");\n        extObj->Status = ExtensionInfo::ExtStatus::EXIT;\n        resultOK = resultOK && ExtensionList::DeleteItem(extname);\n        ExtensionList::AddPid(extpid);\n        if (-1 == sem_post(&_extsem)) {\n            std::string errstr = MdsdUtil::GetErrnoStr(errno);\n            trace.NOTE(\"Error: UpdateStoppedExtension: sem_post() failed: \" + errstr);\n            resultOK = false;\n        }\n    }\n    else {\n        resultOK = false;\n        Logger::LogError(\"Unexpected extension status. expected=NORMAL/KILLING; actual=\" + extObj->GetStatus());\n    }\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n\nbool\nExtensionMgmt::HandleExtensionFailure(ExtensionInfo * extObj)\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::HandleExtensionFailure\");\n    bool resultOK = true;\n    if (!extObj) {\n        Logger::LogError(\"Unexpected nullptr for extension object.\");\n        return false;\n    }\n\n    extObj->Status = ExtensionInfo::ExtStatus::BAD;\n\n    unsigned int extlife = static_cast<unsigned int>((time(NULL) - extObj->StartTime));\n    if (EXT_RETRY_TIMEOUT_SECONDS >= extlife) {\n        extObj->RetryCount++;\n    }\n    else {\n        extObj->RetryCount = 0;\n    }\n\n    trace.NOTE(\"Extension last life: \" + std::to_string(extlife) + \" seconds, retry count: \" + std::to_string(extObj->RetryCount));\n    if (EXT_MAX_RETRIES >= extObj->RetryCount) {\n        trace.NOTE(\"Meet retry criteria. Restart extension.\");\n        resultOK = resultOK && StartExtension(extObj->MetaData);\n    }\n    else {\n        trace.NOTE(\"Exceed max retries. Stop retrying. Delete it from cache.\");\n        extObj->Status = ExtensionInfo::ExtStatus::EXIT;\n        resultOK = resultOK && ExtensionList::DeleteItem(extObj->MetaData.Name);\n    }\n\n    trace.NOTE(\"Finished with success = \" + MdsdUtil::ToString(resultOK));\n    return resultOK;\n}\n\n\nbool\nExtensionMgmt::KillProcessByForce(pid_t pid, const boost::system::error_code& error) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::KillProcessByForce\");\n    bool resultOK = true;\n\n    TRACEINFO(trace, \"pid=\" << pid);\n\n    if (boost::asio::error::operation_aborted == error) {\n        trace.NOTE(\"Operation is aborted. Do nothing.\");\n        resultOK = false;\n    }\n    else {\n        ExtensionInfo * obj = ExtensionList::GetItem(pid);\n        if (obj->StopTimerCancelled) {\n            trace.NOTE(\"Extension with pid \" + std::to_string(pid) + \" is already cancelled. 
Stop further action.\");\n        }\n        else {\n            bool isPsExist = true;\n            resultOK = SendSignalToProcess(pid, SIGKILL, &isPsExist);\n        }\n    }\n    return resultOK;\n}\n\nbool\nExtensionMgmt::SendSignalToProcess(pid_t pid, int signum, bool *pIsPsExist) const\n{\n    Trace trace(Trace::Extensions, \"ExtensionMgmt::SendSignalToProcess\");\n    assert(0 < pid);\n    assert(0 < signum);\n\n    bool resultOK = true;\n    trace.NOTE(\"Start to send signal \" + std::to_string(signum) + \" to pid \" + std::to_string(pid));\n    (*pIsPsExist) = true;\n\n    if (-1 == kill(pid, signum))\n    {\n        int killerr = errno;\n        std::string errstr = MdsdUtil::GetErrnoStr(errno);\n        if (ESRCH == killerr)\n        {\n            trace.NOTE(\"process was not found with pid=\" + std::to_string(pid));\n            (*pIsPsExist) = false;\n        }\n        else\n        {\n            Logger::LogError(\"Error: failed to send signal. Error string: \" + errstr);\n            resultOK = false;\n        }\n    }\n    else\n    {\n        trace.NOTE(\"Successfully sent signal.\");\n    }\n    return resultOK;\n}\n\nextern \"C\" \n{\n    \nvoid CatchSigChld(int signo)\n{\n    ExtensionMgmt *e = ExtensionMgmt::GetInstance();\n    if (e) {\n        e->CatchSigChld(signo);\n    }\n}\n\nvoid CleanupExtensions()\n{\n    ExtensionMgmt *e = ExtensionMgmt::GetInstance();\n    if (e)\n    {\n        e->StopAllExtensions();\n    }\n}\n\n}\n\n"
  },
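  {
    "path": "Diagnostic/mdsd/examples/ForkExecErrPipeExample.cc",
    "content": "// Illustrative sketch, NOT part of the original source tree: a standalone\n// demo of the fork() + pipe() + FD_CLOEXEC pattern that\n// ExtensionMgmt::StartExtension uses to learn whether execvp() succeeded in\n// the child. The file name and the /bin/true command are demo-only choices.\n//\n// Because the write end of the pipe is close-on-exec, a successful exec\n// closes it and the parent's read() returns 0 bytes; if exec fails, the\n// child writes errno into the pipe before exiting.\n#include <cerrno>\n#include <cstdio>\n#include <cstring>\n\nextern \"C\" {\n#include <fcntl.h>\n#include <sys/wait.h>\n#include <unistd.h>\n}\n\nint main()\n{\n    int pipefds[2];\n    if (-1 == pipe(pipefds)) { perror(\"pipe\"); return 1; }\n\n    // Mark the write end close-on-exec.\n    if (-1 == fcntl(pipefds[1], F_SETFD, fcntl(pipefds[1], F_GETFD) | FD_CLOEXEC)) {\n        perror(\"fcntl\");\n        return 1;\n    }\n\n    pid_t pid = fork();\n    if (-1 == pid) { perror(\"fork\"); return 1; }\n\n    if (0 == pid) {                       // child\n        close(pipefds[0]);\n        char* argv[] = { (char*)\"/bin/true\", nullptr };\n        execvp(argv[0], argv);\n        int childerr = errno;             // reached only if exec failed\n        (void)write(pipefds[1], &childerr, sizeof(childerr));\n        _exit(127);\n    }\n\n    close(pipefds[1]);                    // parent keeps only the read end\n    int childerr = 0;\n    ssize_t n = read(pipefds[0], &childerr, sizeof(childerr));\n    close(pipefds[0]);\n    waitpid(pid, NULL, 0);\n\n    if (n > 0)\n        fprintf(stderr, \"exec failed in child: %s\\n\", strerror(childerr));\n    else\n        printf(\"child exec'ed successfully\\n\");\n    return 0;\n}\n"
  },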
  {
    "path": "Diagnostic/mdsd/mdsd/ExtensionMgmt.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _EXTENSIONINFO_HH_\n#define _EXTENSIONINFO_HH_\n\n#include <string>\n#include <ctime>\n#include <map>\n#include <mutex>\n#include <functional>\n#include <set>\n#include <unordered_set>\n#include <vector>\n#include <future>\n#include <stddef.h>\n#include <boost/asio.hpp>\n#include <atomic>\n\nextern \"C\" {\n#include <unistd.h>\n#include <semaphore.h>\n}\n\n/// <summary>\n/// Keep track of an extension's meta data. Any of these data change\n/// will mean the extension definition changed.\n/// </summary>\nstruct ExtensionMetaData\n{\n    std::string Name;\n    std::string CommandLine;\n    std::string Body;\n    std::string AlterLocation;\n\n    ExtensionMetaData()\n    {\n    }\n\n    ExtensionMetaData(\n        const std::string & name,\n        const std::string & cmdline,\n        const std::string & body,\n        const std::string & loc\n        ) : Name(name), CommandLine(cmdline), Body(body), AlterLocation(loc)\n    {}\n\n\n    /// <summary>\n    /// Compare this extension's meta data with some other meta data.\n    /// Return true if they are the same, return false otherwise.\n    /// </summary>\n    bool operator==(const ExtensionMetaData & other) const;\n};\n\n/// <summary>\n/// Keep track an extension process's information.\n/// </summary>\nclass ExtensionInfo\n{\npublic:\n    enum ExtStatus {\n        NORMAL,     // A new extension.\n        BAD,        // An extension starts to run but failed in the middle.\n        KILLING,    // An extension killed by SIGINT, or killed externally.\n        EXIT,       // An extension stopped and killed already.\n        UNKNOWN     // Unknown. Should be never in this status.\n    };\n    \n    // An extension's metadata.\n    ExtensionMetaData MetaData;\n    \n    // Process id of the extension process.\n    pid_t Pid = 0;\n\n    // Process start time in number of seconds in UTC.\n    time_t StartTime = 0;\n\n    // Number of times the extension is retried since last reset.\n    unsigned int RetryCount = 0;\n\n    // Extension status.\n    ExtStatus Status = UNKNOWN;\n    \n    // asio timer to kill the extension by force.\n    boost::asio::deadline_timer * StopTimer;\n\n    // whether the extension timer is already cancelled or not.\n    std::atomic<bool> StopTimerCancelled;\n\n    ExtensionInfo();\n\n    ~ExtensionInfo();\n\n    /// <summary>\n    /// Get the string format of the status.\n    /// </summary>\n    std::string GetStatus() const;\n\n    /// <summary>\n    /// Get the string format of the status.\n    /// </summary>\n    static std::string StatusToString(ExtStatus s);\n\nprivate:\n    static std::map<ExtStatus, std::string> _statusMap;\n};\n\n/// <summary>\n/// Keep track of all extension processes. \n/// </summary>\nclass ExtensionList\n{\npublic:\n\n    /// <summary>\n    /// Get number of items.\n    /// </summary>\n    static size_t GetSize();\n\n    /// <summary>\n    /// Add an item to the list. If the item already exists, free the memory of\n    /// existing one and add the new one.\n    /// Return true if no error. 
Return false if the input is invalid.\n    /// </summary>\n    static bool AddItem(ExtensionInfo * extObj);\n\n    /// <summary>\n    /// Get an item given its name.\n    /// Return the object pointer.\n    /// Return nullptr if not found or given name is invalid.\n    /// The caller shouldn't free the object pointer.\n    /// </summary>\n    static ExtensionInfo * GetItem(const std::string & extname);\n\n    /// <summary>\n    /// Get an item given its process id.\n    /// Return the object pointer.\n    /// Return nullptr if not found or given pid is invalid.\n    /// The caller shouldn't free the object pointer.\n    /// </summary>\n    static ExtensionInfo * GetItem(pid_t extPid);\n\n    /// <summary>\n    /// Update an existing item's pid.\n    /// Return true if success, false if any error.\n    /// </summary>\n    static bool UpdateItem(pid_t oldpid, pid_t newpid);\n\n    /// <summary>\n    /// Delete an item from the list.\n    /// Return true if the item is actually deleted. \n    /// Return false if the given name is invalid or not found.\n    /// </summary>\n    static bool DeleteItem(const std::string & extname);\n\n    /// <summary>\n    /// Delete a set of items with given names.\n    /// Return true if all items are actually deleted, or set is empty.\n    /// Return false if any item is not found.\n    /// </summary>\n    static bool DeleteItems(const std::set<std::string>& extnames);\n    static void DeleteAllItems();\n\n    /// <summary>\n    /// Use a given function to iterate over each extension object.\n    /// </summary>\n    static void ForeachExtension(const std::function<void(ExtensionInfo*)>& fn);\n\n    /// <summary>\n    /// Add a pid to the pid set.\n    /// </summary>\n    static void AddPid(pid_t pid);\n\n    /// <summary>\n    /// Get all pids of the pid set. Clear the original one.\n    /// </summary>\n    static std::unordered_set<pid_t> GetAndClearPids();\n\n\nprivate:\n    static std::map<const std::string, ExtensionInfo*> _extlistByName;\n    static std::map<pid_t, ExtensionInfo*> _extlistByPid;\n\n    static std::mutex _listmutex;\n\n    /// Store a list of PIDs that needs to be killed because their\n    /// meta data are changed.\n    static std::unordered_set<pid_t> _killList;\n    static std::mutex _klmutex;\n};\n\nclass MdsdExtension;\nclass MdsdConfig;\n\n/// <summary>\n/// Use configuration to create new extension processes, then manage\n/// the extension processes.\n/// </summary>\nclass ExtensionMgmt\n{\npublic:\n    /// <summary>\n    /// Free all resources.\n    /// </summary>\n    ~ExtensionMgmt();\n\n    /// <summary>\n    /// Get a singleton instance.\n    /// </summary>\n    static ExtensionMgmt* GetInstance();\n\n    /// <summary>\n    /// Start all extensions given a config synchronously. It will also\n    /// stop any obsolete extension.\n    /// Return true if success; Return false for any error.\n    /// </summary>\n    static bool StartExtensions(MdsdConfig * config);\n\n    /// <summary>\n    /// Calls StartExtensions() in async.\n    /// </summary>\n    static void StartExtensionsAsync(MdsdConfig * config);\n\n    /// <summary>\n    /// Stop all extensions.\n    /// Return true for success, false for any error.\n    /// </summary>\n    bool StopAllExtensions();\n\n    /// <summary>\n    /// Defines SIGCHLD signal handler, which is from child extension process. 
It will\n    /// - release child process resources.\n    /// - change extension object status.\n    /// - update the extension object in ExtensionList.\n    /// </summary>\n    void CatchSigChld(int signo);\n\nprivate:\n    ExtensionMgmt();\n\n    /// <summary>\n    /// Semaphore used to synchronize between stopped extensions\n    /// (handled in the SIGCHLD signal handler) and creating new ones (in the main thread).\n    /// </summary>\n    sem_t _extsem;\n\n    /// <summary>\n    /// True if the semaphore is initialized properly, false if any error.\n    /// </summary>\n    bool _extsemInitOK;\n\n    /// <summary>\n    /// Singleton instance.\n    /// </summary>\n    static ExtensionMgmt * _extInstance;\n\n    /// <summary>\n    /// Environment variable name for an extension. The extension uses it to read the value defined in <Body>.\n    /// </summary>\n    static constexpr const char* BODYENV = \"MON_EXTENSION_BODY\";\n\n    /// <summary>\n    /// The grace period in number of seconds for the extension process to\n    /// terminate itself before it is killed by force. Because the mdsd service's\n    /// grace period is 30 seconds, make it shorter than that.\n    /// </summary>\n    static const unsigned int EXT_TERMINATE_GRACE_SECONDS = 20;\n\n    /// <summary>\n    /// The maximum number of retries to start an extension within\n    /// the given window of seconds.\n    /// </summary>\n    static const unsigned int EXT_MAX_RETRIES = 3;\n\n    /// <summary>\n    /// Number of seconds to wait before retrying the extension.\n    /// </summary>\n    static const unsigned int EXT_RETRY_WAIT_SECONDS = 5;\n\n    /// <summary>\n    /// Extension restart retry timeout in number of seconds.\n    /// If the time difference is bigger than this window, reset the\n    /// extension's RetryCount to 0.\n    /// </summary>\n    static const unsigned int EXT_RETRY_TIMEOUT_SECONDS = 60;\n\n    /// <summary>\n    /// Initialize the semaphore. Return true if no error; return false for any error.\n    /// </summary>\n    bool InitSem();\n\n    /// <summary>\n    /// Start all extensions defined in a config. It won't stop any extension.\n    /// It will return the extension names defined in the config in extlistInConfig.\n    /// Return true for success, false for any error.\n    /// </summary>\n    bool StartExtensionsFromConfig(\n        MdsdConfig * config, \n        std::set<std::string> & extlistInConfig);\n\n    /// <summary>\n    /// Restart all extensions whose meta data were changed.\n    /// Return true if success, false for any error.\n    /// <param name=\"changedList\">List of changed extensions.</param>\n    /// <param name=\"newDataList\">The meta data for the changed extensions. Key is the old extension pid.
</param>\n    /// </summary>\n    bool RestartChangedExtensions(const std::vector<ExtensionInfo*> & changedList,\n        const std::map<pid_t, ExtensionMetaData> & newDataList);\n\n    /// <summary>\n    /// Wait until a SIGCHLD for any stopped extension is caught, or until the wait\n    /// times out.\n    /// Return true if success, false if error or timed out.\n    /// </summary>\n    bool WaitForAnyExtStop();\n\n    /// <summary>\n    /// Start all extensions whose meta data were changed.\n    /// Return true for success, false for any error.\n    /// </summary>\n    bool StartAllChangedExts(const std::unordered_set<pid_t> changedPids,\n        const std::map<pid_t, ExtensionMetaData> & newDataList) const;\n\n    /// <summary>\n    /// Start one extension instance whose meta data were changed.\n    /// Return true for success, false for any error.\n    /// </summary>\n    bool StartOneChangedExt(pid_t changedPid, const ExtensionMetaData & metadata) const;\n\n    /// <summary>\n    /// Any extension that's not in the given set is obsolete.\n    /// For each obsolete extension, stop it and then delete it from ExtensionList.\n    /// Return true if no error is found. Otherwise, return false.\n    /// </summary>\n    bool StopObsoleteExtensions(const std::set<std::string> & extlistInConfig) const;\n\n    /// <summary>\n    /// Block or unblock a given signal to the process.\n    /// </summary>\n    bool MaskSignal(bool isBlock, int signum) const;\n\n    /// <summary>\n    /// Attempt to start a given extension process.\n    /// Return true if it starts OK, return false for any error.\n    /// If it starts OK, the ExtensionInfo object will be added to ExtensionList. Its memory\n    /// will be managed there.\n    /// </summary>\n    bool StartExtension(\n        const std::string & extName,\n        const std::string & cmdline,\n        const std::string & body,\n        const std::string & alterLocation) const;\n\n    /// <summary>\n    /// Start an extension process given its meta data.\n    /// Return true if it starts OK, return false for any error.\n    /// </summary>\n    bool StartExtension(const ExtensionMetaData & metaData) const;\n\n    /// <summary>\n    /// Either create a new ExtensionInfo object or update the existing one in\n    /// the extension list. If an extension failed to be created, it should not be\n    /// retried.\n    /// </summary>\n    bool UpdateExtensionList(\n        ExtensionInfo * oldExtInfo,\n        const std::string & extName,\n        const std::string & cmdline,\n        const std::string & body,\n        const std::string & alterLocation,\n        pid_t pid,\n        bool extFailed) const;\n\n    /// <summary>\n    /// Stop an extension process. It won't remove the ExtensionInfo item from ExtensionList.\n    /// </summary>\n    bool StopExtension(ExtensionInfo * extObj) const;\n\n    /// <summary>\n    /// Update the information of an extension given its pid.\n    /// Return true for success, false for error.\n    /// </summary>\n    bool UpdateStoppedExtension(pid_t extpid);\n\n\n    /// <summary>\n    /// Handle an extension that failed on its own. Either retry it or delete it for good, based on its status.\n    /// Return true if success, false if any error.\n    /// </summary>\n    bool HandleExtensionFailure(ExtensionInfo * extObj);\n\n    /// <summary>\n    /// Unblock all signals in the signal mask. For example, a child process\n    /// may use this function to unblock the signal mask inherited from its parent.\n    /// Return errno.\n    /// </summary>\n    int UnblockSignals() const;\n\n    /// <summary>\n    /// Kill a process by sending it SIGKILL. It doesn't validate whether\n    /// the process is actually killed or not.\n    /// Return true if the signal is sent out properly.\n    /// Return false if the signal is not sent out, or the operation is aborted.\n    /// </summary>\n    bool KillProcessByForce(pid_t pid, const boost::system::error_code& error) const;\n\n    /// <summary>\n    /// Send signal signum to process id pid.\n    /// Return whether the process exists or not through pIsPsExist.\n    /// Return true if the signal is sent out properly, false if error. If the process doesn't exist,\n    /// also return true.\n    /// <param name=\"pid\"> process id </param>\n    /// <param name=\"signum\"> signal number </param>\n    /// <param name=\"pIsPsExist\"> Return whether the process exists or not </param>\n    /// </summary>\n    bool SendSignalToProcess(pid_t pid, int signum, bool *pIsPsExist) const;\n\n};\n\n\n#endif // _EXTENSIONINFO_HH_\n"
  },
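  {
    "path": "Diagnostic/mdsd/examples/SemTimedWaitExample.cc",
    "content": "// Illustrative sketch, NOT part of the original source tree: the\n// sem_timedwait() pattern used by ExtensionMgmt::WaitForAnyExtStop, reduced\n// to a standalone program. A helper thread stands in for the SIGCHLD handler\n// that calls sem_post(); the 2-second deadline and 500 ms delay are demo-only\n// choices. Build with -pthread.\n#include <cerrno>\n#include <cstdio>\n#include <cstring>\n#include <chrono>\n#include <thread>\n\nextern \"C\" {\n#include <semaphore.h>\n#include <time.h>\n}\n\nint main()\n{\n    sem_t sem;\n    if (-1 == sem_init(&sem, 0, 0)) { perror(\"sem_init\"); return 1; }\n\n    struct timespec ts;\n    if (-1 == clock_gettime(CLOCK_REALTIME, &ts)) { perror(\"clock_gettime\"); return 1; }\n    ts.tv_sec += 2;                       // absolute deadline, as sem_timedwait expects\n\n    // Simulate a signal handler posting the semaphore after some work.\n    std::thread poster([&sem] {\n        std::this_thread::sleep_for(std::chrono::milliseconds(500));\n        sem_post(&sem);\n    });\n\n    int rc;\n    while ((rc = sem_timedwait(&sem, &ts)) == -1 && EINTR == errno)\n        continue;                         // retry if interrupted by a signal\n\n    if (-1 == rc)\n        fprintf(stderr, \"wait failed: %s\\n\", ETIMEDOUT == errno ? \"timed out\" : strerror(errno));\n    else\n        printf(\"semaphore acquired\\n\");\n\n    poster.join();\n    sem_destroy(&sem);\n    return 0;\n}\n"
  },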
  {
    "path": "Diagnostic/mdsd/mdsd/FileSink.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"FileSink.hh\"\n\n#include <iterator>\n#include <sstream>\n\n#include \"CanonicalEntity.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n#include \"RowIndex.hh\"\n#include \"Trace.hh\"\n#include \"MdsdMetrics.hh\"\n#include \"StoreType.hh\"\n#include \"Logger.hh\"\n\n// FileSink uses the name of the sink as the pathname. If the path isn't absolute, we make it\n// relative to /tmp.\n//\n// By design, each invocation of this constructor creates an independent\n// instance with its own ostream. They're all opened in append mode, which should keep simultaneous\n// writes from being interleaved. If writes are large enough that they become interleaved, this\n// design will need to be revisited. Perhaps Batches should hold reference-counted pointers to their\n// sinks, so that the destruction of the last batch instance pointing to a sink causes the sink to\n// be destroyed. Add to that a map from filename to a weak pointer to the filesink; when the FileSink\n// destructor is called (when the last strong refcounted pointer goes away), the destructor removes the weak\n// pointer from the map.\n//\nFileSink::FileSink(const std::string &name)\n  : IMdsSink(StoreType::Type::File), _name(name)\n{\n\tTrace trace(Trace::Local, \"FileSink::Constructor\");\n\n\t// Construct _path based on default directory\n\tif (name[0] != '/') {\n\t\t_path = \"/tmp/\";\t// Make a relative path into an absolute path\n\t}\n\t_path += name;\n\n\t// Do a quick sanity check to make sure the file can be opened. Allow any exception\n\t// from Open() to propagate upwards.\n\tOpen();\n\tClose();\n}\n\n// When destroying, remove from the global list of file sinks. No need to close the file; the \n// destructor for ostream is defined as closing the file.\nFileSink::~FileSink()\n{\n\tTrace trace(Trace::Local, \"FileSink::Destructor\");\n}\n\nvoid\nFileSink::Open()\n{\n\tif (! _file.is_open()) {\n\t\t_file.open(_path, std::ofstream::app);\t// Open for write in append mode\n\t\tif (!_file) {\n\t\t\tstd::system_error e(errno, std::system_category(), \"Failed to open \" + _path + \" for append\");\n\t\t\tLogger::LogError(\"Error: \" + e.code().message() + \" - \" + e.what());\n\t\t\tthrow e;\n\t\t}\n\t}\n}\n\n// Write the row, in readable form, to the output file. Add a timestamp. Don't bother with\n// async disk file writes; the primary goal of the FileSink is testability, so stability and certainty\n// is more important than absolute performance.\nvoid\nFileSink::AddRow(const CanonicalEntity &row, const MdsTime &)\n{\n\tstd::lock_guard<std::mutex> lock(_mutex);\n#if BUFFER_ALL_DATA\n\tstd::ostringstream msg;\n\tmsg << MdsTime::Now() << \"\\t\" << row << \"\\n\";\n\titems.push_back(std::move(msg.str()));\n#else\n\ttry {\n\t\tOpen();\n\t\t// If you emit std::endl, that does a flush, which isn't what we want.\n\t\t_file << MdsTime::Now() << \"\\t\" << row << \"\\n\";\n\t}\n\tcatch (const std::exception&)\n\t{ }\n#endif\n}\n\nvoid\nFileSink::Flush()\n{\n\tTrace trace(Trace::Local, \"FileSink::Flush\");\n\n\tstd::lock_guard<std::mutex> lock(_mutex);\n#if BUFFER_ALL_DATA\n\ttry {\n\t\tOpen();\n\t\tfor (const auto& item : items) {\n\t\t\t_file << item;\n\t\t}\n\t}\n\tcatch (const std::exception&)\n\t{ }\n\titems.clear();\n#endif\n\tClose();\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/FileSink.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _FILESINK_HH_\n#define _FILESINK_HH_\n\n#include \"IMdsSink.hh\"\n#include <map>\n#include <functional>\n#include <string>\n#include <mutex>\n#include <fstream>\n#include <vector>\n#include <exception>\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n#include \"CanonicalEntity.hh\"\n\nclass FileSink : public IMdsSink\n{\npublic:\n\tFileSink(const std::string&);\t// Private constructor; must be called with _mapMutex locked\n\tvirtual ~FileSink();\n\n\tvirtual bool IsFile() const { return true; }\n\tvirtual void AddRow(const CanonicalEntity&, const MdsTime&);\n\tvirtual void Flush();\n\nprivate:\n\tconst std::string _name;\n\tstd::string _path;\n\n\tstd::ofstream _file;\n\n\tstd::mutex _mutex;\n\n\tstd::vector<std::string> items;\n\n\tvoid Open();\n\tvoid Close() { try { _file.close(); } catch (const std::exception&) { } }\n};\n\n#endif // _FILESINK_HH_\n\n// vim: se sw=8 :\n"
  },
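  {
    "path": "Diagnostic/mdsd/examples/MiniFileSinkExample.cc",
    "content": "// Illustrative sketch, NOT part of the original source tree: a minimal\n// append-mode sink in the spirit of FileSink -- open on demand, write one\n// timestamped line per row without flushing, close (and thereby flush) on\n// Flush(). The MiniFileSink name and the /tmp default are demo-only choices.\n#include <ctime>\n#include <fstream>\n#include <iostream>\n#include <mutex>\n#include <string>\n\nclass MiniFileSink {\npublic:\n    explicit MiniFileSink(const std::string& name)\n        : _path(name[0] == '/' ? name : \"/tmp/\" + name) {}\n\n    void AddRow(const std::string& row) {\n        std::lock_guard<std::mutex> lock(_mutex);\n        Open();\n        // \"\\n\" rather than std::endl: endl would force a flush per row.\n        _file << time(nullptr) << \"\\t\" << row << \"\\n\";\n    }\n\n    void Flush() {\n        std::lock_guard<std::mutex> lock(_mutex);\n        _file.close();                    // closing flushes buffered rows\n    }\n\nprivate:\n    void Open() {\n        if (!_file.is_open()) {\n            _file.open(_path, std::ofstream::app);  // append mode\n            if (!_file)\n                std::cerr << \"failed to open \" << _path << \"\\n\";\n        }\n    }\n\n    std::string _path;\n    std::ofstream _file;\n    std::mutex _mutex;\n};\n\nint main()\n{\n    MiniFileSink sink(\"minifilesink-demo.log\");\n    sink.AddRow(\"hello\");\n    sink.AddRow(\"world\");\n    sink.Flush();\n    return 0;\n}\n"
  },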
  {
    "path": "Diagnostic/mdsd/mdsd/IMdsSink.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"IMdsSink.hh\"\n#include \"XTableSink.hh\"\n#include \"LocalSink.hh\"\n#include \"FileSink.hh\"\n#include \"XJsonBlobSink.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n\n#include <map>\n#include <string>\n\nIMdsSink*\nIMdsSink::CreateSink(MdsdConfig * config, const MdsEntityName &target, const Credentials* creds)\n{\n\tTrace trace(Trace::ConfigLoad, \"IMdsSink::CreateSink\");\n\n\tswitch (target.GetStoreType()) {\n\n\t\tcase StoreType::XTable:\n\t\t\treturn new XTableSink(config, target, creds);\n\n\t\tcase StoreType::Local:\n\t\t\treturn new LocalSink(target.Basename());\n\n\t\tcase StoreType::File:\n\t\t\treturn new FileSink(target.Basename());\n\n\t\tcase StoreType::XJsonBlob:\n\t\t    return new XJsonBlobSink(config, target, creds);\n\n\t\tdefault:\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Attempt to create sink of unknown type for target \" << target;\n\t\t\tLogger::LogError(msg.str());\n\t\t\ttrace.NOTE(msg.str());\n\t\t\tthrow std::logic_error(msg.str());\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/IMdsSink.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _IMDSSINK_HH_\n#define _IMDSSINK_HH_\n\n#include <string>\n#include \"StoreType.hh\"\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n\nclass CanonicalEntity;\nclass Credentials;\nclass MdsdConfig;\n\nclass IMdsSink\n{\npublic:\n\tvirtual bool IsXTable() const { return false; }\n\tvirtual bool IsBond() const { return false; }\n\tvirtual bool IsXJsonBlob() const { return false; }\n\tvirtual bool IsLocal() const { return false; }\n\tvirtual bool IsFile() const { return false; }\n\n\tstatic IMdsSink* CreateSink(MdsdConfig *, const MdsEntityName &target, const Credentials*);\n\n\tvirtual void AddRow(const CanonicalEntity&, const MdsTime&) = 0;\t// This is a pure virtual class\n\tvirtual void Flush() = 0;\n\tvirtual void ValidateAccess() {}\t\t// Throws if credentials cannot be used to access the target\n\t\t\t\t\t\t\t// May have desireable initialization side-effect(s)\n\n\tIMdsSink() = delete;\t\t\t\t// No default constructor\n\tIMdsSink(const IMdsSink&) = delete;\t\t// No copy constructor\n\n\tIMdsSink& operator=(const IMdsSink&) = delete;\t// No copy assignment\n\n\tIMdsSink(IMdsSink&&) = delete;\t\t\t\t// No Move constructor\n\tvirtual IMdsSink& operator=(IMdsSink&&) = delete;\t// No Move assignment\n\n\tvirtual ~IMdsSink() {}\n\tvoid SetRetentionPeriod(const MdsTime & period) { if (period > _retentionPeriod) _retentionPeriod = period; }\n\tconst MdsTime RetentionPeriod() const { return _retentionPeriod; }\n\ttime_t RetentionSeconds() const { return _retentionPeriod.to_time_t(); }\n\n\tStoreType::Type Type() const { return _type; }\n\nprotected:\n\tIMdsSink(StoreType::Type t) : _type(t), _retentionPeriod(0) {}\n\nprivate:\n\tStoreType::Type _type;\n\tMdsTime _retentionPeriod;\n};\n\n#endif // _IMDSSINK_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ITask.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ITask.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include <cpprest/pplx/threadpool.h>\n\nITask::ITask(const MdsTime &interval)\n\t: _interval(interval), _timer(crossplat::threadpool::shared_instance().service()), _cancelled(false)\n{\n\tassert(interval != MdsTime(0));\n\tTrace trace(Trace::Scheduler, \"ITask Constructor\");\n}\n\nITask::~ITask()\n{\n}\n\nvoid\nITask::start()\n{\n\tusing namespace boost::posix_time;\n\n\tTrace trace(Trace::Scheduler, \"ITask::Start\");\n\n\t// Call subclass on_start() method. Last minute initialization happens there, and the subclass\n\t// can call the whole thing off by returning false.\n\tif (! on_start()) {\n\t\tLogger::LogError(\"Task refused startup\");\n\t\treturn;\n\t}\n\n\tMdsTime start { initial_start() };\n\ttime_t spanSeconds = _interval.to_time_t();\n\t_intervalStart = start.Round(spanSeconds) - _interval;\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << this << \" requested start@\" << start << \" for interval beginning at \" << _intervalStart;\n\t\tmsg << \" of size \" << spanSeconds << \" seconds\";\n\t\ttrace.NOTE(msg.str());\n\t}\n\t_nextTime = start.to_ptime();\n\t_timer.expires_at(_nextTime);\n\t_timer.async_wait(boost::bind(&ITask::DoWork, this, boost::asio::placeholders::error));\n}\n\nvoid\nITask::cancel()\n{\n\tTrace trace(Trace::Scheduler, \"ITask::Cancel\");\n\tif (_cancelled) {\n\t\ttrace.NOTE(\"Already cancelled; ignoring\");\n\t\treturn;\n\t} else {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\t_cancelled = true;\n\t\t_timer.cancel();\n\t}\n\n\ton_cancel();\t// Called with mutex NOT locked\n}\n\nvoid\nITask::DoWork(const boost::system::error_code& error)\n{\n\tTrace trace(Trace::Scheduler, \"ITask::DoWork\");\n\tMdsTime start { _intervalStart };\n\n\tif (error == boost::asio::error::operation_aborted) {\n\t\t// If the timer was cancelled, we have to assume the entire configuration may have been\n\t\t// deleted; don't touch it. When an MdsdConfig object is told to self-destruct, it first\n\t\t// cancels all timer-driven actions, then it waits some period of time, then it actually\n\t\t// deletes the object. When the timers are cancelled, the handlers are called with the\n\t\t// cancellation message. The MdsdConfig object is *probably* still valid, and as long\n\t\t// as the timer isn't rescheduled, all should be well. But I'm playing it safe here\n\t\t// and assuming an explicit cancel operation means \"the config is gone\".\n\t\t//\n\t\t// Of course, if the MdsdConfig is deleted, all the associated objects, including this\n\t\t// very ITask object, get deleted as well. Thus, the \"don't touch nothin'\" rule.\n\t\ttrace.NOTE(\"Timer cancelled\");\n\t\treturn;\n\t} else {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\tif (error || _cancelled) {\n\t\t\treturn;\n\t\t}\n\n\t\ttrace.NOTE(\"Rescheduling\");\n\t\t_intervalStart += _interval;\n\t\t_nextTime = _nextTime + _interval.to_duration();\n\t\t_timer.expires_at(_nextTime);\n\t\t_timer.async_wait(boost::bind(&ITask::DoWork, this, boost::asio::placeholders::error));\n\t}\n\n\t// Note that, as written, we do NOT hold the lock here; our use of the class instance\n\t// needs to be readonly. If that changes, revisit this locking pattern.\n\texecute(start);\n}\n\n// vim: se sw=8 :\n"
  },
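  {
    "path": "Diagnostic/mdsd/examples/RepeatingTimerExample.cc",
    "content": "// Illustrative sketch, NOT part of the original source tree: the\n// self-rescheduling deadline_timer pattern from ITask::DoWork, reduced to a\n// standalone program. Advancing the stored expiry time (instead of\n// restarting \"interval from now\") keeps ticks from drifting. The 1-second\n// interval and the three-tick limit are demo-only choices.\n#include <boost/asio.hpp>\n#include <boost/bind.hpp>\n#include <iostream>\n\nstatic void tick(boost::asio::deadline_timer& timer,\n                 const boost::posix_time::seconds& interval,\n                 int remaining,\n                 const boost::system::error_code& error)\n{\n    if (error == boost::asio::error::operation_aborted)\n        return;                           // cancelled: touch nothing, like ITask\n    std::cout << \"tick, \" << remaining << \" remaining\" << std::endl;\n    if (remaining <= 1)\n        return;                           // let io_service::run() run out of work\n    timer.expires_at(timer.expires_at() + interval);  // no drift\n    timer.async_wait(boost::bind(tick, boost::ref(timer), interval,\n                                 remaining - 1, boost::asio::placeholders::error));\n}\n\nint main()\n{\n    boost::asio::io_service io;\n    boost::posix_time::seconds interval(1);\n    boost::asio::deadline_timer timer(io, interval);  // first expiry: now + interval\n    timer.async_wait(boost::bind(tick, boost::ref(timer), interval, 3,\n                                 boost::asio::placeholders::error));\n    io.run();\n    return 0;\n}\n"
  },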
  {
    "path": "Diagnostic/mdsd/mdsd/ITask.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _ITASK_HH_\n#define _ITASK_HH_\n\n#include <mutex>\n#include <stddef.h>\n#include <boost/asio.hpp>\n#include <boost/bind.hpp>\n#include \"MdsTime.hh\"\n\nclass MdsdConfig;\n\n// Interface for regularly-scheduled tasks. When an ITask is created, the interval at which it should be\n// executed is set. Once the ITask::start() method is invoked, a timer is set to cause the virtual ITask::on_start()\n// method to be invoked at the requested frequency (every _interval seconds), until the ITask::cancel() method\n// is invoked.\nclass ITask\n{\npublic:\n\t// Task should run every _interval_ seconds\n\tITask(const MdsTime &interval);\n\t// I want a move constructor...\n\tITask(ITask &&orig);\n\t// But do not want a copy constructor nor a default constructor\n\tITask(ITask &) = delete;\n\tITask() = delete;\n\n\tvirtual ~ITask();\n\n\t// Requests that this repeating task be scheduled for execution\n\tvoid start();\n\n\t// Requests that the task be stopped. Any execution already in progress (or for which the timer has already\n\t// tripped but execution still awaits scheduling on a thread) will take place, but the _cancelled boolean\n\t// can be observed.\n\t//\n\t// Once cancelled(), a task cannot be restarted; that is, you cannot call start() again. You must instead\n\t// create a new instance of the task object. This is due to the boost deadline timer not being restartable,\n\t// which itself arises from enabling cancellation in the first place, near as I can tell.\n\tvoid cancel();\n\n\tMdsTime interval() const { return _interval; }\n\nprotected:\n\t// Subclasses *must* override the execute() method, which is called to perform the actual\n\t// time-scheduled class.\n\tvirtual void execute(const MdsTime&) = 0;\n\n\t// Subclass gets notified via this callout when start() is called. If the subclass returns false,\n\t// the start operation aborts. In this case, start() can be called again; a failed startup is different\n\t// from a successful start followed by a cancel().\n\tvirtual bool on_start() { return true; }\n\n\t// Subclass gets notified when cancel() is called.\n\tvirtual void on_cancel() { }\n\n\t// When start() is called, a time for the initial task invocation must be determined.\n\t// By default, wait 2-7 second; the randomness prevents all the tasks from being started\n\t// at the same time when running through all tasks scheduled for a given config. Any\n\t// derived class can override this function, e.g. if the task needs to run within 5 seconds\n\t// of the beginning of the next \"interval\".\n\tvirtual MdsTime initial_start() { return MdsTime::Now() + MdsTime(2 + random()%5, random()%1000000); }\n\n\t// Subclass can check to see if cancellation has been requested\n\tbool is_cancelled() { return _cancelled; }\n\nprivate:\n\tMdsTime _interval;\n\n\tstd::mutex _mutex;\n\tboost::asio::deadline_timer _timer;\n\tboost::posix_time::ptime _nextTime;\n\tbool _cancelled;\n\tMdsTime _intervalStart;\n\n\tvoid DoWork(const boost::system::error_code& error);\n};\n\n#endif // _ITASK_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/IdentityColumns.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _IDENTITYCOLUMNS_HH_\n#define _IDENTITYCOLUMNS_HH_\n\n#include <vector>\n#include <string>\n#include <utility>\n\nusing ident_col_t = std::pair<std::string, std::string>;\nusing ident_vect_t = std::vector<ident_col_t>;\n\n#endif // _IDENTITYCOLUMNS_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/LADQuery.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"LADQuery.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Utility.hh\"\n#include <iomanip>\n#include <sstream>\n#include <cctype>\n\nnamespace Pipe {\n\nconst std::string LADQuery::_name { \"LADQuery\" };\n\nvoid\nLADQuery::FullAggregate::Sample(double value)\n{\n\t_total += value;\n\t_last = value;\n\tif (_count) {\n\t\tif (value > _maximum)\n\t\t\t_maximum = value;\n\t\tif (value < _minimum)\n\t\t\t_minimum = value;\n\t} else {\n\t\t_maximum = _minimum = value;\n\t}\n\t_count += 1;\n}\n\n\n// The core DerivedEvent task pulls entities from the source that fall within the just-completed\n// time window (based on duration). The LADQuery looks like this:\n// 1) Group by the value in the nameAttrName column; mark that column to be preserved\n// 2) Compute aggregate stats for the value in the valueAttrName column and pass the single aggregate row down the pipe\n// 3) Add a column with the specified partition key\n// 4) Send the CanonicalEntity down the pipe twice, once with each of the two distinct row keys as defined for the LAD query\n//\n// The strings are pass-by-value; the initializers use move semantics to move the copies into the member variables.\n// If the compiler can determine the actual parameters are temporaries, or about to go out of scope, it can optimize\n// the copy away, thus giving us the move semantics we actually want. Worst case, we're still doing only a single\n// copy (to prepare the passed values).\nLADQuery::LADQuery(std::string valueAN, std::string nameAN, std::string pkey, std::string uuid)\n\t: _valueAttrName(std::move(valueAN)), _nameAttrName(std::move(nameAN)), _pkey(std::move(pkey)),\n\t  _uuid(std::move(uuid)), _lastSampleTime(0), _startOfSample(0)\n{\n}\n\nvoid\nLADQuery::Start(const MdsTime QIbase)\n{\n\t// Prepare to process all the rows in this sample period\n\t_lastSampleTime = QIbase;\n\t_startOfSample = QIbase;\n\n\t// Do whatever the base class needs\n\tPipeStage::Start(QIbase);\n}\n\nvoid\nLADQuery::Process(CanonicalEntity *item)\n{\n\tTrace trace(Trace::QueryPipe, \"LADQuery::Process\");\n\n\t// Get the value of the nameAttrName column\n\t// Look in the savedStats map for the FullAggregate object associated with that name\n\t//    if there is none, make one and then use it\n\t// Update the FullAggregate based on the value of the valueAttrName column\n\tMdsValue* value = item->Find(_valueAttrName);\n\tMdsValue* name = item->Find(_nameAttrName);\n\tif (!(value && name)) {\n\t\ttrace.NOTE(\"Name or Value column missing; skipping entity\");\n\t} else if (! name->IsString()) {\n\t\tLogger::LogWarn(\"Name column is not a string\");\n\t} else if (! 
value->IsNumeric()) {\n\t\tLogger::LogWarn(\"Value column is not numeric\");\n\t} else {\n\t\t_savedStats[*(name->strval)].Sample(value->ToDouble());\n\t\t_lastSampleTime = item->PreciseTime();\n\t}\n\n\tdelete item;\t// No longer needed; we've updated the correct aggregation object\n}\n\nvoid\nLADQuery::Done()\n{\n\tTrace trace(Trace::DerivedEvent, \"LADQuery::Done\");\n\t// For each savedStats object in the map:\n\t//   Build a new CE with the full set of stats\n\t//   Add the _partitionKey to the CE\n\t//   Dupe the CE\n\t//   Put one of the LAD keys on the original CE; put the other key on the dupe\n\t//   Send both rows to the successor pipe\n\t//\n\t// Call Done on the successor pipe\n\n\tstd::string descendingTicks = MdsdUtil::ZeroFill(MdsTime::MaxDateTimeTicks - _startOfSample.to_DateTime(), 19);\n\n\tfor (const auto & iter : _savedStats) {\n\t\tauto entity = new CanonicalEntity(10);\n\t\tentity->SetPreciseTime(MdsTime::Now());\t// For the \"time\" field in Jsonblob\n\n\t\tentity->AddColumn(_nameAttrName, new MdsValue(iter.first));\n\t\tentity->AddColumn(\"Total\", new MdsValue(iter.second.Total()));\n\t\tentity->AddColumn(\"Minimum\", new MdsValue(iter.second.Minimum()));\n\t\tentity->AddColumn(\"Maximum\", new MdsValue(iter.second.Maximum()));\n\t\tentity->AddColumn(\"Average\", new MdsValue(iter.second.Average()));\n\t\tentity->AddColumn(\"Count\", new MdsValue(iter.second.Count()));\n\t\tentity->AddColumn(\"Last\", new MdsValue(iter.second.Last()));\n\n\t\tentity->AddColumn(\"PartitionKey\", _pkey);\n\n\t\tauto dupe = new CanonicalEntity(*entity);\n\t\tdupe->SetPreciseTime(entity->PreciseTime());\t// For the \"time\" field in Jsonblob\n\n\t\tstd::string metric = EncodeAndHash(iter.first, 256);\n\n\t\tstd::ostringstream key1, key2;\n\t\tkey1 << descendingTicks << \"__\" << metric;\n\t\tkey2 << metric << \"__\" << descendingTicks;\n\t\tif (_uuid.length()) {\n\t\t\tkey1 << \"__\" << _uuid;\n\t\t\tkey2 << \"__\" << _uuid;\n\t\t}\n\n\t\ttrace.NOTE(\"Aggregation rowkey \" + key1.str());\n\t\tentity->AddColumn(\"RowKey\", key1.str());\n\t\tPipeStage::Process(entity);\n\t\ttrace.NOTE(\"Aggregation rowkey (dupe) \" + key2.str());\n\t\tdupe->AddColumn(\"RowKey\", key2.str());\n\t\tdupe->SetSourceType(CanonicalEntity::SourceType::Duplicated);\n\t\tPipeStage::Process(dupe);\n\t}\n\tPipeStage::Done();\t// Pass the \"done\" signal to the next stage\n\n\t// Empty the map now to free memory, rather than waiting for the next Start() call\n\t_savedStats.clear();\n}\n\nstd::string\nLADQuery::EncodeAndHash(const std::string &name, size_t limit)\n{\n\tTrace trace(Trace::DerivedEvent, \"LADQuery::EncodeAndHash\");\n\n\ttrace.NOTE(\"EncodeAndHash(\\\"\" + name + \"\\\")\");\n\tstd::string result;\n\tfor (const char c : name) {\n\t\tif (isalpha(c) || isdigit(c)) {\n\t\t\tresult.push_back(c);\n\t\t} else {\n\t\t\tstd::ostringstream encoded;\n\t\t\tencoded << \":\" << std::hex << std::uppercase << std::setw(4) << std::setfill('0') << (unsigned short)c;\n\t\t\tresult.append(encoded.str());\n\t\t}\n\t}\n\tif (result.size() > limit) {\n\t\ttrace.NOTE(\"Hashing required...\");\n\t\tauto hash = MdsdUtil::MurmurHash64(result, 0);\n\t\tstd::ostringstream hashstr;\n\t\tconst size_t charcnt = sizeof(hash)*2;\n\t\thashstr << \"|\" << std::hex << std::setw(charcnt) << std::setfill('0') << hash;\n\t\tresult.replace(limit - (1 + charcnt), std::string::npos, hashstr.str());\n\t}\n\ttrace.NOTE(\"Encoded to \\\"\" + result + \"\\\"\");\n\treturn result;\n}\n\n// End of namespace\n}\n\n// vim: se ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/LADQuery.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _LADQUERY_HH_\n#define _LADQUERY_HH_\n\n#include \"Pipeline.hh\"\n#include \"MdsEntityName.hh\"\n#include <string>\n#include <unordered_map>\n#include <cfloat>\n\n// Pipe stages must implement the Process method.\n// Pipe stages that retain data must implement the Done method.\n// Pipe stages must implement a constructor, which can have any parameters that might be required.\n\nnamespace Pipe {\n\nclass LADQuery : public PipeStage\n{\npublic:\n\t// Deliberately call-by-value.\n\tLADQuery(std::string valueAN, std::string nameAN, std::string pkey, std::string uuid);\n\n\tvoid Start(const MdsTime QIbase);\n\tvoid Process(CanonicalEntity *);\n\tconst std::string& Name() const { return _name; }\n\tvoid Done();\n\nprivate:\n\tstatic const std::string _name;\n\tconst std::string\t_valueAttrName;\n\tconst std::string\t_nameAttrName;\n\tconst std::string\t_pkey;\n\tconst std::string\t_uuid;\n\tMdsTime\t\t_lastSampleTime;\n\tMdsTime\t\t_startOfSample;\n\n\tstd::string EncodeAndHash(const std::string &, size_t);\n\n\t// Contains aggregated stats on a counter during processing of a LADQuery\n\tclass FullAggregate\n\t{\n\tpublic:\n\t\tFullAggregate() : _total(0.0), _minimum(DBL_MAX), _maximum(-DBL_MAX), _last(0.0), _count(0) {}\n\t\tvoid Sample(double value);\n\t\tdouble Total() const { return _total; }\n\t\tdouble Minimum() const { return _minimum; }\n\t\tdouble Maximum() const { return _maximum; }\n\t\tdouble Last() const { return _last; }\n\t\tlong Count() const { return _count; }\n\t\tdouble Average() const { return _count?(_total / _count):0.0; }\n\n\tprivate:\n\t\tdouble _total;\n\t\tdouble _minimum;\n\t\tdouble _maximum;\n\t\tdouble _last;\n\t\tlong   _count;\n\t};\n\n\t// Holds all the instances of aggregation stats during processing.\n\t// Cleared after each run. Bad things will happen if multiple threads\n\t// call LADQuery::Process, which really shouldn't happen.\n\tstd::unordered_map<std::string, FullAggregate> _savedStats;\n\n};\n\n}\n\n\n#endif // _LADQUERY_HH_\n\n// vim: se ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/LinuxMdsConfig.xsd",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n  <xs:element name=\"MonitoringManagement\">\n    <xs:complexType>\n      <xs:sequence>\n        <xs:element name=\"Imports\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element name=\"Import\">\n                <xs:complexType>\n                  <xs:attribute name=\"file\" type=\"xs:string\" use=\"required\" />\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n          </xs:complexType>\n        </xs:element>\n        <xs:element name=\"Accounts\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element maxOccurs=\"unbounded\" name=\"Account\">\n                <xs:complexType>\n                  <xs:attribute name=\"moniker\" type=\"xs:string\" use=\"required\" />\n                  <xs:attribute name=\"isDefault\" type=\"xs:boolean\" use=\"optional\" />\n                  <xs:attribute name=\"autoKey\" type=\"xs:boolean\" use=\"optional\" />\n                  <xs:attribute name=\"name\" type=\"xs:string\" use=\"optional\" />\n                </xs:complexType>\n              </xs:element>\n              <xs:element maxOccurs=\"unbounded\" name=\"SharedAccessSignature\">\n                <xs:complexType>\n                  <xs:attribute name=\"moniker\" type=\"xs:string\" use=\"required\" />\n                  <xs:attribute name=\"key\" type=\"xs:string\" use=\"required\" />\n                  <xs:attribute name=\"account\" type=\"xs:string\" use=\"optional\" />\n                  <xs:attribute name=\"certificateStore\" type=\"xs:string\" use=\"optional\" />\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n          </xs:complexType>\n        </xs:element>\n        <xs:element name=\"Management\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element name=\"Identity\">\n                <xs:complexType>\n                  <xs:sequence>\n                    <xs:element maxOccurs=\"unbounded\" name=\"IdentityComponent\">\n                      <xs:complexType>\n                        <xs:simpleContent>\n                          <xs:extension base=\"xs:string\">\n                            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\" />\n                            <xs:attribute name=\"envariable\" type=\"xs:string\" use=\"optional\" />\n                          </xs:extension>\n                        </xs:simpleContent>\n                      </xs:complexType>\n                    </xs:element>\n                  </xs:sequence>\n                </xs:complexType>\n              </xs:element>\n              <xs:element name=\"AgentResourceUsage\">\n                <xs:complexType>\n                  <xs:attribute name=\"diskQuotaInMB\" type=\"xs:unsignedShort\" use=\"required\" />\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n            <xs:attribute name=\"eventVolume\" type=\"xs:string\" use=\"required\" />\n            <xs:attribute name=\"defaultRetentionInDays\" type=\"xs:unsignedByte\" use=\"required\" />\n          </xs:complexType>\n        </xs:element>\n        <xs:element name=\"Schemas\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element maxOccurs=\"unbounded\" name=\"Schema\">\n                <xs:complexType>\n          
        <xs:sequence>\n                    <xs:element maxOccurs=\"unbounded\" name=\"Column\">\n                      <xs:complexType>\n                        <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"type\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"mdstype\" type=\"xs:string\" use=\"required\" />\n                      </xs:complexType>\n                    </xs:element>\n                  </xs:sequence>\n                  <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\" />\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n          </xs:complexType>\n        </xs:element>\n        <xs:element name=\"Sources\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element maxOccurs=\"unbounded\" name=\"Source\">\n                <xs:complexType>\n                  <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\" />\n                  <xs:attribute name=\"schema\" type=\"xs:string\" use=\"required\" />\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n          </xs:complexType>\n        </xs:element>\n        <xs:element name=\"Events\">\n          <xs:complexType>\n            <xs:sequence>\n              <xs:element name=\"HeartBeats\">\n                <xs:complexType>\n                  <xs:sequence>\n                    <xs:element name=\"HeartBeat\">\n                      <xs:complexType>\n                        <xs:attribute name=\"eventName\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"storeType\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"duration\" type=\"xs:duration\" use=\"required\" />\n                        <xs:attribute name=\"sampleRateInSeconds\" type=\"xs:unsignedByte\" use=\"required\" />\n                      </xs:complexType>\n                    </xs:element>\n                  </xs:sequence>\n                </xs:complexType>\n              </xs:element>\n              <xs:element name=\"OMI\">\n                <xs:complexType>\n                  <xs:sequence>\n                    <xs:element maxOccurs=\"unbounded\" name=\"OMIQuery\">\n                      <xs:complexType>\n                        <xs:attribute name=\"eventName\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"sampleRateInSeconds\" type=\"xs:unsignedShort\" use=\"required\" />\n                        <xs:attribute name=\"eventSas\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"omiNamespace\" type=\"xs:string\" use=\"required\" />\n                        <xs:attribute name=\"cqlQuery\" type=\"xs:string\" use=\"required\" />\n                      </xs:complexType>\n                    </xs:element>\n                  </xs:sequence>\n                </xs:complexType>\n              </xs:element>\n              <xs:element name=\"MdsdEvents\">\n                <xs:complexType>\n                  <xs:sequence>\n                    <xs:element name=\"MdsdEventSource\">\n                      <xs:complexType>\n                        <xs:sequence>\n                          <xs:element maxOccurs=\"unbounded\" name=\"RouteEvent\">\n                            <xs:complexType>\n                              <xs:sequence>\n                                <xs:element maxOccurs=\"unbounded\" 
name=\"Filter\">\n                                  <xs:complexType>\n                                    <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\" />\n                                    <xs:attribute name=\"op\" type=\"xs:string\" use=\"required\" />\n                                    <xs:attribute name=\"value\" type=\"xs:unsignedByte\" use=\"required\" />\n                                  </xs:complexType>\n                                </xs:element>\n                              </xs:sequence>\n                              <xs:attribute name=\"eventName\" type=\"xs:string\" use=\"required\" />\n                              <xs:attribute name=\"priority\" type=\"xs:string\" use=\"optional\" />\n                              <xs:attribute name=\"account\" type=\"xs:string\" use=\"optional\" />\n                            </xs:complexType>\n                          </xs:element>\n                        </xs:sequence>\n                        <xs:attribute name=\"source\" type=\"xs:string\" use=\"required\" />\n                      </xs:complexType>\n                    </xs:element>\n                  </xs:sequence>\n                </xs:complexType>\n              </xs:element>\n            </xs:sequence>\n          </xs:complexType>\n        </xs:element>\n      </xs:sequence>\n      <xs:attribute name=\"version\" type=\"xs:decimal\" use=\"required\" />\n      <xs:attribute name=\"namespace\" type=\"xs:string\" use=\"required\" />\n      <xs:attribute name=\"eventVersion\" type=\"xs:unsignedByte\" use=\"required\" />\n      <xs:attribute name=\"timestamp\" type=\"xs:dateTime\" use=\"required\" />\n    </xs:complexType>\n  </xs:element>\n</xs:schema>"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Listener.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Listener.hh\"\n#include \"Logger.hh\"\n#include \"Engine.hh\"\n#include \"EventJSON.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n#include <cstdlib>\n#include <cctype>\n#include <sstream>\n#include <boost/date_time/posix_time/conversion.hpp>\n#include <cpprest/pplx/threadpool.h>\n\nextern \"C\" {\n#include \"cJSON.h\"\n#include <unistd.h>\n}\n\n// Set default checkpoint time to 1/2 the default dupe-detection window. The default window\n// is one hour.\nunsigned int Listener::checkpointSeconds = 60 * 60 / 2;\n\n// Thread startproc. If and when the specific ProcessLoop() method returns, cleanup and exit.\n// The pthread interface requires this method to both accept and return a void*.\nvoid *\nListener::handler(void * obj)\n{\n        Trace trace(Trace::EventIngest, \"Listener::handler\");\n\n        // Create a shared_ptr to own the Listener object\n        auto listener = std::shared_ptr<Listener>((Listener*)obj);\n\n        trace.NOTE(\"Start timer for \" + listener->Name());\n        // Start the timer running\n        listener->Timer().expires_from_now(boost::posix_time::seconds(checkpointSeconds));\n        listener->Timer().async_wait(boost::bind(&Listener::timerhandler, listener, TimerTask::rotate));\n\n        auto result = listener->ProcessLoop();\n        trace.NOTE(\"Returned from ProcessLoop for \" + listener->Name());\n\n        listener->Shutdown();\n\n        return result;\n}\n\n// Upon receiving an indication from the sender that the session is over, call this method to\n// shutdown our end of it. This method should be called synchronously on the listening thread;\n// the only expected race is against the timer handler. Once we set _finished, the timer handler\n// will do its own cleanup the next time it runs. If the timer handler runs between the moment\n// Shutdown sets _finished and the moment it calls _timer.cancel(), we'll be cancelling a timer\n// that wasn't set, but that is perfectly fine.\nvoid\nListener::Shutdown()\n{\n        Trace trace(Trace::EventIngest, \"Listener::Shutdown\");\n        trace.NOTE(\"Shutting down \" + Name());\n        close(clientfd);\n        _finished = true;\n        _timer.cancel();\n}\n\n// Parse one or more objects out of a range of characters in the half-open range [start, end).\n// Return a pointer to the character immediately following the last object successfully parsed\n// (and skipping any trailing whitespace).\n// This return value is guaranteed to be <= end. 
If no message was successfully parsed, a null\n// pointer (0) will be returned.\n// The parser assumes *end is a NUL byte so it can treat the range as a C string.\nconst char *\nListener::ParseBuffer(const char* start, const char* end)\n{\n        Trace trace(Trace::EventIngest, \"Listener::ParseBuffer\");\n        const char * parse_end = 0;\n        const char * lkg_parse_end = 0;\n        cJSON * event;\n\n        if (*end != '\\0') {\n                std::ostringstream msg;\n                size_t n = end - start + 1;\n                msg << \"ParseBuffer \" << Name() << \" got a non-NUL terminated range, length = \" << n << \"\\n\";\n                DumpBuffer(msg, start, end);\n                throw Listener::exception(msg);\n        }\n\n        while ((start < end) && (event = cJSON_ParseWithOpts(start, &parse_end, 0))) {\n                if (parse_end > end) {\n                        std::ostringstream msg;\n                        msg << \"ParseBuffer found an object longer than the input buffer. Start \" << (void *)start << \", end \";\n                        msg << (void *)end << \", parse_end \" << (void *)parse_end << \"\\n\";\n                        if (*end != '\\0') {\n                                msg << \"Range is no longer NUL-terminated.\\n\";\n                        }\n                        DumpBuffer(msg, start, end);\n                        throw Listener::exception(msg);\n                }\n\n                bool status = TryParseEvent(event) || TryParseEcho(event);\n                if (!status) {\n                        LogBadJSON(event, Name() + \" ignored unknown JSON message\");\n                }\n\n                // Free the parsed event\n                cJSON_Delete(event);\n\n                // Advance past the object we just parsed, skip trailing whitespace.\n                // I don't really have to do this; cJSON handles leading whitespace. But it's better\n                // if I can consume a full buffer; that reduces copying of useless characters.\n                while ((parse_end < end) && (isspace(*parse_end))) {\n                        parse_end++;\n                }\n                start = lkg_parse_end = parse_end;\n        }\n        if (lkg_parse_end != parse_end) {\n                TRACEINFO(trace, \"parse_end (\" << (void*)parse_end << \") != lkg (\" << (void*)lkg_parse_end << \")\");\n        }\n        return lkg_parse_end;\n}\n\nbool\nListener::TryParseEvent(cJSON* event)\n{\n    Trace trace(Trace::EventIngest, \"Listener::TryParseEvent\");\n\n    cJSON* jsTAG = cJSON_GetObjectItem(event, \"TAG\");\n    if (!jsTAG || jsTAG->type != cJSON_String) {\n        return false;\n    }\n\n    cJSON* jsSOURCE = cJSON_GetObjectItem(event, \"SOURCE\");\n    cJSON* jsDATA = cJSON_GetObjectItem(event, \"DATA\");\n    if ((jsSOURCE && jsSOURCE->type == cJSON_String) && (jsDATA && jsDATA->type == cJSON_Array)) {\n        // That's plenty of validation for now.\n        if (trace.IsActive()) {\n            char *rendering = cJSON_Print(event);\n            auto len = strlen(rendering);\n            TRACEINFO(trace, \"Got event from source \" << jsSOURCE->valuestring << \" of total size \" << len);\n            if (trace.IsAlsoActive(Trace::IngestContents)) {\n                std::ostringstream msg;\n                std::string body(rendering, (len>1024?1024:len));\n                msg << Name() << \" received JSON event \" << body;\n                if (len > 1024) {\n                    msg << \" ... 
}\";\n                }\n                trace.NOTE(msg.str());\n            }\n            free(rendering);\n        }\n        if (IsNewTag(jsTAG)) {\n            // Process the event...\n            EventJSON evt(event);\n            Engine::GetEngine()->ProcessEvent(evt);\n        }\n        // Inform the client we've processed the event\n        EchoTag(jsTAG->valuestring);\n    }\n    else {\n        LogBadJSON(event, Name() + \" received incomplete JSON-encoded event\");\n    }\n    return true;\n}\n\nbool\nListener::TryParseEcho(cJSON* event)\n{\n    Trace trace(Trace::EventIngest, \"Listener::TryParseEcho\");\n\n    cJSON* jsECHO = cJSON_GetObjectItem(event, \"ECHO\");\n    if (jsECHO && jsECHO->type == cJSON_String) {\n            EchoTag(jsECHO->valuestring);\n            return true;\n    }\n    return false;\n}\n\nvoid\nListener::LogBadJSON(cJSON* event, const std::string& prefix)\n{\n        char *rendering = cJSON_Print(event);\n        Logger::LogError(prefix + \" {\" + rendering + \"}\");\n        free(rendering);\n}\n\n// Echo the tag, followed by a newline, back to the client.\nvoid\nListener::EchoTag(char * tagptr)\n{\n        try {\n                MdsdUtil::WriteBufferAndNewline(clientfd, tagptr);\n        }\n        catch (const MdsdUtil::would_block& e) {\n                std::ostringstream msg;\n                msg << \"Event source tag-reader is slow; dropping tag \" << tagptr;\n                Logger::LogWarn(msg);\n        }\n        catch (const std::system_error& e) {\n                if (EPIPE == e.code().value()) {\n                        throw Listener::exception(std::string(\"Event sender closed connection: \") + e.what());\n                }\n                else {\n                        Logger::LogError(std::string(\"Listener failed to echo TAG: \") + e.what());\n                }\n        }\n        catch (const std::runtime_error& e) {\n                Logger::LogError(std::string(\"Listener failed to echo TAG: \") + e.what());\n        }\n}\n\nListener::Listener(int fd) : clientfd(fd),\n                tagsAgedOut(0), tagsOldest(new tag_set()), tagsOld(new tag_set()), tagsCurr(new tag_set()),\n                _timer(crossplat::threadpool::shared_instance().service()), _finished(false)\n{\n        Trace trace(Trace::EventIngest, \"Listener::Listener\");\n\n        std::ostringstream msg;\n        msg << this;\n        _name = msg.str();\n\n        trace.NOTE(\"Constructed Listener \" + Name());\n}\n\nListener::~Listener()\n{\n        Trace trace(Trace::EventIngest, \"Listener::~Listener\");\n\n        Logger::LogWarn(\"Closing fd in ~Listener()\");\n        trace.NOTE(\"Destroying Listener \" + Name());\n        close(clientfd);\n        if (tagsAgedOut) {\n                delete tagsAgedOut;\n                tagsAgedOut = 0;\n        }\n        if (tagsOldest) {\n                delete tagsOldest;\n                tagsOldest = nullptr;\n        }\n        if (tagsOld) {\n                delete tagsOld;\n                tagsOld = nullptr;\n        }\n        if (tagsCurr) {\n                delete tagsCurr;\n                tagsCurr = nullptr;\n        }\n}\n\nbool Listener::IsNewTag(cJSON* jsTAG)\n{\n        Trace trace(Trace::EventIngest, \"Listener::IsNewTag\");\n        if (nullptr == jsTAG)\n        {\n                trace.NOTE(\"Got a NULL JSON object pointer\");\n                return false;\n        } else if (nullptr == jsTAG->valuestring) {\n                trace.NOTE(\"JSON object had NULL valuestring\");\n                return 
false;\n        } else if (0 == *(jsTAG->valuestring)) {\n                trace.NOTE(\"JSON object had zero-length valuestring\");\n                return false;\n        }\n\n        trace.NOTE(\"Checking tag \\\"\" + std::string(jsTAG->valuestring) + \"\\\"\");\n\n        bool isNewTag = true;\n        std::string tagstr(jsTAG->valuestring);\n\n        // Capture the tag sets to check. We're racing against the timer handler which will\n        // rotate the grandparent to great-grandparent, parent to grandparent, current to\n        // parent, and an empty set into current. When we capture current/parent/grand during\n        // the rotation operation, we might wind up with empty/current/parent or\n        // current/current/parent or current/parent/parent, but since we're checking right\n        // on the \"rotation\" time, the relevant time window really does encompass just current\n        // and parent at the instant we start looking. We might wind up checking a tag set\n        // twice, but we won't segfault and we won't miss checking a relevant tag set.\n\n        auto currentSet = tagsCurr;\n        auto parentSet = tagsOld;\n        auto grandparentSet = tagsOldest;\n\n        if (currentSet->end() != currentSet->find(tagstr) ||\n                parentSet->end() != parentSet->find(tagstr) ||\n                grandparentSet->end() != grandparentSet->find(tagstr))\n        {\n                isNewTag = false;\n                trace.NOTE(\"Tag is a duplicate\");\n        }\n        else\n        {\n                // Yes, I really mean tagsCurr. If rotation happened between the time this\n                // thread grabbed the set pointers and now, currentSet points to tagsOld, so\n                // putting this tag into currentSet might leave the tag active for just a hair\n                // less than the guaranteed interval. Better too long than too short.\n                tagsCurr->insert(tagstr);\n                trace.NOTE(\"Tag is new\");\n        }\n\n        return isNewTag;\n}\n\nvoid\nListener::RotateTagSets()\n{\n        Trace trace(Trace::EventIngest, \"Listener::RotateTagSets\");\n\n        if (trace.IsActive())\n        {\n                std::ostringstream msg;\n                msg << Name() << \" Tagset sizes: Curr=\" << tagsCurr->size() << \"; Old=\" << tagsOld->size();\n                msg << \"; Oldest=\" << tagsOldest->size();\n                trace.NOTE(msg.str());\n        }\n\n        tagsAgedOut = tagsOldest;\n        tagsOldest = tagsOld;\n        tagsOld = tagsCurr;\n        tagsCurr = new tag_set();\n}\n\nvoid\nListener::ScrubTagSets()\n{\n        Trace trace(Trace::EventIngest, \"Listener::ScrubTagSets\");\n\n        if (tagsAgedOut) {\n                if (trace.IsActive()) {\n                        std::ostringstream msg;\n                        msg << Name() << \" releasing \" << tagsAgedOut->size() << \" tags\";\n                        trace.NOTE(msg.str());\n                }\n                delete tagsAgedOut;\n                tagsAgedOut = 0;\n        }\n}\n\nvoid\nListener::timerhandler(std::shared_ptr<Listener> listener, Listener::TimerTask job)\n{\n        Trace trace(Trace::EventIngest, \"Listener::timerhandler\");\n\n        if (listener->IsFinished()) {\n                // Do nothing; especially, do not reschedule the timer. The deadline_timer code\n                // will allow its copy of the shared_ptr for this instance to go out of scope,\n                // triggering a safe delete of the Listener class instance. 
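(Each async_wait callback holds its own\n                // shared_ptr copy via boost::bind, so the instance stays alive until the last\n                // pending handler has run.) 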
If we were cancelled,\n                // we'll want to do exactly the same thing, and Listener::Shutdown is careful to set\n                // _finished before it tries to cancel the timer. As a result, there's no need\n                // to check whether we're being cancelled or not; if _finished is set,\n                // just return.\n                trace.NOTE(listener->Name() + \" IsFinished is true\");\n                return;\n        }\n\n        switch (job) {\n        case TimerTask::rotate:\n                listener->RotateTagSets();\n                listener->Timer().expires_from_now(boost::posix_time::seconds(15));\n                listener->Timer().async_wait(boost::bind(&Listener::timerhandler, listener, TimerTask::cleanup));\n                break;\n\n        case TimerTask::cleanup:\n                listener->ScrubTagSets();\n                listener->Timer().expires_from_now(boost::posix_time::seconds(checkpointSeconds - 15));\n                listener->Timer().async_wait(boost::bind(&Listener::timerhandler, listener, TimerTask::rotate));\n                break;\n\n        default:\n                Logger::LogError(\"Listener::timerhandler saw unexpected state \" + std::to_string(job));\n                listener->Timer().expires_from_now(boost::posix_time::seconds(checkpointSeconds));\n                listener->Timer().async_wait(boost::bind(&Listener::timerhandler, listener, TimerTask::rotate));\n                break;\n        }\n}\n\nvoid\nListener::DumpBuffer(std::ostream& os, const char* start, const char* end)\n{\n        size_t n = end - start + 1;\n        if (n < 1024*1024) {\n                os << \"Buffer contents [\" << std::string(start, n) << \"]\";\n        } else {\n                os << \"Partial buffer contents [\" << std::string(start, 1024*1024) << \"]\";\n        }\n}\n\n// vim: set ai sw=8 expandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Listener.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _LISTENER_HH_\n#define _LISTENER_HH_\n\n#include \"Logger.hh\"\n//#include \"PoolMgmt.hh\"\n#include <string>\n#include <ctime>\n#include <unordered_set>\n#include <stddef.h>\n#include <boost/asio.hpp>\n#include <boost/bind.hpp>\n#include <exception>\n#include <memory>\n\nextern \"C\" {\n#include \"cJSON.h\"\n}\n\n// Instances of Listener (and derived classes) *must* be referenced via shared_ptr. The thread startproc\n// and timerhandler functions race to be the last one with a pointer to the instance once ProcessLoop()\n// returns, and it's not even the timerhandler that holds the last pointer; it's boost::deadline_timer that\n// is often the last holder. We need to ensure the _timer object remains valid until deadline_timer lets\n// go of it.\nclass Listener\n{\nprivate:\n\ttypedef std::unordered_set<std::string> tag_set;\n\ttypedef enum { rotate, cleanup } TimerTask;\n\n\tListener(const Listener&) = delete;\t\t// Do not define; copy construction forbidden\n\tListener& operator=(const Listener &) = delete;\t// Ditto for assignment\n\tvoid Shutdown();\n\n\tvoid LogBadJSON(cJSON* event, const std::string&);\n\tbool IsNewTag(cJSON* jsTAG);\n\tvoid EchoTag(char* tag);\n\tvoid DumpBuffer(std::ostream& os, const char* start, const char* end);\n\tvoid RotateTagSets();\n\tvoid ScrubTagSets();\n\tbool TryParseEvent(cJSON* event);\n\tbool TryParseEcho(cJSON* event);\n\n\tint clientfd;\n\ttag_set *tagsAgedOut;\n\ttag_set *tagsOldest;\n\ttag_set *tagsOld;\n\ttag_set *tagsCurr;\n\n\tstatic unsigned int checkpointSeconds;\n\n\tboost::asio::deadline_timer _timer;\n\tboost::asio::deadline_timer& Timer() { return _timer; }\n\tstatic void timerhandler(std::shared_ptr<Listener>, TimerTask);\n\n\tbool _finished;\n\tstd::string _name;\n\nprotected:\n\tconst char * ParseBuffer(const char* start, const char* end);\n\tint fd() const { return clientfd; }\n\npublic:\n\tListener(int fd);\n\tvirtual ~Listener();\n\n\tvirtual void * ProcessLoop() { Logger::LogError(\"Listener::ProcessLoop() was called\"); return 0; }\n\n\tstatic void * handler(void *);\t// Thread proc for all listeners\n\n\tbool IsFinished() const { return _finished; }\n\tconst std::string& Name() const { return _name; }\n\n\tstatic void setDupeWindow(unsigned long seconds) { checkpointSeconds = seconds / 2; }\n\n\tclass exception : public std::exception\n\t{\n\tpublic:\n\t\texception(const std::string & msg) : std::exception(), _what(msg) {}\n\t\texception(const std::ostringstream &msg) : std::exception(), _what(msg.str()) {}\n\t\texception(const char * msg) : std::exception(), _what(msg) {}\n\n\t\tvirtual const char * what() const noexcept { return _what.c_str(); }\n\tprivate:\n\t\tstd::string _what;\n\t};\n};\n\n// vim: set ai sw=8:\n#endif // _LISTENER_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/LocalSink.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"LocalSink.hh\"\n\n#include <iterator>\n#include <sstream>\n#include <algorithm>\n\n#include \"CanonicalEntity.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Utility.hh\"\n#include \"RowIndex.hh\"\n#include \"Trace.hh\"\n#include \"MdsdMetrics.hh\"\n#include \"StoreType.hh\"\n#include \"SchemaCache.hh\"\n#include \"Logger.hh\"\n#include \"EventHubUploaderId.hh\"\n#include \"EventHubType.hh\"\n#include \"EventHubUploaderMgr.hh\"\n\n// Class statics\n//\n// Table of local tables and a mutex to protect it. The map is altered only\n// while loading configurations, but that can happen in parallel with incoming events.\n// It may be that the global table is referenced only during config load, in which case\n// the mutex won't be needed.\n//\n// These are on the heap because there's no way to control the order of destruction of\n// global static objects declared in separate compilation units. The Batch class contains\n// a pointer to a sink; Batch instances in the global static MdsdConfig::_localBatches\n// BatchSet all point to LocalSink objects. If the LocalSink::_localTables map were a\n// global static, it might be destroyed at program-exit before the static _localBatches\n// was destroyed. In that case, when the Batch destructor deletes its LocalSink, the\n// LocalSink destructor tries to remove the object from the _localTables map which has\n// already been destroyed.\n\nstd::mutex* LocalSink::_ltMutex { nullptr };\nstd::map<const std::string, LocalSink*>* LocalSink::_localTables { nullptr };\n\nvoid\nLocalSink::Initialize()\n{\n\tif (_ltMutex == nullptr) {\n\t\t_ltMutex = new std::mutex;\n\t\t_localTables = new std::map<const std::string, LocalSink*>;\n\t}\n}\n\nLocalSink::LocalSink(const std::string &name)\n  : IMdsSink(StoreType::Type::Local), _name(name), _schemaId(0)\n{\n\tTrace trace(Trace::Local, \"LocalSink::Constructor\");\n\n\tstd::unique_lock<std::mutex> lock(*_ltMutex);\n\tauto result = _localTables->insert(std::pair<const std::string, LocalSink*>(_name, this));\n\tlock.unlock();\n\tif (!(result.second)) {\n\t\tthrow std::invalid_argument(\"Duplicate local table name\");\n\t}\n}\n\nLocalSink::~LocalSink()\n{\n\tTrace trace(Trace::Local, \"LocalSink::Destructor\");\n\n\tstd::lock_guard<std::mutex> lock(*_ltMutex);\n\t_localTables->erase(_name);\n}\n\nvoid\nLocalSink::AllocateSchemaId()\n{\n\t_schemaId = SchemaCache::Get().GetId();\n}\n\nLocalSink*\nLocalSink::Lookup(const std::string& name)\n{\n\tTrace trace(Trace::Local, \"LocalSink::Lookup\");\n\ttrace.NOTE(\"Looking for LocalSink \" + name);\n\tstd::lock_guard<std::mutex> lock(*_ltMutex);\n\tauto iter = _localTables->find(name);\n\tif (iter == _localTables->end()) {\n\t\ttrace.NOTE(\"Not found\");\n\t\treturn nullptr;\n\t} else {\n\t\ttrace.NOTE(\"Found it\");\n\t\treturn iter->second;\n\t}\n}\n\n// Copy the CE before adding it\nvoid\nLocalSink::AddRow(const CanonicalEntity &row, const MdsTime& )\n{\n\tTrace trace(Trace::Local, \"LocalSink::AddRow(CE)\");\n\n\tstd::shared_ptr<CanonicalEntity> item;\n\n\ttry {\n\t\titem.reset(new CanonicalEntity(row));\n\t}\n\tcatch (const std::exception& ex) {\n\t\tLogger::LogError(\"Exception copying item to insert into LocalSink \" + _name + \": \" + ex.what());\n\t\treturn;\n\t}\n\tAddRow(item);\n}\n\n// This version of AddRow assumes it can share the CE.\nvoid\nLocalSink::AddRow(std::shared_ptr<CanonicalEntity> item)\n{\n\tTrace trace(Trace::Local, 
\"LocalSink::AddRow(shared CE)\");\n\tsize_t nEvents = 0;\n\ttry {\n\t\t// Add row to event collection, ordered by the PreciseTime() in the item.\n\t\t// If retention period is zero, there are no downstream consumers; don't even bother\n\t\t// adding the item to the list. This behavior should change when local sinks are persisted;\n\t\t// the item should be written to the disk. If some fraction of a local sink is retained in\n\t\t// memory (as a performance optimization), that should not happen if RetentionPeriod() == 0\n\t\tif (RetentionPeriod()) {\n\t\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\t\t_events.emplace_hint(_events.end(), item->PreciseTime(), item);\n\t\t\tnEvents = _events.size();\n\t\t}\n\n\t\tif (!_ehpubMonikers.empty() && CanonicalEntity::SourceType::Ingested == item->GetSourceType()) {\n\t\t\tSendToEventPub(item);\n\t\t}\n\t}\n\tcatch (const std::exception& ex) {\n\t\tLogger::LogError(\"Exception adding item to LocalSink \" + _name + \": \" + ex.what());\n\t\treturn;\n\t}\n\tTRACEINFO(trace, \"LocalSink \" << _name << \" now has \" << nEvents << \" rows\");\n}\n\n// Copy the value (shared_ptr<CanonicalEntity>) from the map elements in the range. This increases the\n// refcount on all the shared pointers; it doesn't actually copy the CanonicalEntity objects.\n// *** Must be called with _mutex already held ***\nLocalSink::vector_type\nLocalSink::ExtractRange(LocalSink::iterator start, LocalSink::iterator end)\n{\n\tLocalSink::vector_type extract;\n\ttypedef LocalSink::iterator::value_type value_type;\n\n\tif (start != end) {\n\t\ttry {\n\t\t\tauto count = std::distance(start, end);\n\t\t\textract.reserve(count);\n\t\t\tstd::for_each(start, end, [&extract](value_type& val){extract.push_back(val.second);});\n\t\t}\n\t\tcatch (const std::exception& ex) {\n\t\t\tLogger::LogError(\"Exception in ExtractRange on \" + _name + \": \" + ex.what());\n\t\t}\n\t}\n\treturn extract;\n}\n\nvoid\nLocalSink::Flush()\n{\n\tTrace trace(Trace::Local, \"LocalSink::Flush\");\n\n\t// The instance knows the longest timespan we'll ever be asked for (gap between\n\t// Foreach()'s begin and delta parameters. Just call Flush(now - span).\n\t// We actually double the span for safety's sake.\n\n\tFlush(MdsTime::Now() - RetentionPeriod() - RetentionPeriod());\n}\n\nvoid\nLocalSink::Flush(const MdsTime& when)\n{\n\tTrace trace(Trace::Local, \"LocalSink::Flush(when)\");\n\tTRACEINFO(trace, \"Flushing items older than \" << when << \" from LocalSink \" << _name);\n\n\tLocalSink::vector_type scrubList;\n\ttry {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\titerator rangeEnd = _events.lower_bound(when);\n\t\tif (rangeEnd == _events.begin()) {\n\t\t\tTRACEINFO(trace, \"Nothing to remove from LocalSink \" << _name);\n\t\t\treturn;\n\t\t}\n\t\tscrubList = ExtractRange(_events.begin(), rangeEnd);\n\n\t\t// Erase all the entries from the multimap (won't destroy the CEs) and release the lock\n\t\tTRACEINFO(trace, \"Removing \" << scrubList.size() << \" items from \" << _name);\n\t\t_events.erase(_events.begin(), rangeEnd);\n\t}\n\tcatch (const std::exception& ex) {\n\t\tLogger::LogError(\"Exception while removing range from \" + _name + \": \" + ex.what());\n\t}\n\n\t// Now we can delete these without blocking everyone else waiting on the sink. 
It is very\n\t// likely the shared_ptrs in this list have a refcount of 1, and thus the CEs will be destroyed.\n\t// By explicitly clearing the scrubList, we can determine how much real time is required\n\t// to destroy all those objects (time between this trace message and the \"Leaving\" message).\n\tTRACEINFO(trace, \"Destroying \" << scrubList.size() << \" items removed from \" << _name);\n\tscrubList.clear();\n}\n\n// Extract each event in the [begin, begin+delta) range, then invoke the function on each extracted event.\n// Release the shared ptr for the extracted events as we go, amortizing heap operations over time. A large\n// extract may be the last holder of a reference to a CE, so releasing as-we-go could make memory available sooner.\nvoid\nLocalSink::Foreach(const MdsTime &begin, const MdsTime &delta, const std::function<void(const CanonicalEntity &)>& fn)\n{\n\tTrace trace(Trace::Local, \"LocalSink::Foreach\");\n\tTRACEINFO(trace, \"begin at \" << begin << \", delta \" << delta);\n\n\tLocalSink::vector_type matchedEvents;\n\ttry {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\tmatchedEvents = ExtractRange(_events.lower_bound(begin), _events.lower_bound(begin + delta));\n\t}\n\tcatch (const std::exception& ex) {\n\t\tLogger::LogError(\"Exception while extracting range from \" + _name + \": \" + ex.what());\n\t\treturn;\n\t}\n\n\tTRACEINFO(trace, \"Extracted \" << matchedEvents.size() << \" events from \" << _name);\n\tfor (auto& eventPtr : matchedEvents) {\n\t\tfn(*eventPtr);\n\t\teventPtr.reset();\t// Done with this item; if we're the last user, let it go\n\t}\n}\n\nvoid\nLocalSink::SetEventPublishInfo(\n\tconst std::unordered_set<std::string> & monikers,\n\tstd::string eventDuration,\n\tstd::string tenant,\n\tstd::string role,\n\tstd::string roleInstance\n\t)\n{\n\tif (monikers.empty()) {\n\t\tthrow std::invalid_argument(\"SetEventPublishInfo(): moniker cannot be empty.\");\n\t}\n\n\t_ehpubMonikers = monikers;\n\t_eventDuration = std::move(eventDuration);\n\t_tenant = std::move(tenant);\n\t_role = std::move(role);\n\t_roleInstance = std::move(roleInstance);\n}\n\nvoid\nLocalSink::SendToEventPub(std::shared_ptr<CanonicalEntity> item)\n{\n\tTrace trace(Trace::Local, \"LocalSink::SendToEventPub\");\n\tif (!item) {\n\t\tthrow std::invalid_argument(\"LocalSink::SendToEventPub(): CanonicalEntity cannot be nullptr\");\n\t}\n\n\tauto jsonData = item->GetJsonRow(_eventDuration, _tenant, _role, _roleInstance);\n\tif (jsonData.empty()) {\n\t\tthrow std::runtime_error(\"LocalSink::SendToEventPub(): failed to get data to publish.\");\n\t}\n\n\tauto ehtype = mdsd::EventHubType::Publish;\n\tfor (const auto & moniker : _ehpubMonikers) {\n\t\t// Build a fresh EventDataT for each moniker: the message is handed off via\n\t\t// std::move, so reusing one instance would leave it empty after the first upload.\n\t\tmdsd::EventDataT ehdata;\n\t\tehdata.SetData(jsonData);\n\t\tmdsd::EventHubUploaderMgr::GetInstance().AddMessageToUpload(\n\t\t\tmdsd::EventHubUploaderId(ehtype, moniker, _name),\n\t\t\tstd::move(ehdata));\n\t\tTRACEINFO(trace, \"LocalSink::SendToEventPub: moniker=\" << moniker << \"; sinkName=\" << _name);\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/LocalSink.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _LOCALSINK_HH_\n#define _LOCALSINK_HH_\n\n#include \"IMdsSink.hh\"\n#include <map>\n#include <memory>\n#include <functional>\n#include <string>\n#include <mutex>\n#include <unordered_set>\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"SchemaCache.hh\"\n\nclass LocalSink : public IMdsSink\n{\npublic:\n\ttypedef std::multimap<const MdsTime, std::shared_ptr<CanonicalEntity>> map_type;\n\ttypedef std::vector<std::shared_ptr<CanonicalEntity>> vector_type;\n\ttypedef map_type::iterator iterator;\n\n\tLocalSink(const std::string&);\n\tvirtual ~LocalSink();\n\n\tvirtual bool IsLocal() const { return true; }\n\tvirtual void AddRow(const CanonicalEntity&, const MdsTime&);\n\tvirtual void Flush();\n\n\t// An ingested event goes to precisely one LocalSink; this method\n\t// lets us avoid copying the CE upon ingest\n\tvoid AddRow(std::shared_ptr<CanonicalEntity>);\n\n\tvoid Flush(const MdsTime &when);\n\tvoid Foreach(const MdsTime &when, const MdsTime &delta, const std::function<void(const CanonicalEntity &)>&);\n\n\tvoid AllocateSchemaId();\n\tSchemaCache::IdType SchemaId()\t\t  { return _schemaId; }\n\n\tstatic LocalSink * Lookup(const std::string& name);\n\n\tstatic void Initialize();\n\n\tvoid SetEventPublishInfo(const std::unordered_set<std::string> & monikers,\n\t\tstd::string eventDuration,\n\t\tstd::string tenant,\n\t\tstd::string role,\n\t\tstd::string roleInstance);\n\nprivate:\n\tvector_type ExtractRange(iterator start, iterator end);\n\tvoid SendToEventPub(std::shared_ptr<CanonicalEntity> item);\n\n\tmap_type _events;\n\tconst std::string _name;\n\n\t// Applies only to local sinks which directly receive json external data; derived\n\t// local tables will have a 0 _schemaId, and so will sinks that receive BOND and dynamic json external data.\n\tSchemaCache::IdType _schemaId;\n\n\tstd::mutex _mutex;\n\n\tstatic std::map<const std::string, LocalSink*>* _localTables;\n\tstatic std::mutex* _ltMutex;\n\n\t// event publishing information\n\tstd::unordered_set<std::string> _ehpubMonikers;\n\tstd::string _eventDuration;\n\tstd::string _tenant;\n\tstd::string _role;\n\tstd::string _roleInstance;\n};\n\n#endif // _LOCALSINK_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsBlobOutputter.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _MDSBLOBOUTPUTTER_HH\n#define _MDSBLOBOUTPUTTER_HH\n#include <string>\n#include \"Crypto.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"Utility.hh\"\n#include <type_traits>\n#include <cstring>\n#include <stdexcept>\n\nclass MdsBlobOutputter\n{\npublic:\n    MdsBlobOutputter(size_t maxbytes) : _buffer(0), _end(0), _current(0)\n    {\n        if (maxbytes) {\n            _current = _buffer = new unsigned char [maxbytes];\n            _end = _buffer + maxbytes;\n        }\n    }\n\n    ~MdsBlobOutputter() { if (_buffer) delete [] _buffer; }\n\n    size_t size() const { return (_buffer) ? (_current - _buffer) : 0; }\n\n    void clear() { if (_buffer) { delete [] _buffer; _buffer = nullptr; } }\n\n    unsigned char * data() { return _buffer; }\n\n    template <typename T>\n    typename std::enable_if<std::is_integral<T>::value, void>::type\n    Write(const T& value)\n    {\n        Trace trace(Trace::BondDetails, \"Write<scalar>\");\n        TRACEINFO(trace, sizeof(T) << \" bytes\");\n        if (_current + sizeof(T) > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\");\n        }\n        * reinterpret_cast<T*>(_current) = value;\n        _current += sizeof(T);\n    }\n\n    void\n    Write(const std::string& value)\n    {\n        Trace trace(Trace::BondDetails, \"Write<string>\");\n        size_t bytecount = value.size();\n        size_t totalbytes = bytecount + sizeof(uint32_t);\n        TRACEINFO(trace, value.size() << \" characters, \" << bytecount << \" bytes (\" << totalbytes << \" total)\");\n        if ((_current + totalbytes) > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\"); \n        }       \n        * reinterpret_cast<uint32_t*>(_current) = bytecount;\n        ::memcpy(_current + sizeof(uint32_t), value.data(), bytecount);\n        _current += totalbytes;\n    }\n\n    void\n    Write(const std::u16string& value)\n    {\n        Trace trace(Trace::BondDetails, \"Write<u16string>\");\n        size_t bytecount = sizeof(std::u16string::value_type) * value.size();\n        size_t totalbytes = bytecount + sizeof(uint32_t);\n        TRACEINFO(trace, value.size() << \" characters, \" << bytecount << \" bytes (\" << totalbytes << \" total)\");\n        if ((_current + totalbytes) > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\"); \n        }       \n        * reinterpret_cast<uint32_t*>(_current) = bytecount;\n        ::memcpy(_current + sizeof(uint32_t), value.data(), bytecount);\n        _current += totalbytes;\n    }\n\n    void\n    WriteShort(const std::u16string& value)\n    {\n        Trace trace(Trace::BondDetails, \"WriteShort<u16string>\");\n        size_t bytecount = sizeof(std::u16string::value_type) * value.size();\n        size_t totalbytes = bytecount + sizeof(uint16_t);\n        TRACEINFO(trace, value.size() << \" characters, \" << bytecount << \" bytes (\" << totalbytes << \" total)\");\n        if ((_current + totalbytes) > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\");\n        }\n        * reinterpret_cast<uint16_t*>(_current) = static_cast<uint16_t>(bytecount);\n        ::memcpy(_current + sizeof(uint16_t), value.data(), bytecount);\n        _current += totalbytes;\n    }\n\n    void\n    Write(const Crypto::MD5Hash& value)\n    {\n        Trace trace(Trace::BondDetails, \"Write<Crypto::MD5Hash>\");\n        
size_t len = Crypto::MD5Hash::DIGEST_LENGTH;\n        TRACEINFO(trace, len << \" bytes\");\n        if (_current + len > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\");\n        }\n        ::memcpy(_current, value.GetBuffer(), len);\n        _current += len;\n    }\n\n\n    void\n    Write(const char * array, size_t len)\n    {\n        Trace trace(Trace::BondDetails, \"Write<char array>\");\n        if (len && !array) {\n            throw std::invalid_argument(\"Attempt to write non-zero length char* array from NULL pointer\");\n        }\n        if (!len) {\n            Logger::LogWarn(\"Blob writer asked to write zero-length char array\");\n            return;\n        }\n        TRACEINFO(trace, len << \" bytes to be written\");\n        if ((_current + len) > _end) {\n            throw std::overflow_error(\"Bond blob buffer overflow\");\n        }\n        ::memcpy(_current, array, len);\n        _current += len;\n    }\n\n    void\n    Write(const unsigned char * array, size_t len)\n    {\n        Trace trace(Trace::BondDetails, \"Write<unsigned char array>\");\n        if (len && !array) {\n            throw std::invalid_argument(\"Attempt to write non-zero length unsigned char* array from NULL pointer\");\n        }\n        if (!len) {\n            Logger::LogWarn(\"Blob writer asked to write zero-length unsigned char array\");\n            return;\n        }\n        TRACEINFO(trace, len << \" bytes to be written\");\n        if ((_current + len) > _end)\n        {\n            throw std::overflow_error(\"Bond blob buffer overflow\");\n        }\n        ::memcpy(_current, array, len);\n        _current += len;\n    }\n\n    void\n    WriteSuffix()\n    {\n        Write(0xdeadc0dedeadc0de);\n    }\n\nprivate:\n    unsigned char*  _buffer;\n    unsigned char*  _end;\n    unsigned char*  _current;\n\n    void\n    dumpstate(std::ostream& strm)\n    {\n        strm << \"_buffer=\" << static_cast<void*>(_buffer);\n        strm << \" _current=\" << static_cast<void*>(_current);\n        strm << \" _end=\" << static_cast<void*>(_end);\n    }\n};\n\n#endif // _MDSBLOBOUTPUTTER_HH\n\n// vim: se expandtab sw=4 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsEntityName.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsEntityName.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"Crypto.hh\"\n#include \"Utility.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include <sstream>\n\nusing std::string;\n\n// MdsEntityName for SchemasTable in the account identified by these creds\nMdsEntityName::MdsEntityName(const MdsdConfig *config, const Credentials *creds) : _creds(creds)\n{\n\tTrace trace(Trace::EntityName, \"MdsEntityName constructor for SchemasTable\");\n\tif (!config) {\n\t\tthrow std::invalid_argument(\"Internal error: null config ptr\");\n\t} else if (!creds) {\n\t\tthrow std::invalid_argument(\"Internal error: null credentials\");\n\t}\n\n\t_storeType = StoreType::XTable;\n\t_physTableName = _basename = \"SchemasTable\";\n\t_isConstant = true;\n\t_isSchemasTable = true;\n}\n\n// Constructor for arbitrary table in some store (local or remote) accessed via a specific moniker.\nMdsEntityName::MdsEntityName(const std::string &eventName, bool noPerNDay, const MdsdConfig *config,\n                        const std::string &acct, StoreType::Type sinkType, bool isFullName)\n\t: _basename(eventName), _isConstant(true), _isSchemasTable(false), _storeType(sinkType), _creds(nullptr),\n\t  _physTableName(eventName),\n\t  _eventName(eventName), _eventVersion(config->EventVersion())\n{\n\tTrace trace(Trace::EntityName, \"MdsEntityName constructor\");\n\n\tif (eventName.empty()) {\n\t\tthrow std::invalid_argument(\"eventName must not be empty\");\n\t}\n\n\tauto maxNameLength = StoreType::max_name_length(_storeType);\n\n        if (sinkType == StoreType::Type::Local || sinkType == StoreType::Type::File) {\n\t\t// Local table names never get encoded/shortened. Also, they need no credentials and no MdsdConfig\n\t\tif (_basename.length() > maxNameLength) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Event name \\\"\" << _basename << \"\\\" is too long for requested storeType (max \"\n\t\t\t    << maxNameLength << \" bytes)\";\n\t\t\tthrow std::invalid_argument(msg.str());\n\t\t}\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Local/File EventName \\\"\" << eventName << \"\\\" yields basename \\\"\" << _basename<< \"\\\" and _isConstant=\"\n\t\t\t    << _isConstant;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t\treturn;\n\t}\n\n\tif (!config) {\n\t\tthrow std::invalid_argument(\"Internal error: null config ptr\");\n\t}\n\tif (acct.empty()) {\n                if (! (_creds = config->GetDefaultCredentials())) {\n                        throw std::invalid_argument(\"No default credentials were defined\");\n                }\n        } else {\n                if (! 
(_creds = config->GetCredentials(acct))) {\n                        throw std::invalid_argument(\"No definition found for account moniker \" + acct);\n                }\n        }\n\n\t// The access credentials can influence how the actual name of the entity is computed, so\n\t// we have to look inside.\n\tif (isFullName && noPerNDay) {\n\t\t_isConstant = true;\n\t\ttrace.NOTE(\"Marked as isFullName without NDay suffix\");\n\t} else if (_creds->accessAnyTable()) {\n\t\tstd::ostringstream augmentedName;\n\t\tif (isFullName) {\n\t\t\taugmentedName << eventName;\n\t\t\ttrace.NOTE(\"Marked as isFullName and gets NDay suffix\");\n\t\t} else {\n\t\t\taugmentedName << config->Namespace() << eventName << \"Ver\" << config->EventVersion() << \"v0\";\n\t\t}\n\t\t_basename = _physTableName = augmentedName.str();\n\t\t_isConstant = noPerNDay;\t// This name might vary\n\n\t\t// The basename plus perNDay suffix (if any) must fit within the maximum entity name size\n\t\t// for MDS. If it doesn't, replace the basename with \"T\" followed by the MD5 hash of the\n\t\t// basename (without perNDay suffix), which is always short enough.\n\t\t// See Windows MA source NetTransport.cpp:GetNDayEventName()\n\t\tsize_t limit = maxNameLength - (_isConstant?0:8);\n\t\tif (_basename.size() > limit) {\n\t\t\ttrace.NOTE(\"Basename \" + _basename + \" too long; using MD5 hash\");\n\t\t\t_basename = \"T\" + Crypto::MD5HashString(_basename).to_string();\n\t\t}\n\t} else if (auto SAScreds = dynamic_cast<const CredentialType::SAS*>(_creds)) {\n\t\tif (!isFullName) {\n\t\t\tstd::ostringstream augmentedName;\n\t\t\taugmentedName << config->Namespace() << eventName << \"Ver\" << config->EventVersion() << \"v0\";\n\t\t\t_physTableName = augmentedName.str();\n\t\t}\n\t\t// SAS (non-account SAS) includes the tablename; extract it from there. Even if isFullName is set, we have to try this\n\t\tstd::map<string, string> qry;\n\t\tMdsdUtil::ParseQueryString(SAScreds->Token(), qry);\n\t\tauto item = qry.find(\"tn\");\n\t\tif (item != qry.end()) {\n\t\t\t_basename = item->second;\n\t\t} else if (!SAScreds->IsAccountSas()) {\n\t\t\t// We'll just use what we were given; it'll probably fail later, too.\n\t\t\tLogger::LogError(\"Table SAS lacks [tn=]: \" + SAScreds->Token());\n\t\t}\n\t}\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"EventName \\\"\" << eventName << \"\\\" yields basename \\\"\" << _basename<< \"\\\", physTableName \\\"\";\n\t\tmsg << _physTableName << \"\\\", and _isConstant=\" << _isConstant;\n\t\ttrace.NOTE(msg.str());\n\t}\n}\n\nstd::string\nMdsEntityName::Name() const\n{\n\tTrace trace(Trace::EntityName, \"MdsEntityName::Name\");\n\n\tif (_isConstant) {\n\t\ttrace.NOTE(\"Using \" + _basename);\n\t\treturn _basename;\n\t}\n\n\tstd::string fullname = _basename + MdsdUtil::GetTenDaySuffix();\n\ttrace.NOTE(\"Computed table name \" + fullname);\n\treturn fullname;\n}\n\nstd::ostream&\noperator<<(std::ostream &str, const MdsEntityName &target)\n{\n\tswitch(target._storeType) {\n\tcase StoreType::None:\n\t\tstr << \"[None]\"; break;\n\tcase StoreType::XTable:\n\t\tstr << \"[XTable]\"; break;\n\tcase StoreType::Local:\n\t\tstr << \"[Local]\"; break;\n\tcase StoreType::File:\n\t\tstr << \"[File]\"; break;\n\tdefault:\n\t\tstr << \"[unknown]\"; break;\n\t}\n\n\tstr << target._basename;\n\n\tif (! target._isConstant) {\n\t\tstr << \"*\";\n\t}\n\treturn str;\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsEntityName.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSENTITYNAME_HH_\n#define _MDSENTITYNAME_HH_\n\n#include <string>\n#include <iostream>\n#include \"MdsTime.hh\"\n#include \"StoreType.hh\"\n\nclass MdsdConfig;\nclass Credentials;\n\n// Repository of metadata about the MDS target we're writing. Might be a server-side XTable\n// or Bond blob; might be a local table. This object knows the name of that thing, the kind of\n// thing it is, and has a pointer to the access credentials (if any) needed to talk to it.\nclass MdsEntityName\n{\nfriend std::ostream& operator<<(std::ostream &str, const MdsEntityName &target);\n\npublic:\n\t// SchemasTable accessible with these creds\n\tMdsEntityName(const MdsdConfig *config, const Credentials *creds);\n\n\t// This arbitrary MDS entity (table, blob, whatever)\n\tMdsEntityName(const std::string &eventName, bool noPerNDay, const MdsdConfig *config,\n\t\t\tconst std::string &acct, StoreType::Type sinkType, bool isFullName=false);\n\n\t// Require autogenerated move-assignment and copy/move constructor\n\tMdsEntityName& operator=(MdsEntityName &&orig) = default;\n\tMdsEntityName(const MdsEntityName&) = default;\n\tMdsEntityName(MdsEntityName&&) = default;\n\n\t// <summary>Compute the XStore table name to be written to right now, at this instant.</summary>\n\tstd::string Name() const;\n\t// <summary>The XStore table \"family\" name, i.e. without 10day suffix.</summary>\n\tstd::string Basename() const { return _basename; }\n\t// <summary>The full-length table name, without 10day suffix, as it would appear in various\n\t// MDS tools. This can be longer than the 64-char max for XStore table names.</summary>\n\tstd::string PhysicalTableName() const { return _physTableName; }\n\n\t// <summary> Get the original Eventname </summary>\n\tstd::string EventName() const { return _eventName; }\n\n\t/// <summary>Get the original EventVersion</summary>\n\tint EventVersion() const { return _eventVersion; }\n\n\t// <summary>True if the table name never changes (e.g. no 10day suffix).</summary>\n\tbool IsConstant() const { return _isConstant; }\n\n\tbool IsSchemasTable() const { return _isSchemasTable; }\n\tStoreType::Type GetStoreType() const { return _storeType; }\n\tconst Credentials* GetCredentials() const { return _creds; }\n\nprivate:\n\t// The tablename, with version suffix but without the 10-day suffix, as used when writing\n\t// to XStore. If the name is \"too long\", this is the MD5-hashed name.\n\tstd::string _basename;\n\tbool _isConstant;\n\tbool _isSchemasTable;\n\tStoreType::Type _storeType;\n\tconst Credentials* _creds;\n\t// This form of the name is used in the PhysicalTableName column of SchemasTable. It\n\t// is identical to _basename except\n\t// when the name is too long, _basename is hashed, _physTableName is the unhashed,\n\t// very long form of the name.\n\t// Despite what the column (and this variable) are called,\n\t// this name is not the name of the actual physical table.\n\tstd::string _physTableName;\n\tstd::string _eventName;  // save the original event name\n\tint _eventVersion;      // save the original event version\n\n\t// const size_t MaxEntityNameLength = 63;\n};\n\nstd::ostream& operator<<(std::ostream &str, const MdsEntityName &target);\n\n#endif // _MDSENTITYNAME_HH_\n\n// vim: se sw=8 :\n"
  },
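Name() composes Basename() with MdsdUtil::GetTenDaySuffix(), whose implementation is not included in this listing. The sketch below is a hypothetical reconstruction based only on what is visible here: the suffix changes every ten days and MdsEntityName.cc reserves exactly 8 characters for it. The actual format mdsd uses may differ.

```cpp
#include <cstdio>
#include <ctime>
#include <iostream>
#include <string>

// Hypothetical stand-in for MdsdUtil::GetTenDaySuffix(). Assumption: an
// 8-char date-like suffix naming the start of the current 10-day window.
static std::string TenDaySuffixSketch(std::time_t now = std::time(nullptr))
{
	// Round down to the first day of the current 10-day window.
	std::time_t window = ((now / 86400) / 10) * 10 * 86400;
	std::tm* tm = std::gmtime(&window);
	char buf[9];
	std::snprintf(buf, sizeof(buf), "%04d%02d%02d",
		      tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
	return std::string(buf);	// 8 chars, e.g. "20240101"
}

int main()
{
	// Name() returns basename + suffix whenever _isConstant is false.
	std::cout << "MyNsMyEventVer2v0" + TenDaySuffixSketch() << "\n";
}
```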
  {
    "path": "Diagnostic/mdsd/mdsd/MdsSchemaMetadata.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsSchemaMetadata.hh\"\n#include \"Crypto.hh\"\n#include \"IdentityColumns.hh\"\n#include \"TableSchema.hh\"\n#include \"Trace.hh\"\n#include \"MdsEntityName.hh\"\n#include \"CanonicalEntity.hh\"\n#include <algorithm>\n#include <map>\n#include <string>\n#include <vector>\n#include <unordered_set>\n\nusing std::string;\nusing std::vector;\n\nstd::map<string, MdsSchemaMetadata*> MdsSchemaMetadata::_cache;\nstd::mutex MdsSchemaMetadata::_mutex;\n\n#define STRINGPAIR(a,b) std::make_pair(string(a),string(b))\ntypedef std::pair<std::string, std::string> coldata_t;\n\nconst std::unordered_set<std::string> MdsSchemaMetadata::MetadataColumns\n        { \"TIMESTAMP\", \"PreciseTimeStamp\", \"PartitionKey\", \"RowKey\", \"N\", \"RowIndex\" };\n\n// Given a set of destination metadata and a CanonicalEntity, build the metadata MDS needs\n// to interpret the destination object (table, Bond blob, etc.)\nMdsSchemaMetadata*\nMdsSchemaMetadata::GetOrMake(const MdsEntityName &target, const CanonicalEntity* ce)\n{\n\tif (!ce) {\n\t\treturn nullptr;\n\t}\n\n        vector<coldata_t> unsortedSchema;\n\tunsortedSchema.reserve(ce->size() + 6);\n\n        // First, the timestamps...\n\tunsortedSchema.push_back(STRINGPAIR(\"TIMESTAMP\", \"mt:utc\"));\n\tunsortedSchema.push_back(STRINGPAIR(\"PreciseTimeStamp\", \"mt:utc\"));\n\n\t// Next, the data and identity columns (the identity columns are expected to have\n\t// already been added by this point). Ignore any of the \"special\" columns.\n\tfor (const auto & col : *ce) {\n\t\tif (! MetadataColumns.count(col.first)) {\n\t\t\tunsortedSchema.push_back(STRINGPAIR(col.first, col.second->TypeToString()));\n\t\t}\n\t}\n\n\t// XTable targets get some extra metadata\n\tif (target.GetStoreType() == StoreType::Type::XTable) {\n\t\tunsortedSchema.push_back(STRINGPAIR(\"PartitionKey\", \"mt:wstr\"));\n\t\tunsortedSchema.push_back(STRINGPAIR(\"RowKey\", \"mt:wstr\"));\n\t\tunsortedSchema.push_back(STRINGPAIR(\"N\", \"mt:wstr\"));\n\t\tunsortedSchema.push_back(STRINGPAIR(\"RowIndex\", \"mt:wstr\"));\n\t}\n\n\treturn GetOrMake(unsortedSchema);\n}\n\n\n// Given a vector of <name,type> pairs,\n// build the MDS table metadata (XML-format schema and MD5 hash of canonicalized schema).\nMdsSchemaMetadata*\nMdsSchemaMetadata::GetOrMake(vector<coldata_t>& schema)\n{\n        string elements;\n        for (auto it = schema.cbegin(); it != schema.cend(); ++it) {\n                elements += \"<Column name=\\\"\" + it->first + \"\\\" type=\\\"\" + it->second + \"\\\"></Column>\";\n        }\n\n        std::sort(schema.begin(), schema.end(),\n                [](coldata_t left, coldata_t right) -> bool { return (left.first.compare(right.first) < 0); } );\n\n        int columnCount = schema.size();\n        string schemaForMD5;\n\n        for (int i = 0; i < columnCount; ++i) {\n                schemaForMD5 += schema[i].first + \",\" + schema[i].second;\n                if (i < (columnCount-1)) {\n                        schemaForMD5 += \",\";\n                }\n        }\n\n        string md5 = Crypto::MD5HashString(schemaForMD5).to_string();\n\n\tstd::lock_guard<std::mutex> lock(_mutex);\t// Take lock on _cache; lock is released at function return\n\n\tauto it = _cache.find(schemaForMD5);\n\tif (it != _cache.end()) {\n\t\treturn it->second;\n\t}\n\n\t// Lock contention is rare, hits are common, and this string can\n\t// get moderately large. 
Deferring assembly until needed should save time in the long run.\n\n        string xmldata = \"<MdsConfig><Schemas><Schema name=\\\"Schema_\" + md5  + \"\\\">\";\n        xmldata += elements;\n        xmldata += \"</Schema></Schemas></MdsConfig>\";\n\n\t_cache[schemaForMD5] = new MdsSchemaMetadata(move(xmldata), move(md5), columnCount);\n\n\treturn _cache[schemaForMD5];\t\t// Be sure to return the address of the object in the cache\n}\n\n#ifdef DOING_MEMCHECK\n// Remove everything from the cache.\nvoid\nMdsSchemaMetadata::ClearCache()\n{\n\tTrace trace(Trace::ConfigLoad, \"MdsSchemaMetadata::ClearCache\");\n\n\tstd::lock_guard<std::mutex> lock(_mutex);\n\n\tsize_t count = 0;\n\tfor (auto entry : _cache) {\n\t\tdelete entry.second;\n\t\tcount++;\n\t}\n\t_cache.clear();\n\ttrace.NOTE(\"Deleted \" + std::to_string(count) + \" MdsSchemaMetadata objects from cache\");\n}\n#endif\n\n// vim: se sw=8 :\n"
  },
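GetOrMake keys its cache on the canonicalized column list (sorted by name, comma-joined) before MD5-hashing it. Below is a small self-contained sketch of that canonicalization, showing why two schemas that differ only in column order share one cache entry. Canonicalize is an illustrative name; the real code inlines this logic in GetOrMake.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using coldata_t = std::pair<std::string, std::string>;

// Sort columns by name, then join "name,type" pairs with commas, exactly as
// the schemaForMD5 string is built above. Hashing this string makes the
// cache key independent of the order in which columns were supplied.
static std::string Canonicalize(std::vector<coldata_t> schema)
{
	std::sort(schema.begin(), schema.end(),
		  [](const coldata_t& l, const coldata_t& r) { return l.first < r.first; });
	std::string out;
	for (std::size_t i = 0; i < schema.size(); ++i) {
		out += schema[i].first + "," + schema[i].second;
		if (i + 1 < schema.size()) {
			out += ",";
		}
	}
	return out;
}

int main()
{
	std::vector<coldata_t> a{{"TIMESTAMP", "mt:utc"}, {"CpuPct", "mt:float64"}};
	std::vector<coldata_t> b{{"CpuPct", "mt:float64"}, {"TIMESTAMP", "mt:utc"}};
	std::cout << Canonicalize(a) << "\n";				// CpuPct,mt:float64,TIMESTAMP,mt:utc
	std::cout << (Canonicalize(a) == Canonicalize(b)) << "\n";	// 1
}
```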
  {
    "path": "Diagnostic/mdsd/mdsd/MdsSchemaMetadata.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSSCHEMAMETADATA_HH_\n#define _MDSSCHEMAMETADATA_HH_\n\n#include <string>\n#include <utility>\n#include <vector>\n#include <map>\n#include <mutex>\n#include <unordered_set>\n#include \"Crypto.hh\"\n#include \"IdentityColumns.hh\"\n#include \"MdsEntityName.hh\"\n\nclass TableSchema;\nclass CanonicalEntity;\n\nclass MdsSchemaMetadata\n{\npublic:\n    typedef std::pair<std::string, std::string> coldata_t;\n\n    static const std::unordered_set<std::string> MetadataColumns;\n\n\t// Check cache for schema; if it exists, return pointer. Otherwise, create it, add it to cache, and return pointer.\n\tstatic MdsSchemaMetadata* GetOrMake(const MdsEntityName &target, const CanonicalEntity* ce);\n\n\tconst std::string& GetXML() const { return _xmldata; }\n\tconst std::string& GetMD5() const { return _md5; }\n\tsize_t GetSize() const { return _size; }\n\n#ifdef DOING_MEMCHECK\n\tstatic void ClearCache();\n#endif\n\nprivate:\n\tconst std::string _xmldata;\t// The MDS SchemasTable \"Schema\" column representation\n\tconst std::string _md5;\t// The MD5 checksum of the canonicalized schema\n\tconst size_t _size;\t// The number of columns, including identity columns and everything else\n\n\tMdsSchemaMetadata(std::string&& x, std::string&& m, size_t s) : _xmldata(x), _md5(m), _size(s) {}\n\tMdsSchemaMetadata() = delete;\t\t// No default constructor\n\n\tstatic MdsSchemaMetadata* GetOrMake(std::vector<coldata_t>&);\n\n\t// Maps from canonical name/type list to the object\n\tstatic std::map<std::string, MdsSchemaMetadata*> _cache;\n\tstatic std::mutex _mutex;\t// Ensures access to the cache is serialized\n};\n\n#endif // _MDSSCHEMAMETADATA_HH_\n"
  },
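The private statics at the bottom of this header (_cache plus _mutex) form a mutex-guarded memo cache whose entries deliberately live until ClearCache() or process exit. A minimal sketch of that shape, with illustrative names rather than the repo's:

```cpp
#include <iostream>
#include <map>
#include <mutex>
#include <string>

// Illustrative names only. Entries are raw pointers owned by the cache and,
// as in the real class, never freed outside a memcheck build.
struct Metadata { std::string xml; };

static std::map<std::string, Metadata*> g_cache;
static std::mutex g_mutex;

static Metadata* GetOrMake(const std::string& key)
{
	std::lock_guard<std::mutex> lock(g_mutex);	// serialize all cache access
	auto it = g_cache.find(key);
	if (it != g_cache.end()) {
		return it->second;			// hit: return the cached object
	}
	auto* made = new Metadata{ "<Schema name=\"" + key + "\"/>" };
	g_cache[key] = made;				// miss: build and insert under the lock
	return made;
}

int main()
{
	std::cout << (GetOrMake("k") == GetOrMake("k")) << "\n";	// 1: same object both times
}
```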
  {
    "path": "Diagnostic/mdsd/mdsd/MdsValue.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsValue.hh\"\n#include <cstdlib>\n#include <ctime>\n#include <cmath>\n#include <cstring>\n#include <string>\n#include <stdexcept>\n#include <sstream>\n#include <iomanip>\n#include <boost/lexical_cast.hpp>\n#include \"Utility.hh\"\n#include \"cpprest/json.h\"\n\n// Copy constructor\nMdsValue::MdsValue(const MdsValue& src) : type(src.type)\n{\n\tswitch(type) {\n\tcase mt_bool:\n\t\tbval = src.bval;\n\t\tbreak;\n\tcase mt_int32:\n\t\tlval = src.lval;\n\t\tbreak;\n\tcase mt_int64:\n\t\tllval = src.llval;\n\t\tbreak;\n\tcase mt_float64:\n\t\tdval = src.dval;\n\t\tbreak;\n\tcase mt_wstr:\n\t\tstrval = new std::string(*(src.strval));\n\t\tbreak;\n\tcase mt_utc:\n\t\tdatetimeval = src.datetimeval;\n\t\tbreak;\n\tdefault:\n\t\tthrow std::logic_error(\"Attempt to copy MdsValue of unknown type\");\n\t}\n}\n\n// Constructor for MdsTime\nMdsValue::MdsValue(const MdsTime& val)\n{\n\ttype = mt_utc;\n\tdatetimeval = val.to_pplx_datetime();\n}\n\n// Constructor for mi::Datetime\nMdsValue::MdsValue(const mi::Datetime& x)\n{\n\t*this = MdsValue(MdsTime(x));\n}\n\n// Move assignment operator\nMdsValue&\nMdsValue::operator=(MdsValue&& src)\n{\n\ttype = src.type;\n\n\tswitch(type) {\n\tcase mt_bool:\n\t\tbval = src.bval;\n\t\tbreak;\n\tcase mt_int32:\n\t\tlval = src.lval;\n\t\tbreak;\n\tcase mt_int64:\n\t\tllval = src.llval;\n\t\tbreak;\n\tcase mt_float64:\n\t\tdval = src.dval;\n\t\tbreak;\n\tcase mt_wstr:\n\t\tstrval = src.strval;\n\t\tsrc.strval = nullptr;\n\t\tbreak;\n\tcase mt_utc:\n\t\tdatetimeval = src.datetimeval;\n\t\tbreak;\n\tdefault:\n\t\tthrow std::logic_error(\"Attempt to move-assign MdsValue of unknown type\");\n\t}\n\n\treturn *this;\n}\n\n\n\nMdsValue*\nMdsValue::time_t_to_utc(cJSON* src)\n{\n\tif (src->type != cJSON_Number) return 0;\n\tif (src->valueint > LONG_MAX) return 0;\n\n\treturn new MdsValue(MdsTime(src->valueint, 0));\n}\n\nMdsValue*\nMdsValue::double_time_t_to_utc(cJSON* src)\n{\n\tif (src->type != cJSON_Number) return 0;\n\tif (src->valuedouble > double(LONG_MAX) || src->valuedouble < 0.) return 0;\n\n\tlong sec = int(floor(src->valuedouble));\n\tlong fraction = int(floor(1000000. * (src->valuedouble - floor(src->valuedouble))));\n\n\treturn MdsValue::sec_usec_to_utc(sec, fraction);\n}\n\nMdsValue*\nMdsValue::rfc3339_to_utc(cJSON* src)\n{\n\tif (src->type != cJSON_String) return 0;\n\n\tsize_t n = strlen(src->valuestring);\n\tif (n < 19) return 0;\t// Minimum legal length of an RFC 3339 datetime string\n\n\tlong tv_sec = 0, tv_usec = 0;\n\tif (!MdsdUtil::TimeValFromIso8601Restricted(src->valuestring, tv_sec, tv_usec)) return 0;\n\n\treturn MdsValue::sec_usec_to_utc(tv_sec, tv_usec);\n}\n\nvoid\nMdsValue::scale(double factor)\n{\n\tswitch(type) {\n\tcase mt_bool:\n\tcase mt_wstr:\n\tcase mt_utc:\n\tdefault:\n\t\tbreak;\n\tcase mt_int32:\n\t\tdval = factor * ((double)lval);\n\t\ttype = mt_float64;\n\t\tbreak;\n\tcase mt_int64:\n\t\tdval = factor * ((double)llval);\n\t\ttype = mt_float64;\n\t\tbreak;\n\tcase mt_float64:\n\t\tdval = factor * dval;\n\t\tbreak;\n\t}\n}\n\n\n// The OMI conversions are mostly mechanical, but templatizing them is pretty ugly due to the discriminated\n// unions in the MI_Value and MdsValue objects. 
It's\n// easy enough to use a macro to generate the common case:\n//\n// case MI_BOOLEAN:\n//\ttype = mt_bool;\n//\tbval = (bool) value.boolean;\n//\tbreak;\n// case MITYPE:\n//\ttype = MTTYPE;\n//\tMEMBER = (CTYPE)(value.UNIONARM);\n#define CVTUNARY(MITYPE, MTTYPE, MEMBER, CTYPE, UNIONARM) case MITYPE: type = MTTYPE; MEMBER = (CTYPE)(value.UNIONARM); break;\n\n// Arrays are a bit easier via macro; the MTTYPE, MEMBER, and CTYPE always correspond to strings.\ntemplate <typename ARRTYPE> static std::string *\nOMIarray2string(ARRTYPE arm)\n{\n\tstd::ostringstream result;\n\tfor (MI_Uint32 idx = 0; idx < arm.size; idx++) {\n\t\tauto val = arm.data[idx];\n\t\tif (idx) {\n\t\t\tresult << \", \";\n\t\t}\n\t\tresult << val;\n\t}\n\treturn new std::string(result.str());\n}\n#define CVTARRAY(MITYPE, TYPE, UNIONARM) case MITYPE: type=mt_wstr; strval=OMIarray2string<TYPE>(value.UNIONARM); break;\n\n// And there are some exceptions to the pattern that need to be handled explicitly.\n\nMdsValue::MdsValue(const MI_Value& value, MI_Type fieldtype)\n{\n\tswitch(fieldtype)\n\t{\n\t\tCVTUNARY(MI_BOOLEAN, mt_bool, bval, bool, boolean)\n\t\tCVTUNARY(MI_SINT8, mt_int32, lval, long, sint8)\n\t\tCVTUNARY(MI_UINT8, mt_int32, lval, long, uint8)\n\t\tCVTUNARY(MI_SINT16, mt_int32, lval, long, sint16)\n\t\tCVTUNARY(MI_UINT16, mt_int32, lval, long, uint16)\n\t\tCVTUNARY(MI_SINT32, mt_int32, lval, long, sint32)\n\t\tCVTUNARY(MI_UINT32, mt_int64, llval, long long, uint32)\n\t\tCVTUNARY(MI_SINT64, mt_int64, llval, long long, sint64)\n\t\tCVTUNARY(MI_UINT64, mt_int64, llval, long long, uint64)\n\t\tCVTUNARY(MI_REAL32, mt_float64, dval, double, real32)\n\t\tCVTUNARY(MI_REAL64, mt_float64, dval, double, real64)\n\t\tCVTUNARY(MI_CHAR16, mt_int32, lval, long, char16)\n\n\t\tCVTARRAY(MI_BOOLEANA, MI_BooleanA, booleana)\n\t\tCVTARRAY(MI_SINT8A, MI_Sint8A, sint8a)\n\t\tCVTARRAY(MI_UINT8A, MI_Uint8A, uint8a)\n\t\tCVTARRAY(MI_SINT16A, MI_Sint16A, sint16a)\n\t\tCVTARRAY(MI_UINT16A, MI_Uint16A, uint16a)\n\t\tCVTARRAY(MI_SINT32A, MI_Sint32A, sint32a)\n\t\tCVTARRAY(MI_UINT32A, MI_Uint32A, uint32a)\n\t\tCVTARRAY(MI_SINT64A, MI_Sint64A, sint64a)\n\t\tCVTARRAY(MI_UINT64A, MI_Uint64A, uint64a)\n\t\tCVTARRAY(MI_REAL32A, MI_Real32A, real32a)\n\t\tCVTARRAY(MI_REAL64A, MI_Real64A, real64a)\n\t\tCVTARRAY(MI_CHAR16A, MI_Char16A, char16a)\n\n        case MI_DATETIME:\n\t\t*this = MdsValue(MdsTime(value.datetime));\n\t\tbreak;\n\n        case MI_STRING:\n\t\ttype = mt_wstr;\n\t\tstrval = new std::string(value.string);\n\t\tbreak;\n\n        case MI_DATETIMEA:\n        {\n\t\ttype = mt_wstr;\n\t\tstd::ostringstream result;\n\t\tfor (MI_Uint32 idx = 0; idx < value.datetimea.size; idx++) {\n\t\t\tif (idx) {\n\t\t\t\tresult << \", \";\n\t\t\t}\n\t\t\tresult << MdsTime(value.datetimea.data[idx]);\n\t\t}\n\t\tstrval = new std::string(result.str());\n\t\tbreak;\n\t}\n\n        case MI_STRINGA:\n        {\n\t\ttype = mt_wstr;\n\t\tstd::ostringstream result;\n\t\tfor (MI_Uint32 idx = 0; idx < value.stringa.size; idx++) {\n\t\t\tif (idx) {\n\t\t\t\tresult << \", \";\n\t\t\t}\n\t\t\tresult << std::string(value.stringa.data[idx]);\n\t\t}\n\t\tstrval = new std::string(result.str());\n\t\tbreak;\n\t}\n\n        case MI_INSTANCE:\n        case MI_REFERENCE:\n        case MI_INSTANCEA:\n        case MI_REFERENCEA:\n\t\tthrow std::runtime_error(\"MdsValue asked to convert instance/reference\");\n\n        default:\n\t\tthrow std::runtime_error(\"MdsValue asked to convert unknown MI_Type\");\n    }\n}\n\n#if 0\nstd::string\nMdsValue::omi_time_to_string(const 
mi::Datetime& x)\n{\n\tMI_Uint32 y,mon,d,h,min,s,us;\n\tMI_Sint32 utc;\n\tx.Get(y,mon,d,h,min,s,us,utc);\n\n\tstruct tm t;\n\tt.tm_year = y-1900;\n\tt.tm_mon = mon-1;\n\tt.tm_mday = d;\n\tt.tm_hour = h;\n\tt.tm_min = min;\n\tt.tm_sec = s;\n\tt.tm_isdst = -1;  // let mktime() to decide daylight saving adjustment\n\n\ttime_t time1 = mktime(&t);\n\tlong sec = (long)(time1 + 60 * utc);\n\tlong usec = (long)us;\n\treturn sec_usec_to_string(sec, usec);\n}\n#endif\n\nstd::string\nMdsValue::ToString() const\n{\n\tstd::ostringstream s;\n\n\ts << *this;\n\n\treturn s.str();\n}\n\ndouble\nMdsValue::ToDouble() const\n{\n\tswitch(type) {\n\tcase mt_int32:\n\t\treturn (double) lval;\n\tcase mt_int64:\n\t\treturn (double) llval;\n\tcase mt_float64:\n\t\treturn dval;\n\tcase mt_wstr:\n\t\ttry {\n\t\t\treturn boost::lexical_cast<double>(*strval);\n\t\t}\n\t\tcatch(const boost::bad_lexical_cast &) {\n\t\t\tthrow std::domain_error(\"Value is a string which is not a valid floating-point number\");\n\t\t}\n\tcase mt_utc:\n\tcase mt_bool:\n\tdefault:\n\t\tthrow std::domain_error(\"Value is not a type which can be converted to float\");\n\t}\n}\n\nstd::ostream&\noperator<<(std::ostream& os, const MdsValue& mv)\n{\n\tswitch(mv.type)\n\t{\n\t\tcase MdsValue::MdsType::mt_bool:\n\t\t\tif (mv.bval) {\n\t\t\t\tos << \"true\";\n\t\t\t}\n\t\t\telse {\n\t\t\t\tos << \"false\";\n\t\t\t}\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_int32:\n\t\t\tos << \"(int32)\" << mv.lval;\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_int64:\n\t\t\tos << \"(int64)\" << mv.llval;\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_float64:\n\t\t\tos << \"(float64)\" << mv.dval;\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_wstr:\n\t\t\tos << \"(wstr)\\\"\" << *(mv.strval) << \"\\\"\";\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_utc:\n\t\t\tos << \"(utc)[\" << mv.datetimeval.to_string(utility::datetime::ISO_8601) << \"]\";\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tos << \"(no type)\";\n\t\t\tbreak;\n\t}\n\n\treturn os;\n}\n\nstd::string\nMdsValue::ToJsonSerializedString() const\n{\n\tweb::json::value jsonValue;\n\n\tswitch(type)\n\t{\n\t\tcase MdsValue::MdsType::mt_bool:\n\t\t\tjsonValue = web::json::value(bval);\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_int32:\n\t\t\tjsonValue = web::json::value(lval);\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_int64:\n\t\t\tjsonValue = web::json::value((int64_t)llval);\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_float64:\n\t\t\tjsonValue = web::json::value(dval);\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_wstr:\n\t\t\tjsonValue = web::json::value(*strval);\n\t\t\tbreak;\n\t\tcase MdsValue::MdsType::mt_utc:\n\t\t\tjsonValue = web::json::value(datetimeval.to_string(utility::datetime::ISO_8601));\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tthrow std::logic_error(\"Attempt to get JSON value string of unknown type\");\n\t}\n\n\treturn jsonValue.serialize();\n}\n\nstd::string\nMdsValue::TypeToString() const\n{\n\tswitch(type) {\n\tcase mt_bool:\n\t\treturn \"mt:bool\";\n\tcase mt_int32:\n\t\treturn \"mt:int32\";\n\tcase mt_int64:\n\t\treturn \"mt:int64\";\n\tcase mt_float64:\n\t\treturn \"mt:float64\";\n\tcase mt_wstr:\n\t\treturn \"mt:wstr\";\n\tcase mt_utc:\n\t\treturn \"mt:utc\";\n\t}\n\tthrow std::logic_error(\"Attempt to convert unknown MDS type to string\");\n}\n\ntemplate <typename T> std::string *\nMdsValue::Array2Str(const mi::Array<T>& arr)\n{\n    auto str = new std::string;\n    for (MI_Uint32 i = 0; i < arr.GetSize(); i++) {\n        T x = arr[i];\n\tif (i) {\n\t\tstr->append(\", \");\n\t}\n        
str->append(std::to_string(x));\n    }\n    return str;\n}\n\n// vim: se sw=8 :\n"
  },
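MdsValue hand-manages a raw union in which only the mt_wstr arm owns heap memory, which is why the destructor, copy constructor, and move assignment above all switch on the type tag. A stripped-down sketch of the same pattern (hypothetical class with fewer arms than the real one):

```cpp
#include <iostream>
#include <string>

// Hypothetical, stripped-down version of MdsValue's discriminated union:
// only the string arm owns heap memory, so the destructor and the copy
// constructor must switch on the tag.
class TaggedValue {
public:
	enum Type { t_int, t_str } type;
	union {
		long lval;
		const std::string* strval;
	};

	TaggedValue(long v) : type(t_int), lval(v) {}
	TaggedValue(const std::string& s) : type(t_str), strval(new std::string(s)) {}
	TaggedValue(const TaggedValue& src) : type(src.type)
	{
		if (type == t_str) {
			strval = new std::string(*src.strval);	// deep copy of the owning arm
		} else {
			lval = src.lval;
		}
	}
	TaggedValue& operator=(const TaggedValue&) = delete;
	~TaggedValue() { if (type == t_str) delete strval; }	// free only the owning arm
};

int main()
{
	TaggedValue a(std::string("hello"));
	TaggedValue b(a);			// deep copy; both destructors are safe
	std::cout << *b.strval << "\n";
}
```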
  {
    "path": "Diagnostic/mdsd/mdsd/MdsValue.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSVALUE_HH_\n#define _MDSVALUE_HH_\n\n#include <functional>\n#include <string>\n#include <iostream>\n#include <sstream>\n#include <cstdint>\n\n#include <micxx/dinstance.h>\n#include <micxx/datetime.h>\n//#include <micxx/types.h>\n\nextern \"C\" {\n#include \"cJSON.h\"\n}\n\n#include \"MdsTime.hh\"\n\nclass MdsValue {\n\tfriend std::ostream& operator<<(std::ostream& os, const MdsValue& mv);\n\npublic:\n\tenum MdsType { mt_bool, mt_wstr, mt_float64, mt_int32, mt_int64, mt_utc };\n\tMdsType type;\n\tunion {\n\t\tbool bval;\n\t\tlong lval;\n\t\tlong long llval;\n\t\tdouble dval;\n\t\tutility::datetime datetimeval;\n\t\tconst std::string * strval;\n\t};\n\n\t~MdsValue() { if ((type == mt_wstr) && strval) { delete strval; } }\n\n\t// Type converters. These all return a new MdsValue, copied from the original input,\n\t// which the caller will be expected to delete.\n\tMdsValue(bool v) { type = mt_bool; bval = v; }\n\tMdsValue(long v) { type = mt_int32; lval = v; }\n\tMdsValue(long long v) { type = mt_int64; llval = v; }\n\tMdsValue(double v) { type = mt_float64; dval = v; }\n\tMdsValue(utility::datetime v) { type = mt_utc; datetimeval = v; }\n\tMdsValue(const std::string& v) { type = mt_wstr; strval = new std::string(v); }\n\tMdsValue(std::string&& v) { type = mt_wstr; strval = new std::string(std::move(v)); }\n\tMdsValue(const char * v) { type = mt_wstr; strval = new std::string(v); }\n\tMdsValue(const std::ostringstream & str) { type = mt_wstr; strval = new std::string(str.str()); }\n\tMdsValue(const MdsTime&);\n\tMdsValue(const mi::Datetime&);\n\tMdsValue(const MI_Value&, mi::Type);\n\n\tMdsValue(const MdsValue&);\t\t\t// Copy constructor\n\tMdsValue(MdsValue&&) = delete;\t\t\t// No move-constructor\n\tMdsValue* operator=(const MdsValue&) = delete;\t// No copy-assignment\n\tMdsValue& operator=(MdsValue&&);\t\t// Move assignment\n\n\tstatic MdsValue* time_t_to_utc(cJSON* src);\n\tstatic MdsValue* double_time_t_to_utc(cJSON* src);\n\tstatic MdsValue* sec_usec_to_utc(long sec, long fraction) { return new MdsValue(MdsTime(sec, fraction)); }\n\tstatic MdsValue* rfc3339_to_utc(cJSON* src);\n\n\t// In-place, apply a scale factor to the numeric value. Silently do nothing if the\n\t// value is non-numeric.\n\tvoid scale(double);\n\n\tbool IsString() const { return (type == mt_wstr); }\n\tbool IsNumeric() const { return (type == mt_float64 || type == mt_int32 || type == mt_int64); }\n\n\tstd::string ToString() const;\n\tstd::string ToJsonSerializedString() const;\n\tdouble ToDouble() const;\n\tstd::string TypeToString() const;\n\nprivate:\n\tMdsValue();\t\t\t\t// No void constructor (no \"NULL\" objects)\n\t\n\t//static std::string omi_time_to_string(const mi::Datetime& x);\n\t//static std::string sec_usec_to_string(long sec, long fraction);\n\ttemplate <typename T> static std::string * Array2Str(const mi::Array<T>&);\n};\n\ntypedef std::function<MdsValue* (cJSON* in)> typeconverter_t;\n\nstd::ostream& operator<<(std::ostream& os, const MdsValue& mv);\n\n#endif //_MDSVALUE_HH_\n\n// vim: se sw=8 :\n"
  },
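The typeconverter_t typedef at the end of this header is the hook for table-driven conversion of parsed JSON fields; the static helpers above such as time_t_to_utc are converters of exactly this shape. Below is a self-contained sketch of that dispatch style. Json and Value are stand-ins for cJSON and MdsValue, since neither is linkable outside the tree, and the field names are invented for the demo.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-ins: the real converters take a cJSON* and return a
// newly allocated MdsValue* (or nullptr when the JSON type doesn't match).
struct Json { double number; };
struct Value { double d; };

using converter_t = std::function<Value*(const Json&)>;

int main()
{
	// One converter per field name, picked at parse time.
	std::map<std::string, converter_t> converters{
		{ "timestamp", [](const Json& j) { return new Value{ j.number }; } },
		{ "cpu_pct",   [](const Json& j) { return new Value{ j.number / 100.0 }; } },
	};

	Json field{ 42.0 };
	std::unique_ptr<Value> v(converters.at("cpu_pct")(field));
	std::cout << v->d << "\n";	// 0.42
}
```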
  {
    "path": "Diagnostic/mdsd/mdsd/MdsdConfig.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <boost/bind.hpp>\n#include <boost/date_time/posix_time/posix_time_types.hpp>\n#include \"MdsdConfig.hh\"\n#include \"CfgCtxRoot.hh\"\n#include \"ConfigParser.hh\"\n#include \"TableSchema.hh\"\n#include \"Subscription.hh\"\n#include \"Batch.hh\"\n#include \"Credentials.hh\"\n#include \"OmiTask.hh\"\n#include \"MdsdExtension.hh\"\n#include \"ITask.hh\"\n#include \"Crypto.hh\"\n#include \"Logger.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include \"EventHubCmd.hh\"\n#include \"ConfigUpdateCmd.hh\"\n#include \"CmdXmlCommon.hh\"\n#include \"EventHubUploaderId.hh\"\n#include \"EventHubUploaderMgr.hh\"\n#include \"EventHubType.hh\"\n#include \"EventPubCfg.hh\"\n#include \"MdsdEventCfg.hh\"\n#include \"LocalSink.hh\"\n#include \"EventType.hh\"\n\n#include <fstream>\n#include <sstream>\n#include <iomanip>\n#include <algorithm>\n#include <iterator>\n#include <vector>\n#include <utility>\n#include <ctime>\n#include <cpprest/pplx/threadpool.h>\n\nextern \"C\" {\n#include <unistd.h>\n}\n\nusing std::string;\nusing std::vector;\nusing std::pair;\nusing std::make_pair;\n\n// The set of batches that aren't associated with any particular config instance. (Thus the\n// nullptr initializer.)\n//\n// This global static could be associated with the BatchSet class just as easily as the\n// MdsdConfig class.\nBatchSet MdsdConfig::_localBatches { nullptr };\n\nMdsdConfig::MdsdConfig(string path, string autokeyConfigPath) :\n    configFilePath(path),\n    _autokeyConfigFilePath(autokeyConfigPath),\n    eventVersion(1), _isUseful(false),\n\t_defaultCreds(nullptr), _batchSet(this), _batchFlushTimer(crossplat::threadpool::shared_instance().service()),\n\t_agentIdentity(MdsdUtil::GetHostname()),\n\t_autoKeyReloadTimer(crossplat::threadpool::shared_instance().service()), _monitoringManagementSeen(false),\n\t_hasAutoKey(false),\n\t_mdsdEventCfg(std::make_shared<mdsd::MdsdEventCfg>()),\n\t_eventPubCfg(std::make_shared<mdsd::EventPubCfg>(_mdsdEventCfg))\n{\n\tLoadFromConfigFile(path);\n}\n\nvoid\nMdsdConfig::Initialize()\n{\n\tTrace trace(Trace::ConfigLoad, \"MdsdConfig Initialize\");\n\n\tInitEventHubPub();\n\n\tFlushBatches(boost::system::error_code());\t\t// Also schedules the next flush\n}\n\n// No autokey support.\nbool\nMdsdConfig::LoadAutokey(const boost::system::error_code &e)\n{\n\tTrace trace(Trace::Credentials, \"LoadAutoKey\");\n\n\treturn false;\n}\n\n// there could be multiple monikers pointing to different storage accounts\n// pair: first=moniker, second=container SAS\nstd::vector<std::pair<std::string, std::string>>\nMdsdConfig::ExtractCmdContainerAutoKeys()\n{\n\tTrace trace(Trace::Credentials, \"GetContainerCred\");\n\tauto rootContainer = mdsd::CmdXmlCommon::GetRootContainerName();\n\tstd::vector<std::pair<std::string, std::string>> keylist;\n\n\tstd::unique_lock<std::mutex> lock(_ehMapMutex);\n\tfor (const auto & iter : _autoKeyMap) {\n\t\tif (rootContainer == iter.first.second) {\n\t\t\tkeylist.push_back(std::make_pair(iter.first.first, iter.second));\n\t\t}\n\t}\n\tlock.unlock();\n\n\t// Get default account to use: either the default credential or the first credential\n\tCredentials* cred = _defaultCreds;\n\tif (!cred) {\n\t\tcred = credentials.begin()->second;\n\t}\n\n\tif (!cred) {\n\t\tTRACEWARN(trace, \"No default account is found. 
No way to do config auto update.\");\n\t}\n\telse {\n\t\tfor (const auto & iter : keylist) {\n\t\t\tauto moniker = iter.first;\n\t\t\tif (moniker == cred->Moniker()) {\n\t\t\t\tcmdContainerSas = iter.second;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif (!cmdContainerSas.empty()) {\n\t\t\tTRACEINFO(trace, \"Found container SAS to download config command blob: \" << cmdContainerSas);\n\t\t}\n\t}\n\n\treturn keylist;\n}\n\nvoid\nMdsdConfig::SetMappedMoniker(\n\tconst EventHubSasInfo_t & ehmap\n\t)\n{\n\tTrace trace(Trace::Credentials, \"SetMappedMoniker\");\n\tfor (const auto & ehEntry : ehmap) {\n\t\tauto & origMoniker = ehEntry.first;\n\t\tauto & itemsMap = ehEntry.second;\n\t\tfor (const auto & item : (*itemsMap)) {\n\t\t\tauto & eventName = item.first;\n\t\t\tauto & newMoniker = item.second.moniker;\n\t\t\t_mdsdEventCfg->UpdateMoniker(eventName, origMoniker, newMoniker);\n\t\t}\n\t}\n}\n\nvoid\nMdsdConfig::LoadEventHubKeys(\n\tconst std::vector<std::pair<std::string, std::string>>& keylist\n\t)\n{\n\tTrace trace(Trace::Credentials, \"LoadEventHubKeys\");\n\n\tfor (const auto & iter : keylist) {\n\t\tauto & moniker = iter.first; // this is what's in mdsd.xml\n\t\tauto & containerSas = iter.second;\n\n\t\ttrace.NOTE(\"Get EventHub cmd XML for moniker \" + moniker + \", containerSas \" + containerSas);\n\t\tif(!_mdsdEventCfg->IsEventHubEnabled(moniker)) {\n\t\t\ttrace.NOTE(\"Moniker \" + moniker + \" does not have EventHub\");\n\t\t\tcontinue;\n\t\t}\n\t\tmdsd::EventHubCmd ehCmd(Namespace(), EventVersion(), containerSas);\n\t\tehCmd.ProcessCmdXml();\n\t\t_ehNoticeItemsMap[moniker] = ehCmd.GetNoticeXmlItemsTable();\n\t\t_ehPubItemsMap[moniker] = ehCmd.GetPublisherXmlItemsTable();\n\t\ttrace.NOTE(\"Successfully get EventHub cmd XML items (that include SAS keys) for moniker \" + moniker);\n\t\tDumpEventPublisherInfo();\n\t}\n\n\tSetMappedMoniker(_ehNoticeItemsMap);\n\tSetMappedMoniker(_ehPubItemsMap);\n}\n\nmdsd::EhCmdXmlItems\nMdsdConfig::GetEventNoticeCmdXmlItems(\n\tconst std::string & moniker,\n\tconst std::string & eventName\n\t)\n{\n\tTrace trace(Trace::Credentials, \"MdsdConfig::GetEventNoticeCmdXmlItems\");\n\treturn GetEventHubCmdXmlItems(_ehNoticeItemsMap, moniker, eventName, \"EventNotice\");\n}\n\nmdsd::EhCmdXmlItems\nMdsdConfig::GetEventPublishCmdXmlItems(\n\tconst std::string & moniker,\n\tconst std::string & eventName\n\t)\n{\n\tTrace trace(Trace::Credentials, \"MdsdConfig::GetEventPublishCmdXmlItems\");\n\treturn GetEventHubCmdXmlItems(_ehPubItemsMap, moniker, eventName, \"EventPublish\");\n}\n\nmdsd::EhCmdXmlItems\nMdsdConfig::GetEventHubCmdXmlItems(\n\tEventHubItemsMap_t& ehmap,\n\tconst std::string & moniker,\n\tconst std::string & eventName,\n\tconst std::string & eventType\n\t)\n{\n\tTrace trace(Trace::Credentials, \"MdsdConfig::GetEventHubCmdXmlItems\");\n\tstd::lock_guard<std::mutex> lock(_ehMapMutex);\n\n\tauto iter = ehmap.find(moniker);\n\n\tif (iter == ehmap.end()) {\n\t\tstd::ostringstream strm;\n\t\tstrm << \"Failed to find \" << eventType << \" SAS & endpoint for moniker=\" << moniker;\n\t\tLogger::LogError(strm.str());\n\t\treturn mdsd::EhCmdXmlItems();\n\t}\n\tauto xmlItemsMap = iter->second;\n\tauto xmlItemsIter = xmlItemsMap->find(eventName);\n\tif (xmlItemsIter == xmlItemsMap->end()) {\n\t\tstd::ostringstream strm;\n\t\tstrm << \"Failed to find \" << eventType << \" SAS & endpoint for event=\" << eventName << \" (moniker=\" << moniker << \").\";\n\t\tLogger::LogError(strm.str());\n\t\treturn mdsd::EhCmdXmlItems();\n\t}\n\n\tTRACEINFO(trace, \"Found \" << eventType 
<< \" (SAS & endpoint) for moniker=\" << moniker <<\n\t\t\", event=\" << eventName << \": \" << xmlItemsIter->second);\n\treturn xmlItemsIter->second;\n}\n\n\n// Flush the batch set and schedule the next flush. This should be explicitly called\n// only once; the method is also the timer-pop handler and thus arranges for itself\n// to be called again. The \"cancel()\" call is a safety measure in case the method is\n// called explicitly after loading.\nvoid\nMdsdConfig::FlushBatches(const boost::system::error_code &e)\n{\n\tTrace trace(Trace::Scheduler, \"MdsdConfig::FlushBatches\");\n\n\tif (e == boost::asio::error::operation_aborted) {\n\t\ttrace.NOTE(\"Timer cancelled\");\n\t} else {\n\t\t_batchSet.FlushIfStale();\n\t\t_batchFlushTimer.expires_from_now(boost::posix_time::minutes(1));\n\t\t_batchFlushTimer.async_wait(boost::bind(&MdsdConfig::FlushBatches, this, boost::asio::placeholders::error));\n\t}\n}\n\n// Stop timers that are not related to scheduled tasks:\n// _batchFlushTimer, _autoKeyReloadTimer\nvoid\nMdsdConfig::StopAllTimers()\n{\n\tTrace trace(Trace::Scheduler, \"MdsdConfig::StopAllTimers\");\n\t_batchFlushTimer.cancel();\n\t_autoKeyReloadTimer.cancel();\n}\n\nMdsdConfig::~MdsdConfig()\n{\n\tTrace trace(Trace::ConfigLoad, \"MdsdConfig Destructor\");\n\n\tStopAllTimers();\n\n\t// Configuration load/parse messages\n\tsize_t count = 0;\n\tfor (Message* msgptr : messages) {\n\t\tdelete msgptr;\n\t\tcount++;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" messages\");\n\tmessages.clear();\n\n\t// Configured table schemas (distinct from cached MDS-ready forms of those schemas)\n\tcount = 0;\n\tfor (auto iter : schemas) {\n\t\tcount++;\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Deleting TableSchema \\\"\" << iter.first << \"\\\" at address \" << iter.second;\n\t\ttrace.NOTE(msg.str());\n\t\tdelete iter.second;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" TableSchemas\");\n\tschemas.clear();\n\n\t// Credentials\n\tcount = 0;\n\tfor (auto iter : credentials) {\n\t\tcount++;\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Deleting Credentials \\\"\" << iter.first << \"\\\" at address \" << iter.second;\n\t\ttrace.NOTE(msg.str());\n\t\tdelete iter.second;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" Credentials\");\n\tcredentials.clear();\n\n\t// Event sources\n\t// Just map source names to TableSchema*, and I've already deleted all the TableSchema objects.\n\ttrace.NOTE(\"Clearing all source entries\");\n\tsources.clear();\n\n\t// OmiTask\n\tcount = 0;\n\tfor (OmiTask* taskptr : _omiTasks) {\n\t\tcount++;\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Deleting OmiTask at address \" << taskptr;\n\t\ttrace.NOTE(msg.str());\n\t\ttaskptr->Cancel();\n\t\tdelete taskptr;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" OmiTask object(s)\");\n\t_omiTasks.clear();\n\n\t// ITask\n\tcount = 0;\n\tfor (ITask* taskptr : _tasks) {\n\t\tcount++;\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Deleting ITask at address \" << taskptr;\n\t\ttrace.NOTE(msg.str());\n\t\ttaskptr->cancel();\n\t\tdelete taskptr;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" ITask object(s)\");\n\t_tasks.clear();\n\n\t// Mdsd Extensions\n\tcount = 0;\n\tfor (auto & iter : extensions) {\n\t\tcount++;\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Deleting MdsdExtension \\\"\" << iter.first << \"\\\" at address \" << iter.second;\n\t\ttrace.NOTE(msg.str());\n\t\tdelete iter.second;\n\t}\n\ttrace.NOTE(\"Removed \" + std::to_string(count) + \" 
MdsdExtension\");\n\textensions.clear();\n\n\t// BatchSet() - gets destroyed when this destructor completes\n\t// No need to flush; the BatchSet destructor will do that\n\n\t// Autokey map contains no pointers so it gets cleaned up correctly when this destructor completes\n\ttrace.NOTE(\"Clearing autokey map\");\n\t_autoKeyMap.clear();\n\n\t_defaultCreds = 0;\t// Already deleted it while clearing the credentials vector\n}\n\nvoid\nMdsdConfig::LoadFromConfigFile(string path)\n{\n\t// Create an appropriate root document context\n\tCfgCtxRoot root(this);\n\t// Instantiate a new parser with the context\n\tConfigParser parser(&root, this);\n\n\t// Open the path\n\tstd::ifstream infile(path);\n\tif (!infile) {\n\t\tAddMessage(error, \"Failed to open config file \" + path + \" for reading\");\n\t\treturn;\n\t}\n\n\t// Remember where we were when we were asked to load this file\n\tstring previousPath(currentPath);\n\tlong previousLine(currentLine);\n\tcurrentPath = path;\n\tcurrentLine = 0;\n\n\t// Read one line at a time, hand it to the parser's parse_chunk() method\n\tstring line;\n\twhile (std::getline(infile, line)) {\n\t\tNextLine();\n\t\tparser.ParseChunk(line);\n\t}\n\tif (!infile.eof()) {\n\t\tif (infile.bad()) {\n\t\t\tAddMessage(error, \"Corrupted stream\");\n\t\t}\n\t\telse if (infile.fail()) {\n\t\t\tAddMessage(error, \"IO operation failed\");\n\t\t}\n\t\telse {\n\t\t\tAddMessage(error, \"std::getline returned 0 for unknown reason\");\n\t\t}\n\t}\n\n\tcurrentPath = previousPath;\n\tcurrentLine = previousLine;\n}\n\nvoid\nMdsdConfig::AddMessage(severity_t s, const std::string& msg)\n{\n\tMessage* newmsg = new MdsdConfig::Message(currentPath, currentLine, s, msg);\n\tmessages.push_back(newmsg);\n}\n\nbool\nMdsdConfig::GotMessages(int mask) const\n{\n\tfor (const auto& msg : messages) {\n\t\tif (msg->severity & mask) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\nvoid\nMdsdConfig::MessagesToStream(std::ostream& output, int mask) const {\n\tfor (const auto& msg : messages) {\n\t\tif (msg->severity & mask) {\n\t\t\toutput << msg->filename << \"(\" << msg->line << \") \" << SeverityToString(msg->severity)\n\t\t\t       << \": \" << msg->msg << \"\\n\";\n\t\t}\n\t}\n\toutput << std::flush;\n}\n\n// File scope constants\nstatic const std::string\n\t_str_fatal = \"Fatal\",\n\t_str_error = \"Error\",\n\t_str_warning = \"Warning\",\n\t_str_info = \"Info\",\n\t_str_unknown = \"?\"\n;\n\nconst std::string&\nMdsdConfig::SeverityToString(MdsdConfig::severity_t severity) const\n{\n\tswitch (severity) {\n\t\tcase MdsdConfig::info:\t\treturn _str_info;\n\t\tcase MdsdConfig::warning:\treturn _str_warning;\n\t\tcase MdsdConfig::error:\t\treturn _str_error;\n\t\tcase MdsdConfig::fatal:\t\treturn _str_fatal;\n\t\tdefault:\t\t\treturn _str_unknown;\t\t// Should never happen\n\t}\n}\n\nvoid\nMdsdConfig::AddSchema(TableSchema* schema)\n{\n\tif (schemas.count(schema->Name())) {\n\t\tAddMessage(error, \"Duplicate schema \" + schema->Name() + \" ignored\");\n\t\tdelete schema;\n\t}\n\telse {\n\t\tschemas[schema->Name()] = schema;\n\t}\n}\n\nvoid\nMdsdConfig::AddCredentials(Credentials* creds, bool makeDefault)\n{\n\tif (credentials.count(creds->Moniker())) {\n\t\tAddMessage(error, \"Duplicate creds \" + creds->Moniker() + \" ignored\");\n\t\tdelete creds;\n\t\treturn;\n\t}\n\n\tcredentials[creds->Moniker()] = creds;\n\tif (makeDefault) {\n\t\tif (_defaultCreds) {\n\t\t\tAddMessage(error, \"Cannot make \" + creds->Moniker() + \" default; another is already set\");\n\t\t} else {\n\t\t\t_defaultCreds = 
creds;\n\t\t}\n\t}\n}\n\nvoid\nMdsdConfig::AddSource(const string& source, const string& schema)\n{\n\tif (schema.length() > 0 && schemas.count(schema) == 0) {\n\t\tAddMessage(error, \"Undefined schema \" + schema + \" referenced\");\n\t}\n\telse if (sources.count(source)) {\n\t\tAddMessage(error, \"Source \" + source + \" already mapped to a schema; ignored\");\n\t}\n\telse {\n\t\tsources[source] = schemas[schema];\n\t}\n}\n\nvoid\nMdsdConfig::AddDynamicSchemaSource(const string& source)\n{\n\tif (_dynamic_sources.count(source)) {\n\t\tAddMessage(error, \"Dynamic Schema Source \" + source + \" has already been configured; ignored\");\n\t}\n\telse\n\t{\n\t\t_dynamic_sources.insert(source);\n\t}\n}\n\nbool\nMdsdConfig::AddIdentityColumn(const string& colname, const string& colval)\n{\n\tfor (auto iter = identityColumns.begin(); iter != identityColumns.end(); ++iter) {\n\t\tif (iter->first == colname) {\n\t\t\tAddMessage(error, \"Ignoring duplicate identity column \" + colname);\n\t\t\treturn false;\n\t\t}\n\t}\n\n\tidentityColumns.push_back(make_pair(colname, colval));\n\treturn true;\n}\n\nvoid\nMdsdConfig::GetIdentityColumnValues(std::back_insert_iterator<vector<pair<string, string> > > destination)\n{\n\tstd::copy(identityColumns.begin(), identityColumns.end(), destination);\n}\n\nvoid\nMdsdConfig::GetIdentityColumnTypes(std::back_insert_iterator<vector<pair<string, string> > > destination)\n{\n\tfor (auto iter = identityColumns.begin(); iter != identityColumns.end(); ++iter) {\n\t\tdestination = make_pair(iter->first, \"mt:wstr\");\n\t}\n}\n\nvoid\nMdsdConfig::GetIdentityValues(std::string & tenant, std::string& role, std::string& roleInstance)\n{\n\tident_vect_t identityColumns;\n    GetIdentityColumnValues(std::back_inserter(identityColumns));\n\n\tfor (const auto & col : identityColumns) {\n\t\tif (col.first.compare(TenantAlias()) == 0) {\n\t\t\ttenant = col.second;\n\t\t}\n\t\telse if (col.first.compare(RoleAlias()) == 0) {\n\t\t\trole = col.second;\n\t\t}\n\t\telse if (col.first.compare(RoleInstanceAlias()) == 0) {\n\t\t\troleInstance = col.second;\n\t\t}\n\t}\n}\n\nvoid\nMdsdConfig::AddEnvelopeColumn(std::string && name, std::string && value)\n{\n\tfor (const EnvelopeColumn & column : _envelopeColumns) {\n\t\tif (column.first == name) {\n\t\t\tthrow std::runtime_error(\"Column already in envelope\");\n\t\t}\n\t}\n\t_envelopeColumns.emplace_back(name, value);\n}\n\nvoid\nMdsdConfig::ForeachEnvelopeColumn(const std::function<void(const EnvelopeColumn&)>& process)\n{\n\tfor (const EnvelopeColumn & column : _envelopeColumns) {\n\t\tprocess(column);\n\t}\n}\n\nTableSchema*\nMdsdConfig::GetSchema(const string& source) const\n{\n\tconst auto &iter = sources.find(source);\n\tif (iter == sources.end()) {\n\t\treturn 0;\n\t}\n\n\treturn iter->second;\n}\n\nCredentials*\nMdsdConfig::GetCredentials(const string& moniker) const\n{\n\tconst auto &iter = credentials.find(moniker);\n\tif (iter == credentials.end()) {\n\t\treturn 0;\n\t}\n\n\treturn iter->second;\n}\n\nstd::string\nMdsdConfig::GetAutokey(const std::string& moniker, const std::string& fullTableName)\n{\n\tstd::lock_guard<std::mutex> lock(_aKMmutex);\n\n\tauto iter = _autoKeyMap.find(std::make_pair(moniker, fullTableName));\n\tif (iter == _autoKeyMap.end()) {\n\t\treturn std::string();\n\t}\n\treturn iter->second;\n}\n\nvoid\nMdsdConfig::DumpAutokeyTable(std::ostream &os)\n{\n\tos << \"Dump format: <MonikerName, ItemName>\" << std::endl;\n\tfor (const auto & iter : _autoKeyMap) {\n\t\tos << \"<\" << iter.first.first << \",\" << 
iter.first.second << \">\" << std::endl;\n\t}\n}\n\nbool\nMdsdConfig::IsQuotaExceeded(const std::string &name, unsigned long current) const\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig:IsQuotaExceeded\");\n\n\tauto iter = _quotas.find(name);\n\n\tif (iter == _quotas.end()) {\n\t\ttrace.NOTE(\"Check against unset quota \" + name);\n\t\treturn false;\n\t}\n\n\treturn (current > iter->second);\n}\n\nvoid\nMdsdConfig::AddOmiTask(OmiTask *task)\n{\n\t// Defer the creation of the batch; autokey data might not yet be loaded.\n\t// The task will create the batch when an attempt is made to start it\n\t_omiTasks.push_back(task);\n\t_isUseful = true;\n}\n\nvoid\nMdsdConfig::ForeachOmiTask(const std::function<void(OmiTask*)>& fn)\n{\n\tstd::for_each(_omiTasks.begin(), _omiTasks.end(), fn);\n}\n\nvoid\nMdsdConfig::AddTask(ITask *task)\n{\n        Trace trace(Trace::Scheduler, \"MdsdConfig::AddTask\");\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Adding task \" << task;\n\t\ttrace.NOTE(msg.str());\n\t}\n\t_tasks.push_back(task);\n\t_isUseful = true;\n}\n\nvoid\nMdsdConfig::ForeachTask(const std::function<void(ITask*)>& fn)\n{\n        Trace trace(Trace::Scheduler, \"MdsdConfig::ForeachTask\");\n\ttrace.NOTE(\"Invoking function on \" + std::to_string(_tasks.size()) + \" task(s)\");\n\tstd::for_each(_tasks.begin(), _tasks.end(), fn);\n}\n\n\nvoid\nMdsdConfig::AddExtension(MdsdExtension * extension)\n{\n\tTrace trace (Trace::ConfigUse, \"MdsdConfig::AddExtension\");\n\tif (!extension) {\n\t\treturn;\n\t}\n\n\tconst std::string& extname = extension->Name();\n\tif (extensions.count(extname)) {\n\t\tAddMessage(error, \"Duplicate Extension \" + extname + \" ignored.\");\n\t\tdelete extension;\n\t\textension = nullptr;\n\t}\n\telse {\n\t\textensions[extname] = extension;\n\t\t_isUseful = true;\n\t}\n}\n\nvoid\nMdsdConfig::ForeachExtension(const std::function<void(MdsdExtension*)>& fn)\n{\n\tTrace trace (Trace::ConfigUse, \"MdsdConfig::ForeachExtension\");\n\tfor (const auto & kv : extensions) {\n\t\ttrace.NOTE(\"Walking MdsdExtension with name='\" + kv.first + \"'\");\n\t\tfn(kv.second);\n\t}\n}\n\n\nvoid\nMdsdConfig::StartScheduledTasks()\n{\n        Trace trace(Trace::Scheduler, \"MdsdConfig::StartScheduledTasks\");\n        ForeachOmiTask([](OmiTask *job) { job->Start(); });\n        ForeachTask([](ITask *task) { task->start(); });\n}\n\nvoid\nMdsdConfig::StopScheduledTasks()\n{\n        Trace trace(Trace::Scheduler, \"MdsdConfig::StopScheduledTasks\");\n\tForeachOmiTask([](OmiTask *job) { job->Cancel(); });\n        ForeachTask([](ITask *task) { task->cancel(); });\n}\n\n// Tells this configuration to remove itself in the future. The config takes\n// steps immediately to stop generating work for itself, then schedules the\n// final cleanup action to take place after the requested delay.\nvoid\nMdsdConfig::SelfDestruct(int seconds)\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig::SelfDestruct\");\n\tStopScheduledTasks();\n\tStopAllTimers();\n\n\t// Flush any data we're still holding on to. Don't use FlushBatches; that\n\t// will restart the autoflush timer, and we just stopped that. 
One last\n\t// flush will happen when the Destroyer calls delete.\n\t_batchSet.Flush();\n\n\t// Create a deadline_timer on the heap; when it expires, call our Destroyer helper\n\tauto timer = new boost::asio::deadline_timer(crossplat::threadpool::shared_instance().service());\n\ttimer->expires_from_now(boost::posix_time::seconds(seconds));\n\ttimer->async_wait(boost::bind(MdsdConfig::Destroyer, this, timer));\n}\n\n// This static private method does the final delete. Also deletes the heap timer.\nvoid\nMdsdConfig::Destroyer(MdsdConfig *config, boost::asio::deadline_timer *timer)\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig::Destroyer\");\n\n\tstd::ostringstream msg;\n\tmsg << \"Deleting MdsdConfig at \" << config;\n\ttrace.NOTE(msg.str());\n\tdelete config;\n\tdelete timer;\n}\n\n// Create a batch for a given target. If one has already been created for that target,\n// return the one we're already using.\nBatch*\nMdsdConfig::GetBatch(const MdsEntityName &target, int interval)\n{\n\tif (target.GetStoreType() == StoreType::Local) {\n\t\treturn _localBatches.GetBatch(target, interval);\n\t} else {\n\t\treturn _batchSet.GetBatch(target, interval);\n\t}\n}\n\nbool\nMdsdConfig::ValidateConfig(\n    bool isStartupConfig\n    ) const\n{\n    Trace trace(Trace::ConfigUse, \"MdsdConfig::ValidateConfig\");\n\n    if (!IsUseful()) {\n        std::ostringstream msg;\n        msg << \"No productive configuration resulted from loading config file(s): \" << configFilePath << \".\";\n        if (!isStartupConfig) {\n            msg << \" New configuration ignored.\\n\";\n        }\n        msg << \"Warnings detected:\\n\";\n        MessagesToStream(msg, MdsdConfig::warning);\n        Logger::LogWarn(msg);\n    }\n    if (GotMessages(MdsdConfig::fatal)) {\n        std::ostringstream msg;\n        msg << \"Fatal errors while loading configuration \" << configFilePath << \":\" << std::endl;\n        MessagesToStream(msg, MdsdConfig::fatal);\n        if (!isStartupConfig) {\n            msg << \"\\nNew configuration ignored; using previous configuration\";\n        }\n        Logger::LogError(msg);\n        return false;\n    }\n    if (GotMessages(MdsdConfig::error)) {\n        std::ostringstream msg;\n        msg << \"Config file \" << configFilePath << \" parsing errors:\\n\";\n        MessagesToStream(msg, MdsdConfig::error);\n        Logger::LogError(msg);\n        return false;\n    }\n    if (GotMessages(MdsdConfig::warning)) {\n        std::ostringstream msg;\n        msg << \"Config file \" << configFilePath << \" parsing warnings:\\n\";\n        MessagesToStream(msg, MdsdConfig::warning);\n        Logger::LogWarn(msg);\n    }\n\n    return true;\n}\n\nvoid\nMdsdConfig::DumpEventPublisherInfo()\n{\n    Trace trace(Trace::ConfigLoad, \"MdsdConfig::DumpEventPublisherInfo\");\n\n    if (!trace.IsActive()) {\n        return;\n    }\n    if (_ehPubItemsMap.empty()) {\n        TRACEINFO(trace, \"EventPublisher map is empty\");\n    }\n    else {\n        for (const auto & iter : _ehPubItemsMap) {\n            auto moniker = iter.first;\n            auto itemsmap = iter.second;\n            if (itemsmap->empty()) {\n                TRACEINFO(trace, \"Moniker='\" << moniker << \"'; Event: N/A.\");\n            }\n            else {\n                for (const auto& item : (*itemsmap)) {\n                    auto eventname = item.first;\n                    auto ehinfo = item.second;\n                    TRACEINFO(trace, \"Moniker='\" << moniker << \"'; EventName='\"\n                        << eventname << 
\"'; EHInfo: \" << ehinfo);\n                }\n            }\n        }\n    }\n}\n\nstd::string\nMdsdConfig::GetDefaultMoniker() const\n{\n\tauto defaultCreds = GetDefaultCredentials();\n\tif (!defaultCreds) {\n\t\tthrow std::runtime_error(\"No default credential is found.\");\n\t}\n\treturn defaultCreds->Moniker();\n}\n\nvoid\nMdsdConfig::AddMonikerEventInfo(\n\tconst std::string & moniker,\n\tconst std::string & eventName,\n\tStoreType::Type type,\n\tconst std::string & sourceName,\n\tmdsd::EventType eventType\n\t)\n{\n\tTrace trace(Trace::ConfigLoad, \"AddMonikerEventInfo\");\n\ttry {\n\t\tauto monikerToUse = moniker.empty()? GetDefaultMoniker() : moniker;\n\t\t_mdsdEventCfg->AddEventSinkCfgInfoItem({eventName, monikerToUse, type, sourceName, eventType });\n\t\tTRACEINFO(trace, \"Saved event=\" << eventName << \" moniker=\" << monikerToUse);\n\t}\n\tcatch(const std::exception& ex) {\n\t\tAddMessage(fatal, std::string(\"AddMonikerEventInfo() failed: \") + ex.what());\n\t}\n}\n\nvoid\nMdsdConfig::SetOboDirectPartitionFieldNameValue(std::string&& name, std::string&& value)\n{\n\t_oboDirectPartitionFieldsMap.emplace(name, value);\n\tif (name == \"resourceId\") {\n\t    _resourceId = value;\n\t}\n}\n\n\nstd::string\nMdsdConfig::GetOboDirectPartitionFieldValue(const std::string& name) const\n{\n\tif (name.empty()) {\n\t\tthrow std::invalid_argument(\"MdsdConfig::GetOboDirectPartitionFieldValue(name): name cannot be empty\");\n\t}\n\n\tstd::string value;\n\tauto it = _oboDirectPartitionFieldsMap.find(name);\n\tif (it != _oboDirectPartitionFieldsMap.end()) {\n\t\tvalue = it->second;\n\t}\n\telse {\n\t\tLogger::LogWarn(\"OboDirectPartitionField with name='\" + name\n\t\t\t\t+ \"' not found. Make sure the mdsd.xml includes the corresponding \"\n\t\t\t\t\"Management/OboDirectPartitionField element. 
Returning an empty string \"\n\t\t\t\t\"as the result value.\");\n\t}\n\n\treturn value;\n}\n\nvoid\nMdsdConfig::ValidateEvents()\n{\n\tTrace trace(Trace::ConfigLoad, \"MdsdConfig::ValidateEvents\");\n\ttry {\n\t\tValidateAnnotations();\n\t\tValidateEventHubPubKeys();\n\t\tValidateEventHubPubSinks();\n\t}\n\tcatch(const std::exception & ex) {\n\t\tAddMessage(error, std::string(\"MdsdConfig::ValidateEvents() failed: \") + ex.what());\n\t}\n}\n\nvoid\nMdsdConfig::ValidateAnnotations()\n{\n\tfor (const auto & name : _mdsdEventCfg->GetInvalidAnnotations()) {\n\t\tAddMessage(MdsdConfig::error, \"Unknown name '\" + name + \"' in EventStreamingAnnotation\");\n\t}\n}\n\nvoid\nMdsdConfig::ValidateEventHubPubKeys()\n{\n\tfor (const auto & publisherName : _eventPubCfg->CheckForInconsistencies(_hasAutoKey)) {\n\t\tAddMessage(MdsdConfig::error,\n\t\t\t\"Failed to find event publisher SAS key for item '\" + publisherName + \"'\");\n\t}\n}\n\nvoid\nMdsdConfig::ValidateEventHubPubSinks()\n{\n\tfor (const auto & publisherName: _mdsdEventCfg->GetEventPublishers())\n\t{\n\t\tif (!LocalSink::Lookup(publisherName)) {\n\t\t\tAddMessage(error, \"failed to find LocalSink object for Event Publisher \" + publisherName);\n\t\t} else {\n\t\t    _isUseful = true;  // Found a valid event publisher\n\t\t}\n\t}\n}\n\nvoid\nMdsdConfig::InitEventHubPub()\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig::InitEventHubPub\");\n\n\tSetEventHubPubForLocalSinks();\n\n\t// create uploaders first before setting SAS key\n\tmdsd::EventHubUploaderMgr::GetInstance().CreateUploaders(mdsd::EventHubType::Publish,\n\t\t_eventPubCfg->GetNameMonikers());\n\n\tSetupEventHubPubEmbeddedKeys();\n}\n\nvoid\nMdsdConfig::SetupEventHubPubEmbeddedKeys()\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig::SetupEventHubPubEmbeddedKeys\");\n\n\tauto& ehUploaderMgr = mdsd::EventHubUploaderMgr::GetInstance();\n\tauto ehtype = mdsd::EventHubType::Publish;\n\n\tfor (const auto & item : _eventPubCfg->GetEmbeddedSasData()) {\n\t\tauto & publisherName = item.first;\n\t\tauto & monikerSasMap = item.second;\n\n\t\tfor (const auto & keyItem : monikerSasMap) {\n\t\t\tauto & moniker = keyItem.first;\n\t\t\tauto & saskey = keyItem.second;\n\t\t\tehUploaderMgr.SetSasAndStart(mdsd::EventHubUploaderId(ehtype, moniker, publisherName), saskey);\n\t\t}\n\t}\n}\n\nvoid\nMdsdConfig::SetEventHubPubForLocalSinks()\n{\n\tTrace trace(Trace::ConfigUse, \"MdsdConfig::SetEventHubPubForLocalSinks\");\n\n\tstd::string tenant, role, roleInstance;\n\tGetIdentityValues(tenant, role, roleInstance);\n\n\tfor (const auto & item : _eventPubCfg->GetNameMonikers()) {\n\t\tauto & publisherName = item.first;\n\t\tauto sinkObj = LocalSink::Lookup(publisherName);\n\n\t\tif (!sinkObj) {\n\t\t\tthrow std::runtime_error(\"SetEventHubPubForLocalSinks(): failed to find LocalSink object for \"\n\t\t\t\t+ publisherName);\n\t\t}\n\t\telse {\n\t\t\tstd::string duration = GetDurationForEventName(publisherName);\n\t\t\tauto & monikers = item.second;\n\t\t\tsinkObj->SetEventPublishInfo(monikers, std::move(duration), tenant, role, roleInstance);\n\t\t}\n\t}\n}\n\n// vim: sw=8\n"
  },
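FlushBatches() in the file above doubles as its own timer-pop handler: it flushes, re-arms _batchFlushTimer, and treats operation_aborted as the signal to stop rescheduling. Here is a runnable miniature of that asio idiom, with the interval and iteration count shrunk for the demo; io_service matches the Boost vintage this code already uses.

```cpp
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>

// Globals keep the demo short; mdsd holds the equivalent state as members.
boost::asio::io_service io;
boost::asio::deadline_timer timer(io);
int remaining = 3;	// demo only: stop after three pops

void Flush(const boost::system::error_code& e)
{
	if (e == boost::asio::error::operation_aborted) {
		std::cout << "timer cancelled\n";	// cancel() was called; don't re-arm
		return;
	}
	std::cout << "flush\n";				// stands in for _batchSet.FlushIfStale()
	if (--remaining > 0) {
		timer.expires_from_now(boost::posix_time::milliseconds(10));
		timer.async_wait(&Flush);		// schedule the next pop
	}
}

int main()
{
	Flush(boost::system::error_code());	// explicit first call, as in Initialize()
	io.run();				// returns once no timer is pending
}
```

The explicit first call mirrors Initialize(): invoking the handler directly performs the initial flush and arranges every subsequent one.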
  {
    "path": "Diagnostic/mdsd/mdsd/MdsdConfig.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _MDSDCONFIG_HH_\n#define _MDSDCONFIG_HH_\n\n#include \"TableSchema.hh\"\n#include \"Batch.hh\"\n#include \"IdentityColumns.hh\"\n#include \"Priority.hh\"\n#include \"EventHubCmd.hh\"\n#include \"CfgEventAnnotationType.hh\"\n#include <string>\n#include <deque>\n#include <vector>\n#include <map>\n#include <unordered_map>\n#include <unordered_set>\n#include <iterator>\n#include <utility>\n#include <iostream>\n#include <mutex>\n#include <stddef.h>\n#include <boost/asio.hpp>\n\nclass Credentials;\nclass OmiTask;\nclass ITask;\nclass MdsdExtension;\n\nnamespace mdsd {\n\tstruct OboDirectConfig;\n    class EventPubCfg;\n    class MdsdEventCfg;\n    enum class EventType;\n}\n\nclass MdsdConfig\n{\npublic:\n\t/// <summary>\n\t/// Create an MdsdConfiguration from a configuration file\n\t/// </summary>\n\t/// <param name='path'>Pathname of the config file to load</param>\n\tMdsdConfig(std::string path, std::string autokeyConfigPath);\n\t~MdsdConfig();\n\n\t/// <summary>\n\t/// Initialize configuring activities, including loading autokey,\n\t/// flushing batches etc.\n\t/// </summary>\n\tvoid Initialize();\n\n\t/// <summary>\n\t/// Load a configuration file into an existing MdsdConfiguration\n\t/// </summary>\n\t/// <param name='path'>Pathname of the config file to load</param>\n\tvoid LoadFromConfigFile(std::string path);\n\n\n\t//////////// Parser Warnings and Errors //////////\n\n\ttypedef enum { anySeverity=15, info = 8, warning = 4, error = 2, fatal = 1 } severity_t;\n\n\t/// <summary>Return the readable name of a severity code</summary>\n\t/// <param name=\"severity\">The severity code</param>\n\tconst std::string& SeverityToString(severity_t severity) const;\n\n\t/// <summary>\n\t/// Record a message (warning, error, fatal error, etc) for this location in the parse of the file. This\n\t/// method always adds a newline (\\n) to the end of each message.\n\t/// </summary>\n\t/// <param name=\"severity\">The severity of the message (e.g. Info, Warning, Error, Fatal, etc.)</param>\n\t/// <param name=\"msg\">The message to be recorded</param>\n\tvoid AddMessage(severity_t severity, const std::string& msg);\n\n\t/// <summary>True if messages were recorded via MdsdConfig::AddMessage()</summary>\n\tbool GotMessages(int mask) const;\n\n\t/// <summary>Write all recorded messages to a stream</summary>\n\tvoid MessagesToStream(std::ostream& output, int mask) const;\n\n\tclass Message {\n\tpublic:\n\t\tMessage(const std::string& f, long l, severity_t s, const std::string& m)\n\t\t\t: filename(f), line(l), severity(s), msg(m) {}\n\t\t~Message() {}\n\n\t\tstd::string filename;\n\t\tlong line;\n\t\tseverity_t severity;\n\t\tstd::string msg;\n\t};\n\n\t///////////// Configuration Settings //////////////\n\n\t/// <summary>Indicates if some useful/productive settings have made it into the config</summary>\n\tbool IsUseful() const { return _isUseful; }\n\n\t/// <summary>True if <MonitoringManagement> element has been loaded once</summary>\n\tbool MonitoringManagementSeen() const { return _monitoringManagementSeen; }\n\tvoid MonitoringManagementSeen(bool state) { _monitoringManagementSeen = state; }\n\n\t/// <summary>Prefix for all event names.</summary>\n\tconst std::string& Namespace() const { return nameSpace; }\n\tvoid Namespace(const std::string& name) { nameSpace = name; }\n\n\t/// <summary>Version suffix for event names. 
\"5\" yields a suffix of \"Ver5v0\".</summary>\n\tint EventVersion() const { return eventVersion; }\n\tvoid EventVersion(int ver) { eventVersion = ver; }\n\n\t/// <summary>Config file timestamp.</summary>\n\tconst std::string& Timestamp() const { return timeStamp; }\n\tvoid Timestamp(const std::string& ts) { timeStamp = ts; }\n\n\t/// <summary>Number of partitions to spread across in MDS tables</summary>\n\tunsigned int PartitionCount() const { return _partitionCount; }\n\tvoid PartitionCount(unsigned int count) { _partitionCount = count; }\n\n\t/// <summary>How long to keep data in the agent</summary>\n\tunsigned long DefaultRetention() const { return _defaultRetention; }\n\tvoid DefaultRetention(unsigned long count) { _defaultRetention = count; }\n\n\n\t//////////// Identity ///////\n\n\t// Add an identity column to the set\n\tbool AddIdentityColumn(const std::string& colname, const std::string& colval);\n\t// Push name/value or name/type pairs into destination containers\n\tvoid GetIdentityColumnValues(std::back_insert_iterator<ident_vect_t>);\n\tvoid GetIdentityColumnTypes(std::back_insert_iterator<ident_vect_t>);\n\n\t// Get Tenant/Role/RoleInstance values. Return related Alias values if alias is used.\n\tvoid GetIdentityValues(std::string & tenant, std::string& role, std::string& roleInstance);\n\n\t// Aliases for the special Tenant, Role, and RoleInstance identity elements\n\tvoid SetTenantAlias(const std::string& name) { _tenantNameAlias = name; }\n\tvoid SetRoleAlias(const std::string& name) { _roleNameAlias = name; }\n\tvoid SetRoleInstanceAlias(const std::string& name) { _roleInstanceNameAlias = name; }\n\tconst std::string& TenantAlias() const { return _tenantNameAlias; }\n\tconst std::string& RoleAlias() const { return _roleNameAlias; }\n\tconst std::string& RoleInstanceAlias() const { return _roleInstanceNameAlias; }\n\n\tconst ident_vect_t * GetIdentityVector() { return &identityColumns; }\n\n\tconst std::string & AgentIdentity() const { return _agentIdentity; }\n\n\t//////////// Envelope ///////\n\n\tusing EnvelopeColumn = std::pair<std::string, std::string>;\n\tvoid AddEnvelopeColumn(std::string && name, std::string && value);\n\tvoid ForeachEnvelopeColumn(const std::function<void(const EnvelopeColumn&)>&);\n\n\t/////////// Table Schemas and Event Sources //////////\n\n\t/// <summary>Add a schema to configuration. Once invoked, the caller no longer owns the schema object.</summary>\n\t/// <param name=\"schema\">\n\t/// Pointer to the schema to be added. Once handed to AddSchema, the caller no longer owns the pointer.\n\t/// </param>\n\tvoid AddSchema(TableSchema* schema);\n\n\t/// <summary>Add a source to the configuration. The source name can be mapped to an already-known schema.</summary>\n\t/// <param name=\"source\">The name by which the source identities itself</param>\n\t/// <param name=\"schema\">The name of the schema</param>\n\tvoid AddSource(const std::string& source, const std::string& schema);\n\tbool IsValidSource(const std::string& source) { return (sources.count(source) > 0); }\n\n\t/// <summary>Add a source to the configuration. 
The source name will only be valid for dynamic schema input protocols.</summary>\n\t/// <param name=\"source\">The name by which the source identifies itself</param>\n\tvoid AddDynamicSchemaSource(const std::string& source);\n\tbool IsValidDynamicSchemaSource(const std::string& source) { return (_dynamic_sources.count(source) > 0); }\n\n\t/// <summary>Get the table schema for a source; return 0 if the source is unknown</summary>\n\t/// <param name=\"source\">The name by which the event source identifies itself</param>\n\tTableSchema* GetSchema(const std::string& source) const;\n\n\t//////////// OMI Tasks //////////////\n\tvoid AddOmiTask(OmiTask *task);\n\tvoid AddOmiTask(const std::string &ev, Priority pri, Credentials *creds, bool noNPD,\n\t\t\tconst std::string &nmsp, const std::string &qry, time_t rate);\n\n\tvoid ForeachOmiTask(const std::function<void(OmiTask*)>&);\n\n\t//////////// Arbitrary Tasks //////////////\n\tvoid AddTask(ITask *task);\n\tvoid ForeachTask(const std::function<void(ITask*)>&);\n\n\t//////////// Extensions //////////////\n\t/// <summary>\n\t/// Add an extension object to configuration. Once invoked, the caller\n\t/// no longer owns the extension object.\n\t/// <param name=\"extension\"> Pointer to the extension object.\n\t/// Once handed to AddExtension, the caller no longer owns the pointer.\n\t/// </param>\n\t/// </summary>\n\tvoid AddExtension(MdsdExtension * extension);\n\tsize_t GetNumExtensions() const { return extensions.size(); }\n\tvoid ForeachExtension(const std::function<void(MdsdExtension*)>&);\n\n\t//////////// Credentials //////////////\n\n\t/// <summary>Add a Credential to configuration. Once invoked, the caller no longer owns the creds object.</summary>\n\t/// <param name=\"creds\">\n\t/// Pointer to the Credentials to be added. Once handed to AddCredentials, the caller no longer owns the pointer.\n\t/// </param>\n\t/// <param name=\"makeDefault\">True if these should be the default credentials for this configuration</param>\n\tvoid AddCredentials(Credentials* creds, bool makeDefault);\n\n\t/// <summary>Get the credentials for a moniker; return 0 if the moniker is unknown</summary>\n\t/// <param name=\"moniker\">The moniker of the credential of interest</param>\n\tCredentials* GetCredentials(const std::string& moniker) const;\n\n\t/// <summary>Get the default credentials. Returns 0 if there is no default.</summary>\n\tCredentials* GetDefaultCredentials() const { return _defaultCreds; }\n\n\t/// <summary>Get default moniker. Throw exception if no default is found.</summary>\n\tstd::string GetDefaultMoniker() const;\n\n\t/// <summary>Get the autokey URI, if any, for a [moniker,tablename] pair.</summary>\n\tstd::string GetAutokey(const std::string& moniker, const std::string& fullTableName);\n\n\t/// <summary>Get EventHub cmd XML items (currently SAS and MDS endpoint ID)\n\t/// for the moniker/eventName combination</summary>\n\tmdsd::EhCmdXmlItems GetEventNoticeCmdXmlItems(const std::string & moniker, const std::string & eventName);\n\tmdsd::EhCmdXmlItems GetEventPublishCmdXmlItems(const std::string & moniker, const std::string & eventName);\n\n\t///////////// Quotas /////////////\n\tvoid AddQuota(const std::string &name, unsigned long limit) { _quotas[name] = limit; }\n\tbool IsQuotaExceeded(const std::string &name, unsigned long current) const;\n\n\t// Record moniker, eventname, storetype, source name information.\n\t// If the input 'moniker' is empty, use the default one.\n\t// sourceName can be empty, e.g. 
OMIQuery.\n\tvoid AddMonikerEventInfo(const std::string & moniker, const std::string & eventName,\n\t\tStoreType::Type type, const std::string & sourceName, mdsd::EventType eventType);\n\n\t// Validate the events in the configuration XML\n\tvoid ValidateEvents();\n\n\t///////// OboDirectConfig (XJsonBlob) //////////\n\tvoid AddOboDirectConfig(const std::string& eventName, std::shared_ptr<mdsd::OboDirectConfig>&& oboDirectConfig)\n\t{\n\t    _oboDirectConfigsMap.emplace(eventName, std::move(oboDirectConfig));\n\t}\n\n\t// Caller should catch the std::out_of_range exception if the map doesn't contain a key\n\t// matching eventName.\n\tstd::shared_ptr<mdsd::OboDirectConfig> GetOboDirectConfig(const std::string& eventName) const\n\t{\n\t    return _oboDirectConfigsMap.at(eventName);\n\t}\n\n\t///////////// Helpers /////////////\n\n\t// Return a reference to the set of batches associated with the current config\n\tBatchSet& GetBatchSet() { return _batchSet; }\n\t// Return a reference to the batches that correspond to \"local\" storageType. These\n\t// survive config reloads but require considerably more care in terms of resource\n\t// management.\n\t// static BatchSet& GetLocalBatchSet() { return _localBatches; }\n\t// Given MdsEntityName and autoflush interval, find an existing\n\t// batch (in the appropriate batch set) or make one.\n\tBatch* GetBatch(const MdsEntityName &, int interval);\n\n\tvoid StopAllTimers();\n\n\tvoid StartScheduledTasks();\n\tvoid StopScheduledTasks();\n\n\tvoid SelfDestruct(int seconds);\n\n\tbool ValidateConfig(bool isStartupConfig) const;\n\tstd::string GetCmdContainerSas() const { return cmdContainerSas; }\n\n\n\t// key: moniker name; value: a map of key=EventName; value: EventHub cmd XML items (currently SAS and MDS endpoint)\n\tusing EventHubSasInfo_t = std::unordered_map<std::string, std::shared_ptr<std::unordered_map<std::string, mdsd::EhCmdXmlItems>>>;\n\n\tvoid SetOboDirectPartitionFieldNameValue(std::string&& name, std::string&& value);\n\tstd::string GetOboDirectPartitionFieldValue(const std::string& name) const;\n\t// Currently the VM resource ID is obtained from (and set on) the Management/OboDirectPartitionField element with name=\"resourceId\".\n\t// Change this later if a better method becomes available or if the logic needs to be changed.\n\tstd::string GetResourceId() const { return _resourceId; }\n\n\t// Below is for metric event Json object construction purposes\n\t// Currently, only DerivedEvent's duration attributes are stored\n\t// (because currently they are the only metric events available for the Azure Monitor Json blob sink)\n\tvoid SetDurationForEventName(const std::string& eventName, const std::string& duration) { _eventNamesDurationsMap[eventName] = duration; }\n\tstd::string GetDurationForEventName(const std::string& eventName) const\n\t{\n\t\tauto it = _eventNamesDurationsMap.find(eventName);\n\t\treturn it == _eventNamesDurationsMap.end() ? std::string() : it->second;\n\t}\n\n\tstd::shared_ptr<mdsd::MdsdEventCfg>& GetMdsdEventCfg() {\n\t\treturn _mdsdEventCfg;\n\t}\n\n\tstd::shared_ptr<mdsd::EventPubCfg>& GetEventPubCfg() {\n\t\treturn _eventPubCfg;\n\t}\n\nprivate:\n\tMdsdConfig();\t// Disallow empty constructor\n\n\t// This is a record of the config file path. 
The file can be renamed or moved during\n\t// the lifetime of this MdsdConfig object.\n\tstd::string configFilePath;\n\tstd::deque<Message*> messages;\n\tstd::string currentPath;\n\tlong currentLine;\n\tint msgMask;\n\n    std::string _autokeyConfigFilePath;\n\n\t/// <summary>Prefix for all event names.</summary>\n\tstd::string nameSpace;\n\n\t/// <summary>Version suffix for event names. \"5\" yields a suffix of \"Ver5v0\".</summary>\n\tint eventVersion;\n\n\t/// <summary>Timestamp of the config file.</summary>\n\tstd::string timeStamp;\n\n\tstd::map<const std::string, TableSchema*> schemas;\n\tstd::map<const std::string, Credentials*> credentials;\n\tstd::map<const std::string, TableSchema*> sources;\n\tstd::unordered_set<std::string> _dynamic_sources;\n\tident_vect_t identityColumns;\n\tstd::vector<EnvelopeColumn> _envelopeColumns;\n\tstd::string _tenantNameAlias;\n\tstd::string _roleNameAlias;\n\tstd::string _roleInstanceNameAlias;\n\tstd::vector<OmiTask*> _omiTasks;\n\tstd::vector<ITask*> _tasks;\n\tstd::map<const std::string, MdsdExtension*> extensions;\n\n\tstd::string _resourceId;\n\n\tunsigned int _partitionCount;\n\tunsigned long _defaultRetention;\n\n\tbool _isUseful;\n\n\tCredentials* _defaultCreds;\n\n\tBatchSet _batchSet;\n\tboost::asio::deadline_timer _batchFlushTimer;\n\tvoid FlushBatches(const boost::system::error_code &);\n\tstatic BatchSet _localBatches;\n\n\tstd::string _agentIdentity;\n\n\tstd::map<std::pair<std::string, std::string>, std::string> _autoKeyMap;\n\tstd::mutex _aKMmutex;\n\tboost::asio::deadline_timer _autoKeyReloadTimer;\n\n\tbool LoadAutokey(const boost::system::error_code &);\n\tvoid DumpAutokeyTable(std::ostream &os);\n\n\tstd::mutex _ehMapMutex;\n\t// key: (original, not mapped) moniker;\n\t// value: a map of key=EventName; value: EventHub cmd XML items (currently SAS and MDS endpoint)\n\tusing EventHubItemsMap_t = std::unordered_map<std::string, std::shared_ptr<std::unordered_map<std::string, mdsd::EhCmdXmlItems>>>;\n\tEventHubItemsMap_t _ehNoticeItemsMap;\n\tEventHubItemsMap_t _ehPubItemsMap;\n\n\tmdsd::EhCmdXmlItems GetEventHubCmdXmlItems(EventHubItemsMap_t& ehmap, const std::string & moniker,\n\t\tconst std::string & eventName, const std::string & eventType);\n\n\tvoid LoadEventHubKeys(const std::vector<std::pair<std::string, std::string>>& keylist);\n\tvoid DumpEventPublisherInfo();\n\tvoid SetMappedMoniker(const EventHubSasInfo_t & ehmap);\n\n\t// For EventHub notice, create uploaders in EH manager,\n\t// then set the SAS key and start the uploaders.\n\tvoid InitEventHubNotice();\n\n\t// Initialize EventHub publishers, set SAS keys\n\tvoid InitEventHubPub();\n\n\n\t// Make sure each annotated event exists\n\tvoid ValidateAnnotations();\n\n\t// Make sure each event publisher has some SAS key, either embedded, or from AutoKey\n\tvoid ValidateEventHubPubKeys();\n\n\t// Make sure each event publisher has a LocalSink object that'll publish data for it.\n\tvoid ValidateEventHubPubSinks();\n\n\tvoid SetupEventHubPubEmbeddedKeys();\n\tvoid SetEventHubPubForLocalSinks();\n\n\t// key: event name; value: mdsd::OboDirectConfig\n\tstd::unordered_map<std::string, std::shared_ptr<mdsd::OboDirectConfig>> _oboDirectConfigsMap;\n\n\t// key: OboDirect partition field name (e.g., \"resourceId\"); value: value (e.g., \"SUBSCRIPTIONS/91D12660-3DEC-467A-BE2A-213B5544DDC0/RESOURCEGROUPS/RMANDASHOERG/PROVIDERS/MICROSOFT.DEVICES/IOTHUBS/SHOEHUBSCUS3\")\n\tstd::unordered_map<std::string, std::string> _oboDirectPartitionFieldsMap;\n\n\t// key: eventName (e.g., 
\"WADMetricsPT1MP10DV2S\"); value: duration (e.g., \"PT1M\")\n\t// Currently only DerivedEvent's durations are stored.\n\tstd::unordered_map<std::string, std::string> _eventNamesDurationsMap;\n\n\tstatic void Destroyer(MdsdConfig *, boost::asio::deadline_timer *);\n\n\tstd::map<std::string, unsigned long> _quotas;\n\n\tbool _monitoringManagementSeen;\n\tstd::string cmdContainerSas;\n\n\t// true if autokey is used in any account; false otherwise.\n\tbool _hasAutoKey;\n\n\tstd::shared_ptr<mdsd::MdsdEventCfg> _mdsdEventCfg;\n\tstd::shared_ptr<mdsd::EventPubCfg> _eventPubCfg;\n\n\t/// <summary>\n\t/// Set the line number of the file being parsed. Used when recording messages generated during parsing.\n\t/// </summary>\n\t/// <param name=\"num\">The line sequence number of the chunk being handed to the parser</param>\n\tvoid SetLineNumber(long num) { currentLine = num; }\n\n\t/// <summary>Increment the line number indicator for the next chunk to be parsed</summary>\n\tvoid NextLine() { currentLine++; }\n\tstd::vector<std::pair<std::string, std::string>> ExtractCmdContainerAutoKeys();\n};\n\n#endif //_MDSDCONFIG_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsdExtension.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSDEXTENSION_HH_\n#define _MDSDEXTENSION_HH_\n\n#include <string>\n\nclass MdsdExtension\n{\npublic:\n    /// <summary> Contruct new MdsdExtension object given extension name.\n    /// <param name=\"name\">Extension name</param>\n    /// </summary>\n    MdsdExtension(const std::string & name) :\n    _name(name), \n    _cpuPercentUsage(0),\n    _isCpuThrottling(false),\n    _memoryLimitInMB(0),\n    _isMemoryThrottling(false),\n    _ioReadLimitInKBPerSecond(0),\n    _ioReadThrottling(false),\n    _ioWriteLimitInKBPerSecond(0),\n    _ioWriteThrottling(false)\n    { }\n\n    ~MdsdExtension() { }\n\n    const std::string & Name() const { return _name; }\n\n    const std::string & GetCmdLine() const { return _cmdline; }\n    void SetCmdLine(const std::string & cmdline) { _cmdline = cmdline; }\n\n    const std::string & GetBody() const { return _body; }\n    void SetBody(const std::string & body) { _body = body; }\n\n    const std::string & GetAlterLocation() const { return _alterLocation; }\n    void SetAlterLocation(const std::string & alterLocation) { _alterLocation = alterLocation; }\n\n    float GetCpuPercentUsage() const { return _cpuPercentUsage; }\n    void SetCpuPercentUsage(float cpuPercentUsage) { _cpuPercentUsage = cpuPercentUsage; }\n\n    bool GetIsCpuThrottling() const { return _isCpuThrottling; }\n    void SetIsCpuThrottling(bool isCpuThrottling) { _isCpuThrottling = isCpuThrottling; }\n\n    unsigned long long GetMemoryLimitInMB() const { return _memoryLimitInMB; }\n    void SetMemoryLimitInMB(unsigned long long memoryLimitInMB) { _memoryLimitInMB = memoryLimitInMB; }\n\n    bool GetIsMemoryThrottling() const { return _isMemoryThrottling; }\n    void SetIsMemoryThrottling(bool isMemoryThrottling) { _isMemoryThrottling = isMemoryThrottling; }\n\n    unsigned long long GetIOReadLimitInKBPerSecond() const { return _ioReadLimitInKBPerSecond; }\n    void SetIOReadLimitInKBPerSecond(unsigned long long n) { _ioReadLimitInKBPerSecond = n; }\n\n    bool GetIsIOReadThrottling() const { return _ioReadThrottling; }\n    void SetIsIOReadThrottling(bool isThrottling) { _ioReadThrottling = isThrottling; }\n\n    unsigned long long GetIOWriteLimitInKBPerSecond() const { return _ioWriteLimitInKBPerSecond; }\n    void SetIOWriteLimitInKBPerSecond(unsigned long long n) { _ioWriteLimitInKBPerSecond = n; }\n\n    bool GetIsIOWriteThrottling() const { return _ioWriteThrottling; }\n    void SetIsIOWriteThrottling(bool isThrottling) { _ioWriteThrottling = isThrottling; }\n\nprivate:\n    MdsdExtension() = delete;\n\n    const std::string _name;\n    // Define command line to be std::string because we need to execute it.\n    std::string _cmdline;\n    std::string _body;\n    // Define alternative location path to be std::string because we need to use the path for execute.\n    std::string _alterLocation;\n\n    float _cpuPercentUsage;\n    bool _isCpuThrottling;\n\n    unsigned long long _memoryLimitInMB;\n    bool _isMemoryThrottling;\n\n    unsigned long long _ioReadLimitInKBPerSecond;\n    bool _ioReadThrottling;\n\n    unsigned long long _ioWriteLimitInKBPerSecond;\n    bool _ioWriteThrottling;\n};\n\n\n#endif // _MDSDEXTENSION_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsdMetrics.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsdMetrics.hh\"\n\nthread_local MdsdMetrics *MdsdMetrics::_instance = nullptr;\n\nstd::unordered_set<MdsdMetrics *> MdsdMetrics::_instances;\n\nstd::mutex MdsdMetrics::_setLock;\n\nstd::map<std::string, unsigned long long>\nMdsdMetrics::AggregateAll()\n{\n\tstd::map<std::string, unsigned long long> totals;\n\n\tfor (const MdsdMetrics * pinstance : _instances) {\n\t\tfor (const auto & item : pinstance->_metrics) {\n\t\t\ttotals[item.first] += item.second;\n\t\t}\n\t}\n\n\treturn totals;\n}\n\nunsigned long long\nMdsdMetrics::AggregateMetric(const std::string &metric)\n{\n\tunsigned long long total = 0;\n\n\tfor (const MdsdMetrics * pinstance : _instances) {\n\t\tauto & inst_map = pinstance->_metrics;\n\t\tauto iter = inst_map.find(metric);\n\t\tif (iter != inst_map.end()) {\n\t\t    total += iter->second;\n\t\t}\n\t}\n\n\treturn total;\n}\n\n#ifdef DOING_MEMCHECK\nbool MdsdMetrics::_allFree = false;\n\nvoid\nMdsdMetrics::ClearMetrics()\n{\n\tstd::lock_guard<std::mutex> lock(_setLock);\n\t_allFree = true;\n\tfor (MdsdMetrics *item : _instances) {\n\t    delete item;\n\t}\n\t_instances.clear();\n}\n\n#endif\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/MdsdMetrics.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSDMETRICS_HH_\n#define _MDSDMETRICS_HH_\n\n#include <map>\n#include <unordered_set>\n#include <mutex>\n#include <string>\n\n#ifdef DOING_MEMCHECK\n#define QUITABORT if (_allFree) return\n#else\n#define QUITABORT\n#endif\n\nclass MdsdMetrics\n{\npublic:\n\tstatic MdsdMetrics &GetInstance() { if (_instance == nullptr) { std::lock_guard<std::mutex> lock(_setLock);\n\t\t\t\t\t       _instance = new MdsdMetrics(); _instances.insert(_instance); } return *_instance; }\n\n\tstatic void Count(const std::string &metric) { QUITABORT; GetInstance().CountThis(metric); }\n\tstatic void Count(const std::string &metric, unsigned long long delta) { QUITABORT; GetInstance().CountThis(metric, delta); }\n\tvoid CountThis(const std::string &metric) { QUITABORT; _metrics[metric]++; }\n\tvoid CountThis(const std::string &metric, unsigned long long delta) { QUITABORT; _metrics[metric] += delta; }\n\n\tstatic std::map<std::string, unsigned long long> AggregateAll();\n\tstatic unsigned long long AggregateMetric(const std::string &metric);\n\nprivate:\n\t// One instance in each thread; that makes access within a thread lock-free\n\tstatic thread_local MdsdMetrics * _instance;\n\n\t// One global list of all per-thread instances...\n\tstatic std::unordered_set<MdsdMetrics *> _instances;\n\n\t// One lock protects the global list\n\tstatic std::mutex _setLock;\n\n\tstd::map<std::string, unsigned long long> _metrics;\n\n#ifdef DOING_MEMCHECK\npublic:\n\tvoid ClearMetrics();\n\tstatic bool _allFree;\nprivate:\n#endif\n\n\tMdsdMetrics() {}\n};\n\n#endif // _MDSDMETRICS_HH_\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Memcheck.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifdef DOING_MEMCHECK\n\n#include \"MdsSchemaMetadata.hh\"\n#include \"Engine.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include <valgrind/memcheck.h>\n\n// Only compiled when DOING_MEMCHECK is added as a -D on the compile line\n// Otherwise, it won't even compile, much less link.\n\nextern \"C\" void\n\nRunFinalCleanup()\n{\n    Trace trace(Trace::SignalHandlers, \"RunFinalCleanup\");\n\n    trace.NOTE(\"Clear Schema Metadata Cache\");\n    MdsSchemaMetadata::ClearCache();\n\n    trace.NOTE(\"Clear Extension object cache\");\n    CleanupExtensions();\n\n    Engine* engine = Engine::GetEngine();\n    trace.NOTE(\"Clear SchemasTable cache\");\n    engine->ClearPushedCache();\n    trace.NOTE(\"Cleanup MdsdConfig\");\n    engine->ClearConfiguration();\n    engine = nullptr;\n\n\n    // Must be last\n    trace.NOTE(\"Closing all logs\");\n    Logger::CloseAllLogs();\n    VALGRIND_DO_LEAK_CHECK;\n}\n\n#endif\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/OMIQuery.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"OMIQuery.hh\"\n#include \"OmiTask.hh\"\n#include \"MdsSchemaMetadata.hh\"\n#include \"MdsValue.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Engine.hh\"\n#include \"MdsdConfig.hh\"\n#include \"MdsEntityName.hh\"\n#include \"Credentials.hh\"\n#include \"Batch.hh\"\n#include \"Utility.hh\"\n\n#include <iostream>\n#include <algorithm>\n#include <unordered_map>\n\n#include <unistd.h>\n#include <sys/syscall.h>\n#include <sys/time.h>\n#include <sys/stat.h>\n#include <cctype>\n\nextern \"C\" {\n#include <sys/time.h>\n}\n\nusing std::vector;\nusing std::pair;\n\nOMIQuery::OMIQuery(PipeStage *head, const std::string& ns, const std::string& qry, bool doUpload)\n\t: _pipeHead(head), _name_space(ns), _queryexpr(qry), _uploadData(doUpload), _connTimeoutMS(90000)\n{\n\tTrace trace(Trace::OMIIngest, \"OMIQuery::Constructor\");\n\n        if (MdsdUtil::NotValidName(_name_space)) {\n                throw std::invalid_argument(\"OMIQuery namespace must not be empty\");\n        } else if (MdsdUtil::IsEmptyOrWhiteSpace(_queryexpr)) {\n                throw std::invalid_argument(\"OMIQuery query expression must not be empty\");\n\t}\n\n\tconst std::string from = \"from\";\n\tconst std::string& whitespace = \" \\t\";\n\tstd::string queryLower = _queryexpr;\n\tstd::transform(queryLower.begin(), queryLower.end(), queryLower.begin(), ::tolower);\n\n\tsize_t frompos = queryLower.find(from);\n\tif (std::string::npos == frompos) {\n\t\tthrow std::invalid_argument(\"Invalid syntax in OMI query expression (invalid class name specification)\");\n\t}\n\n\tstd::string substr1 = _queryexpr.substr(frompos + from.length());\n\tauto strBegin = substr1.find_first_not_of(whitespace);\n\tif (std::string::npos == strBegin) {\n\t\tthrow std::invalid_argument(\"Invalid syntax in OMI query expression (invalid class name specification)\");\n\t}\n\tauto strEnd = substr1.find_first_of(whitespace, strBegin);\n\t_classname = substr1.substr(strBegin, strEnd-strBegin);\n        if (MdsdUtil::NotValidName(_classname)) {\n                throw std::invalid_argument(\"OMIQuery class must not be empty\");\n        }\n\n\t_schemaId = OmiTask::SchemaId(ns, qry);\n\tif (0 == _schemaId) {\n\t\tthrow std::invalid_argument(\"No schemaID has been allocated for this namespace and query\");\n\t}\n\n\ttrace.NOTE(\"Query namespace(\" + _name_space + \") class(\" + _classname + \") queryexp(\" + _queryexpr + \")\");\n}\n\nOMIQuery::~OMIQuery()\n{\n\tTrace trace(Trace::OMIIngest, \"OMIQuery::Destructor\");\n\n\t// Leave the processing pipeline alone; the OmiTask will have cleaned it up.\n}\n\nvoid OMIQuery::SetConnTimeout(unsigned int milliSeconds)\n{\n    Trace trace(Trace::OMIIngest, \"OMIQuery::SetConnTimeout\");\n\t_connTimeoutMS = milliSeconds;\n    trace.NOTE(\"Set OMI connection timeout(MS)=\" + std::to_string(_connTimeoutMS));\n}\n\nstd::unique_ptr<mi::Client> OMIQuery::CreateNewClient()\n{\n\tTrace trace(Trace::OMIIngest, \"OMIQuery::CreateNewClient\");\n\tbool resultOk = true;\n\tstd::unique_ptr<mi::Client> client;\t// Points to nothing\n\ttry {\n\t\tclient.reset(new mi::Client());\t// Make it own the newly allocated object\n\t\tmi::String locator = SCX_SOCKET_VAL;\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"locator='\" << locator.Str() << \"'; Timeout(MS)=\" << _connTimeoutMS;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t\tresultOk = 
client->Connect(locator, \"\", \"\", _connTimeoutMS*1000);\n\t\tif (!resultOk) {\n\t\t    LogError(\"Error: Unable to connect to OMI service. (Is OMI installed and started?)\");\n\t\t    client.reset();\n\t\t}\n\t}\n\tcatch(...)\n\t{\n\t\tLogError(\"Error: Exception thrown while connecting to OMI service. (Is OMI functional?)\");\n\t\tclient.reset();\t// Deletes what it pointed to, if anything\n\t\tresultOk = false;\n\t}\n\ttrace.NOTE(\"ResultStatus=\" + std::to_string(resultOk));\n\treturn client;\n}\n\nbool OMIQuery::NoOp()\n{\n    Trace trace(Trace::OMIIngest, \"OMIQuery::NoOp\");\n    bool resultOK = true;\n\n    try \n    {\n        auto client = CreateNewClient();\n        if (client) {\n            resultOK = client->NoOp(_connTimeoutMS*1000);\n            if (!resultOK) {\n                LogError(\"Error: OMI NoOp() failed. Is OMI functional?\");\n            }\n            else {\n                trace.NOTE(\"NoOp finished successfully.\");\n            }\n            client->Disconnect();\n        }\n        else {\n            resultOK = false;\n        }\n    }\n    catch(...)\n    {\n        LogError(\"Error: Exception thrown while performing OMI NoOp\");\n        resultOK = false;\n    }\n\n    return resultOK;\n}\n\n// Execute the query; put the results in CanonicalEntity instances and\n// pass them into the processing pipeline associated with this query.\nbool OMIQuery::RunQuery(const MdsTime& qibase)\n{\t\n\tTrace trace(Trace::OMIIngest, \"OMIQuery::RunQuery\");\n\ttrace.NOTE(\"\\nrun query: \" + _name_space + \" : \" +  _queryexpr);\n\n\tbool resultOK = true;\n\tMdsTime queryTime;\t// Default constructor sets this to the current time\n\tmi::Array<mi::DInstance> instanceList;\n\tmi::Result result = MI_RESULT_OK;\n\ttry {\n\t\tauto client = CreateNewClient();\n\t\tif (! 
client) {\n\t\t\treturn false;\n\t\t}\n\n\t\tresultOK = client->EnumerateInstances(_name_space.c_str(), _classname.c_str(), true, _connTimeoutMS*1000, \n\t\t\t    instanceList, QUERYLANG, _queryexpr.c_str(), result);\n\t\tif (!resultOK || (result != MI_RESULT_OK)) {\n\t\t\tLogError(\"Error: OMI EnumerateInstances failed\");\n\t\t\tresultOK = false;\n\t\t}\n\t\tclient->Disconnect();\n\t}\n\tcatch(const std::exception& e)\n\t{\n\t\tresultOK = false;\n\t\tLogError(\"Error: OMI RunQuery() unexpected exception: \" + std::string(e.what()));\n\t}\n\tcatch(...)\n\t{\n\t\tresultOK = false;\n\t\tLogError(std::string(\"Error: OMI RunQuery() unexpected exception:\"));\n\t}\n\n\tif (resultOK) {\n\t\tMI_Uint32 count = instanceList.GetSize();\n\t\ttrace.NOTE(\"Found instances count=\" + std::to_string(count));\n\t\t_pipeHead->Start(qibase);\n\t\tfor (MI_Uint32 i = 0; i < count; i++)\n\t\t{\n\t\t\tCanonicalEntity * ce = new CanonicalEntity(instanceList[i].Count());\n\t\t\tresultOK = PopulateEntity(ce, instanceList[i]);\n\t\t\tif (resultOK) {\n\t\t\t\t// Suppress a CanonicalEntity with zero columns; could happen, means\n\t\t\t\t// nothing is wrong, just no data\n\t\t\t\tif (ce->size()) {\n\t\t\t\t\tce->SetPreciseTime(queryTime);\n\t\t\t\t\tce->SetSchemaId(_schemaId);\n\t\t\t\t\t_pipeHead->Process(ce);\n\t\t\t\t} else {\n\t\t\t\t\tdelete ce;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tLogger::LogWarn(\"Problem(s) detected with this OMI instance; dropping it\");\n\t\t\t\tdelete ce;\n\t\t\t}\n\t\t}\n\t\t_pipeHead->Done();\n\t}\n\n\ttrace.NOTE(\"RunQuery finished with resultOK=\" + std::to_string(resultOK));\n\n\treturn resultOK;\n}\n\nbool\nOMIQuery::PopulateEntity(CanonicalEntity *ce, const mi::DInstance& item)\n{\n\tTrace trace(Trace::OMIIngest, \"OMIQuery::PopulateEntity\");\n\tmi::Uint32 count = item.Count();\n\ttrace.NOTE(\"Instance has #items=\" + std::to_string(count));\n\n\ttry {\n\tfor (mi::Uint32 i = 0; i < count; i++) {\n\t\tmi::String name;\n\t\tif (!item.GetName(i, name)) {\n\t\t\tLogError(\"While processing OMI results, failed to get name of column \" + std::to_string(i));\n\t\t\treturn false;\n\t\t}\n\t\tstd::string namestr (name.Str());\n\n                mi::Type type;\n                MI_Value value;\n                bool isNull = false;\n                bool isKey = false;\n\n                if (!item.GetValue(name, &value, type, isNull, isKey)) {\n                        LogError(\"While processing OMI results, failed to get value for column \" + std::to_string(i));\n                        return false;\n                }\n\n\t\tif (isNull) {\n\t\t\tce->AddColumn(namestr, \"[NULL]\");\n\t\t\tif (trace.IsActive()) {\n\t\t\t\tstd::ostringstream msg;\n\t\t\t\tmsg << \"Item[\" << i << \"]: \" << namestr << \" (OMI type \" << type << \") is NULL\";\n\t\t\t\ttrace.NOTE(msg.str());\n\t\t\t}\n\t\t} else if (type == MI_INSTANCE || type == MI_REFERENCE) {\n\t\t\ttrace.NOTE(\"Item[\" + std::to_string(i) + \"] is an Instance/Reference\");\n\t\t\tmi::DInstance subitem;\n\t\t\tbool resultOK;\n\t\t\tif (type == MI_INSTANCE) {\n\t\t\t\tresultOK = item.GetInstance(name, subitem);\n\t\t\t} else {\n\t\t\t\tresultOK = item.GetReference(name, subitem);\n\t\t\t}\n\t\t\tresultOK = resultOK && PopulateEntity(ce, subitem);\n\t\t\tif (!resultOK) {\n\t\t\t\tLogError(\"While processing OMI results, failed to unpack instance/reference\");\n\t\t\t\treturn false;\n\t\t\t}\n\t\t} else {\n\t\t\tstd::ostringstream msg;\n\t\t\tbool resultOK = true;\n\t\t\tmsg << \"Item[\" << i << \"]: \" << namestr << \" (MI_Type \" << type << 
\")\";\n\t\t\ttry {\n\t\t\t\tMdsValue * mdsvalue = new MdsValue { value, type };\n\t\t\t\tce->AddColumn(namestr, mdsvalue);\n\t\t\t\tmsg << \" \" << mdsvalue->TypeToString() << \" \" << *mdsvalue;\n\t\t\t}\n\t\t\tcatch (std::exception & e) {\n\t\t\t\tresultOK = false;\n\t\t\t\tmsg << \" failed type conversion (\" << e.what() << \")\";\n\t\t\t}\n\t\t\ttrace.NOTE(msg.str());\n\t\t\tif (!resultOK)\n\t\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n\t}\n\tcatch (...) {\n\t\tLogError(\"Unknown exception caught in OMIQuery::PopulateEntity\");\n\t}\n\treturn false;\n}\n\nnamespace std {\ntemplate <> struct hash<MI_Result>\n{\n\tsize_t operator()(const MI_Result & res) const\n\t{\n\t\treturn static_cast<size_t>(res);\n\t}\n};\n}\n\nstd::string OMIQuery::Result_ToString(MI_Result result) const\n{\n    static std::unordered_map<MI_Result, const char *> resultCodes = \n    {\n        { MI_RESULT_OK, \"MI_RESULT_OK\" },\n        { MI_RESULT_FAILED, \"MI_RESULT_FAILED\" },\n        { MI_RESULT_ACCESS_DENIED, \"MI_RESULT_ACCESS_DENIED\" },\n        { MI_RESULT_INVALID_NAMESPACE, \"MI_RESULT_INVALID_NAMESPACE\" },\n        { MI_RESULT_INVALID_PARAMETER, \"MI_RESULT_INVALID_PARAMETER\" },\n        { MI_RESULT_INVALID_CLASS, \"MI_RESULT_INVALID_CLASS\" },\n        { MI_RESULT_NOT_FOUND, \"MI_RESULT_NOT_FOUND\" },\n        { MI_RESULT_NOT_SUPPORTED, \"MI_RESULT_NOT_SUPPORTED\" },\n        { MI_RESULT_CLASS_HAS_CHILDREN, \"MI_RESULT_CLASS_HAS_CHILDREN\" },\n        { MI_RESULT_CLASS_HAS_INSTANCES, \"MI_RESULT_CLASS_HAS_INSTANCES\" },\n        { MI_RESULT_INVALID_SUPERCLASS, \"MI_RESULT_INVALID_SUPERCLASS\" },\n        { MI_RESULT_ALREADY_EXISTS, \"MI_RESULT_ALREADY_EXISTS\" },\n        { MI_RESULT_NO_SUCH_PROPERTY, \"MI_RESULT_NO_SUCH_PROPERTY\" },\n        { MI_RESULT_TYPE_MISMATCH, \"MI_RESULT_TYPE_MISMATCH\" },\n        { MI_RESULT_QUERY_LANGUAGE_NOT_SUPPORTED, \"MI_RESULT_QUERY_LANGUAGE_NOT_SUPPORTED\" },\n        { MI_RESULT_INVALID_QUERY, \"MI_RESULT_INVALID_QUERY\" },\n        { MI_RESULT_METHOD_NOT_AVAILABLE, \"MI_RESULT_METHOD_NOT_AVAILABLE\" },\n        { MI_RESULT_METHOD_NOT_FOUND, \"MI_RESULT_METHOD_NOT_FOUND\" },\n        { MI_RESULT_NAMESPACE_NOT_EMPTY, \"MI_RESULT_NAMESPACE_NOT_EMPTY\" },\n        { MI_RESULT_INVALID_ENUMERATION_CONTEXT, \"MI_RESULT_INVALID_ENUMERATION_CONTEXT\" },\n        { MI_RESULT_INVALID_OPERATION_TIMEOUT, \"MI_RESULT_INVALID_OPERATION_TIMEOUT\" },\n        { MI_RESULT_PULL_HAS_BEEN_ABANDONED, \"MI_RESULT_PULL_HAS_BEEN_ABANDONED\" },\n        { MI_RESULT_PULL_CANNOT_BE_ABANDONED, \"MI_RESULT_PULL_CANNOT_BE_ABANDONED\" },\n        { MI_RESULT_FILTERED_ENUMERATION_NOT_SUPPORTED, \"MI_RESULT_FILTERED_ENUMERATION_NOT_SUPPORTED\" },\n        { MI_RESULT_CONTINUATION_ON_ERROR_NOT_SUPPORTED, \"MI_RESULT_CONTINUATION_ON_ERROR_NOT_SUPPORTED\" },\n        { MI_RESULT_SERVER_LIMITS_EXCEEDED, \"MI_RESULT_SERVER_LIMITS_EXCEEDED\" },\n        { MI_RESULT_SERVER_IS_SHUTTING_DOWN, \"MI_RESULT_SERVER_IS_SHUTTING_DOWN\" },\n        { MI_RESULT_CANCELED, \"MI_RESULT_CANCELED\" },\n        { MI_RESULT_OPEN_FAILED, \"MI_RESULT_OPEN_FAILED\" },\n        { MI_RESULT_INVALID_CLASS_HIERARCHY, \"MI_RESULT_INVALID_CLASS_HIERARCHY\" },\n        { MI_RESULT_WOULD_BLOCK, \"MI_RESULT_WOULD_BLOCK\" },\n        { MI_RESULT_TIME_OUT, \"MI_RESULT_TIME_OUT\" }\n    };\n\n    auto const & iter = resultCodes.find(result);\n    if (iter != resultCodes.end()) {\n    \treturn std::string(iter->second);\n    }\n\n    /* Not found! 
*/\n    return std::string(\"MI_ERROR_CODE_\") + std::to_string(result);\n}\n\n// vim: set sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/OMIQuery.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _OMIQUERY_HH_\n#define _OMIQUERY_HH_\n\n#include \"MI.h\"\n#include \"omiclient/client.h\"\n#include \"Logger.hh\"\n#include \"MdsEntityName.hh\"\n#include \"Pipeline.hh\"\n#include \"SchemaCache.hh\"\n#include <string>\n#include <vector>\n#include <utility>\n#include <unordered_map>\n#include <memory>\n#include <mutex>\n\nclass MdsdConfig;\nclass MdsValue;\nclass CanonicalEntity;\nclass Credentials;\nclass Batch;\n\n/*\n\tOMIQuery provides the APIs to query OMI providers (example: SCX) and upload the results to MDS.\n\tExample usage:\n\t\tOMIQuery* q = new OMIQuery(parameters);\n\t\tbool isOK1 = q.RunQuery(...) // query 1\n\t\tbool isOK2 = q.RunQuery(...) // query 2\n\n\tTo run in multi-threading mode, create multiple OMIQuery objects.\n\n*/\n\ntypedef std::vector<std::pair<std::string,std::string>> omi_schemalist_t;\ntypedef std::unordered_map<std::string, MdsValue*> omi_datatable_t;\n\nclass OMIQuery\n{\npublic:\n\t// Create an OMIQuery object. If uploadData is true, the data will be\n\t// uploaded to MDS azure tables. if uploadData is false, data won't be uploaded.\t\n\tOMIQuery(PipeStage * head, const std::string& name_space, const std::string& queryexpr, bool uploadData = true);\n\n\t// Release OMI server connection resources\n\t~OMIQuery();\n\n\t// disable copy and move contructors\n\tOMIQuery(OMIQuery&& h) = delete;\n\tOMIQuery& operator=(OMIQuery&& h) = delete;\n\n\tOMIQuery(const OMIQuery&) = delete;\n\tOMIQuery& operator=(const OMIQuery &) = delete;\n\n\t// Run a noop query. This can be used to test the connection to server.\n\t// Return true if success; return false for any failure.\n\tbool NoOp();\n\n\t// Run an OMI query in given namespace with given query expression.\n\t// Example: name_space = \"root/scx\", queryexpr = \"select Name from SCX_UnixProcess\"\n\t// Return true if success; return false for any failure.\n\t// Puts the results into CanonicalEntity objects, which it passes to the head of\n\t// the processing pipeline (_pipehead).\n\tbool RunQuery(const MdsTime&);\n\n\t// Set the connection timeout value in milliSeconds.\n\tvoid SetConnTimeout(unsigned int milliSeconds);\n\n\t// Enable/disable uploading of data to MDS\n\tvoid EnableUpload(bool flag) { _uploadData = flag; }\n\nprivate:\n\tvoid LogError(const std::string &msg) const { Logger::LogError(msg); }\n\n\tstd::unique_ptr<mi::Client>  CreateNewClient();\n\n\t// Given an OMI instance, add its columns to a CanonicalEntity. The function will\n\t// recursively add the columns of any instances or references included within the instance.\n\tbool PopulateEntity(CanonicalEntity *, const mi::DInstance&);\n\n\tstd::string GetClassNameFromQuery(const std::string& queryexpr) const;\n\tstd::string Result_ToString(MI_Result result) const;\n\n\t// The top stage of the processing pipeline. All information about the ultimate destination\n\t// of each OMI record we capture is embedded in the various stages of the pipeline, which was\n\t// constructed when the config file was loaded.\n\tPipeStage * _pipeHead;\n\n \tstd::string _name_space;\t// OMI namespace\n \tstd::string _queryexpr;\t\t// OMI query (written in CQL)\n\tstd::string _classname;\t\t// Name of the OMI class from which the query pulls data\n\n \tbool _uploadData;\t\t// If false, run query but don't upload data. 
Good for testing the query itself.\n\tunsigned int _connTimeoutMS;\t// timeout in milli-seconds to connect to OMI server for queries.\n\n\tSchemaCache::IdType _schemaId;\t// Identifies the schema for this query\n\n\t// Because the same queries are going to be run again and again, use a cache to save the schemas.\n\t// key=querynamespace+queryexpr; value: bool. If the query exists in the table, the\n\t// schema shouldn't be uploaded any longer.\n\n\tstd::mutex tablemutex;\n\tstd::mutex enginemutex;\n\n\tconst char * SCX_SOCKET_KEY = \"socketfile\";\n\tconst char * SCX_SOCKET_VAL = \"/var/opt/omi/run/omiserver.sock\";\n\tconst char * QUERYLANG = \"CQL\";\n};\n\n#endif\n\n// vim: se sw=8:\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/OmiTask.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"OmiTask.hh\"\n#include \"OMIQuery.hh\"\n#include \"Batch.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"SchemaCache.hh\"\n#include <cpprest/pplx/threadpool.h>\n\nclass MdsdConfig;\n\nstd::map<std::string, SchemaCache::IdType> OmiTask::_qryToSchemaId;\n\nOmiTask::OmiTask(MdsdConfig *config, const MdsEntityName &target, Priority prio,\n\t\t const std::string& nmspc, const std::string& qry, time_t sampleRate)\n\t: _config(config), _target(target), _priority(prio), _namespace(nmspc), _query(qry),\n\t  _sampleRate(sampleRate?sampleRate:prio.Duration()), _retryCount(0), _timer(crossplat::threadpool::shared_instance().service()),\n\t  _cancelled(false), _omiConn(nullptr), _head(nullptr), _tail(nullptr)\n{\n\tTrace trace(Trace::OMIIngest, \"OmiTask Constructor\");\n\n\tif (nmspc.empty() || qry.empty()) {\n\t\tthrow std::invalid_argument(\"Missing at least one required attribute (omiNamespace, cqlQuery)\");\n\t}\n\n\t// Allocated a schemaId for this namespace+query, if necessary\n\tstd::string mapkey = _namespace + _query;\n\tif (0 == _qryToSchemaId.count(mapkey)) {\t// Not found - insert\n\t\t_qryToSchemaId[mapkey] = SchemaCache::Get().GetId();\n\t}\n}\n\nOmiTask::~OmiTask()\n{\n\t// Cleanup the query object\n\tif (_omiConn != nullptr) {\n\t\tdelete _omiConn;\n\t}\n\n\t// Cleanup the processing pipeline for query results. Cleanup is recursive; each stage\n\t// deletes its successor before completing its own cleanup.\n\tif (_head) {\n\t\tdelete _head;\n\t\t_head = nullptr;\n\t}\n}\n\nSchemaCache::IdType\nOmiTask::SchemaId(const std::string & ns, const std::string & qry)\n{\n\tconst auto & iter = _qryToSchemaId.find(ns+qry);\n\tif (iter == _qryToSchemaId.end()) {\n\t\treturn 0;\t// Not found\n\t} else {\n\t\treturn iter->second;\n\t}\n}\n\nvoid\nOmiTask::AddStage(PipeStage *stage)\n{\n\tTrace trace(Trace::QueryPipe, \"OmiTask::AddStage\");\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"OmiTask \" << this << \" adding stage \" << stage->Name();\n\t\ttrace.NOTE(msg.str());\n\t}\n\tif (! 
\tif (!_tail) {\n\t\t// This is the first stage in the pipeline; set the head to point here\n\t\t_head = stage;\n\t} else {\n\t\t// There's already a pipeline; make the old tail point to the newly-added stage\n\t\t_tail->AddSuccessor(stage);\n\t}\n\t// Either way, we have a new tail in the pipeline\n\t_tail = stage;\n}\n\nvoid\nOmiTask::Start()\n{\n\tusing namespace boost::posix_time;\n\n\tTrace trace(Trace::OMIIngest, \"OmiTask::Start\");\n\n\ttrace.NOTE(_query);\n\tif (!_head) {\n\t\tLogger::LogError(\"No processing pipeline for event; should never happen\");\n\t\treturn;\n\t}\n\n\t// The OMIQuery object does all the retrieval work\n\ttry {\n\t\t_omiConn = new OMIQuery(_head, _namespace, _query, true);\n\t} catch (const std::exception& ex) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Query task (\" << _query << \") not started because OMIQuery creation failed: \" << ex.what();\n\t\tLogger::LogError(msg.str());\n\t\treturn;\n\t}\n\n\t_firstTimeTaskStartTried.Touch();\n\tTryToStartAndRetryIfFailed(boost::system::error_code());\n}\n\nvoid\nOmiTask::TryToStartAndRetryIfFailed(const boost::system::error_code& error)\n{\n\tusing namespace boost::posix_time;\n\n\tTrace trace(Trace::OMIIngest, \"OmiTask::TryToStartAndRetryIfFailed\");\n\n\tif (error == boost::asio::error::operation_aborted) {\n\t\t// The same comments as in OmiTask::DoWork() apply here as well.\n\t\ttrace.NOTE(\"Timer cancelled\");\n\t\treturn;\n\t}\n\n\tif (_omiConn->NoOp()) {\n\t\t// Add some randomness to when we start regular queries\n\t\tMdsTime target { MdsTime::Now() + MdsTime(2 + random()%5, random()%1000000) };\n\t\t_qibase = target.Round(_sampleRate);\n\t\t_nextTime = target.to_ptime();\n\t\t_timer.expires_at(_nextTime);\n\t\t_timer.async_wait(boost::bind(&OmiTask::DoWork, this, boost::asio::placeholders::error));\n\t\tif (_retryCount > 0) {\n\t\t\tLogger::LogInfo(\"Query task(\" + _query + \") started after \" + std::to_string(_retryCount) + \" retries\");\n\t\t}\n\t\treturn;\n\t}\n\n\t// OMI noop query failed\n\tconst time_t maxRetryTimeSec = 30 * 60; // Retry up to 30 minutes\n\tif (MdsTime::Now() > _firstTimeTaskStartTried + maxRetryTimeSec) {\n\t    Logger::LogError(std::string(\"Can't connect to OMI server for more than \")\n\t                    .append(std::to_string(maxRetryTimeSec / 60)).append(\" minutes. Giving up.\"));\n\t    return;\n\t}\n\n    // Keep retrying with exponential back-off delays\n    const time_t retryIntervalSec = 10 * (1 << _retryCount); // Exponential back-off delay (starting from 10 seconds)\n    trace.NOTE(std::string(\"OMIQuery::NoOp() basic query failed. Will try to start the task again in \")\n                .append(std::to_string(retryIntervalSec)).append(\" seconds.\"));\n    Logger::LogError(\"Connection to OMI server failed; query task (\" + _query + \") not started. 
Will try to start the task again in \"\n            + std::to_string(retryIntervalSec) + \" seconds.\");\n    _timer.expires_from_now(boost::posix_time::seconds(retryIntervalSec));\n    _timer.async_wait(boost::bind(&OmiTask::TryToStartAndRetryIfFailed, this, boost::asio::placeholders::error));\n    _retryCount++;\n}\n\nvoid\nOmiTask::Cancel()\n{\n\tTrace trace(Trace::OMIIngest, \"OmiTask::Cancel\");\n\tstd::lock_guard<std::mutex> lock(_mutex);\n\t_cancelled = true;\n\t_timer.cancel();\n}\n\nvoid\nOmiTask::DoWork(const boost::system::error_code& error)\n{\n\tTrace trace(Trace::OMIIngest, \"OmiTask::DoWork\");\n\tif (error == boost::asio::error::operation_aborted) {\n\t\t// If the timer was cancelled, we have to assume the entire configuration may have been\n\t\t// deleted; don't touch it. When an MdsdConfig object is told to self-destruct, it first\n\t\t// cancels all timer-driven actions, then it waits some period of time, then it actually\n\t\t// deletes the object. When the timers are cancelled, the handlers are called with the\n\t\t// cancellation message. The MdsdConfig object is *probably* still valid, and as long\n\t\t// as the timer isn't rescheduled, all should be well. But I'm playing it safe here\n\t\t// and assuming an explicit cancel operation means \"the config is gone\".\n\t\t//\n\t\t// Of course, if the MdsdConfig is deleted, all the associated objects, including this\n\t\t// very OmiTask object, get deleted as well. Thus, the \"don't touch nothin'\" rule.\n\t\ttrace.NOTE(\"Timer cancelled\");\n\t\treturn;\n\t}\n\n\t// Note that, as written, we do NOT hold the lock here; our use of the class instance\n\t// needs to be readonly. If that changes, revisit this locking pattern.\n\t_omiConn->RunQuery(_qibase);\n\ttrace.NOTE(\"Back from RunQuery\");\n\n\tstd::lock_guard<std::mutex> lock(_mutex);\n\tif (error || _cancelled) {\n\t\treturn;\n\t}\n\ttrace.NOTE(\"Rescheduling \" + _query);\n\t_qibase += MdsTime(_sampleRate);\n\t_nextTime = _nextTime + boost::posix_time::seconds(_sampleRate);\n\t_timer.expires_at(_nextTime);\n\t_timer.async_wait(boost::bind(&OmiTask::DoWork, this, boost::asio::placeholders::error));\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/OmiTask.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _OMITASK_HH_\n#define _OMITASK_HH_\n\n#include <string>\n#include <mutex>\n#include <map>\n#include <stddef.h>\n#include <boost/asio.hpp>\n#include <boost/bind.hpp>\n#include \"Priority.hh\"\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n#include \"StoreType.hh\"\n#include \"Pipeline.hh\"\n#include \"SchemaCache.hh\"\n\nclass OMIQuery;\nclass MdsdConfig;\n\nclass OmiTask\n{\npublic:\n\tOmiTask(MdsdConfig *config, const MdsEntityName& target, Priority prio, \n\t\tconst std::string& nmspc, const std::string& qry, time_t sampleRate);\n\t// I want a move constructor...\n\tOmiTask(OmiTask &&orig);\n\t// But do not want a copy constructor nor a default constructor\n\tOmiTask(OmiTask &) = delete;\n\tOmiTask() = delete;\n\n\t~OmiTask();\n\n\t// void AddUnpivot(const std::string &valueAttrName, const std::string &nameAttrName, const std::string &unpivotColumns);\n\tvoid AddStage(PipeStage *stage);\n\tvoid Start();\n\tvoid Cancel();\n\n\tconst MdsEntityName & Target() const { return _target; }\n\tint FlushInterval() const { return _priority.Duration(); }\n\tstatic SchemaCache::IdType SchemaId(const std::string & ns, const std::string &qry);\n\nprivate:\n\tMdsdConfig *_config;\n\tMdsEntityName _target;\n\tPriority _priority;\n\tstd::string _namespace;\n\tstd::string _query;\n\ttime_t _sampleRate;\n\tsize_t _retryCount;\n\tMdsTime _firstTimeTaskStartTried;\n\n\tstd::mutex _mutex;\n\tboost::asio::deadline_timer _timer;\n\tboost::posix_time::ptime _nextTime;\n\tbool _cancelled;\n\tMdsTime _qibase;\n\n\tstatic std::map<std::string, SchemaCache::IdType> _qryToSchemaId;\n\n\t// You may wonder \"why is this allocated on the heap?\"\n\t// Earlier in development, mdsd used a glib-based XML parser which returned Glib::ustring objects\n\t// instead of std::string. Various configuration classes stored those ustring objects and thus needed\n\t// the Glib-2.0 headers, which #define TRUE and FALSE; so, unfortunately, do the OMI headers. The\n\t// easiest solution to the compiler whining was to keep the OMI headers out of the MdsdConfig headers.\n\t// Using a pointer let us achieve that isolation.\n\t// In December 2015 we removed all use of Glib, so this was no longer an issue. At the time we\n\t// made that change, we decided it was safer to leave this as-is and clean it up in a subsequent\n\t// refactoring pass.\n\tOMIQuery *_omiConn;\n\n\tPipeStage *_head;\n\tPipeStage *_tail;\n\n\tvoid TryToStartAndRetryIfFailed(const boost::system::error_code& error);\n\tvoid DoWork(const boost::system::error_code& error);\n};\n\n#endif // _OMITASK_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/PipeStages.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"PipeStages.hh\"\n#include \"Batch.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"IdentityColumns.hh\"\n#include \"Credentials.hh\"\n#include \"MdsdConfig.hh\"\n#include \"MdsSchemaMetadata.hh\"\n#include \"StoreType.hh\"\n#include \"Utility.hh\"\n#include \"MdsTime.hh\"\n#include <boost/tokenizer.hpp>\n\nnamespace Pipe {\n\nconst std::string Unpivot::_name { \"Unpivot\" };\n\nUnpivot::Unpivot(const std::string &valueName, const std::string &nameName, const std::string &columns,\n\t\tstd::unordered_map<std::string, ColumnTransform>&& transforms)\n    :\t_valueName(valueName), _nameName(nameName), _transforms(transforms)\n{\n\tTrace trace(Trace::QueryPipe, \"Unpivot constructor\");\n\n\ttypedef boost::tokenizer<boost::char_separator<char> > tokenizer_t;\n\n\tboost::char_separator<char> delim(\", \");\t// space and comma\n\ttokenizer_t tokens(columns, delim);\n\tfor (const auto &item : tokens) {\n\t\t_columns.insert(item);\n\t}\n\tif (_columns.empty()) {\n\t\tthrow std::invalid_argument(\"No column names specified for <Unpivot>\");\n\t} else if (_valueName.empty()) {\n\t\tthrow std::invalid_argument(\"Invalid name for unpivot value\");\n\t} else if (_nameName.empty()) {\n\t\tthrow std::invalid_argument(\"Invalid name for unpivot name column\");\n\t}\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Unpivoting these columns: \";\n\t\tfor (const std::string &name : _columns) {\n\t\t\tmsg << \"[\" << name;\n\t\t\tconst auto& iter = _transforms.find(name);\n\t\t\tif (iter != _transforms.end()) {\n\t\t\t\tColumnTransform& xform = iter->second;\n\t\t\t\tmsg << \" --> \" << xform.Name;\n\t\t\t\tif (xform.Scale != 1.0) {\n\t\t\t\t\tmsg << \" scale \" << xform.Scale;\n\t\t\t\t}\n\t\t\t}\n\t\t\tmsg << \"]\";\n\t\t}\n\t\ttrace.NOTE(msg.str());\n\t}\n}\n\n// Tear apart the input item to produce multiple output items.\nvoid\nUnpivot::Process(CanonicalEntity *item)\n{\n\tTrace trace(Trace::QueryPipe, \"Unpivot::Process\");\n\n\t// 1: Run through the item and build a master CanonicalEntity which has only the\n\t//    columns that are *not* to be unpivoted. Count the pivoted columns.\n\tCanonicalEntity master;\n\tmaster.SetPreciseTime(item->GetPreciseTimeStamp());\n\tunsigned pivotCount = 0;\n\tfor (auto col = item->begin(); col != item->end(); col++) {\n\t\tif (_columns.count(col->first)) {\n\t\t\tpivotCount++;\n\t\t} else {\n\t\t\tmaster.AddColumn(col->first, col->second);\n\t\t\t// col->second is an MdsValue* and it's now owned by master;\n\t\t\t// update item's ownership\n\t\t\tcol->second = nullptr;\n\t\t}\n\t}\n\n\t// 2: If there were no pivoted columns, emit a warning, drop it on the floor,\n\t//    and return. (If we needed to send it to the pipeline, we'd have to dupe\n\t//    it from master into a heap-allocated copy and send that, since we'd\n\t//    already have torn all of the columns into master.)\n\tif (!pivotCount) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"<Unpivot> matched no columns for this event: \" << *item;\n\t\tLogger::LogWarn(msg);\n\t\tdelete item;\n\t\treturn;\n\t}\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Unpivoting \" << pivotCount << \" columns.\";\n\t\ttrace.NOTE(msg.str());\n\t}\n\n\t// 3: Run through the item again. 
Each time a column-to-be-unpivoted is found, duplicate\n\t//    the master CE, add the \"name\" and \"value\" columns, and send the row to our\n\t//    successor. Apply any translations to \"name\" at this time.\n\tfor (auto col = item->begin(); col != item->end(); col++) {\n\t\tif (_columns.count(col->first)) {\n\t\t\tCanonicalEntity *ce = new CanonicalEntity { master };\n\t\t\tconst auto & iter = _transforms.find(col->first);\n\t\t\tif (iter == _transforms.end()) {\n\t\t\t\t// No transform; use as-is\n\t\t\t\tce->AddColumn(_nameName, col->first);\n\t\t\t} else {\n\t\t\t\t// iter points to a pair<string, ColumnTransform>\n\t\t\t\t// So iter->second is a ColumnTransform\n\t\t\t\tColumnTransform& xform = iter->second;\n\t\t\t\tce->AddColumn(_nameName, xform.Name);\n\t\t\t\t// Apply the scale factor stored in the transform. MdsValue::scale() does appropriate\n\t\t\t\t// type conversion and does nothing, silently, if the value is not numeric.\n\t\t\t\tcol->second->scale(xform.Scale);\n\t\t\t}\n\t\t\tce->AddColumn(_valueName, col->second);\n\t\t\t// col->second is an MdsValue* and it's now owned by the dupe ce;\n\t\t\t// update item's ownership\n\t\t\tcol->second = nullptr;\n\n\t\t\tPipeStage::Process(ce);\n\t\t}\n\t}\n\n\t// 4. At this point we're done with the original item, which itself is not forwarded\n\t//    down the pipeline. Delete it.\n\tdelete item;\n}\n\nconst std::string BatchWriter::_name { \"BatchWriter\" };\n\nBatchWriter::BatchWriter(Batch * b, const ident_vect_t * idvec, unsigned int pcount, StoreType::Type storeType)\n\t: _batch(b), _idvec(idvec), _identString(), _storeType(storeType)\n{\n\tstd::vector<std::string> identValues;\n\tbool firstTime = true;\n\n\tfor (const auto &iter : *(_idvec)) {\n\t\tif (!firstTime)\n\t\t\t_identString.append(\"___\");\n\t\t_identString.append(iter.second);\n\t\tfirstTime = false;\n\t}\n\n\t// If the CanonicalEntity has identity columns, it may need partition and row keys.\n\t// The identity column data is sufficient to form the standard MDS partition and row keys,\n\t// which we do here. Only the data sink knows whether these keys are actually needed.\n\t_Nstr = MdsdUtil::ZeroFill(MdsdUtil::EasyHash(_identString) % (unsigned long long)pcount, 19);\n}\n\n\n// End of the processing pipeline. 
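(BatchWriter is always the terminal stage; its AddSuccessor() override deliberately throws.) 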
Adding the item to a batch is defined as a \"copy\" operation,\n// so we should throw away the \"original\" after that.\nvoid\nBatchWriter::Process(CanonicalEntity *item)\n{\n\tTrace trace(Trace::QueryPipe, \"BatchWriter::Process\");\n\t// Based on the target store type, ensure the proper keys are set\n\tif (_storeType == StoreType::XTable) {\n\t\ttrace.NOTE(\"Adding XTable columns\");\n\t\tbool doDefaultColumns = false;\n\t\tstd::string rowIndex = MdsdUtil::ZeroFill(RowIndex::Get(), 19);\n\t\tif (item->PartitionKey().empty()) {\n\t\t\titem->AddColumn(\"PartitionKey\", _Nstr + \"___\" + MdsdUtil::ZeroFill(_qibase.to_DateTime(), 19));\n\t\t\tdoDefaultColumns = true;\n\t\t}\n\t\tif (item->RowKey().empty()) {\n\t\t\titem->AddColumn(\"RowKey\", _identString +\"___\" + rowIndex);\n\t\t\tdoDefaultColumns = true;\n\t\t}\n\t\tif (doDefaultColumns) {\n\t\t\titem->AddColumn(\"PreciseTimeStamp\", new MdsValue(item->GetPreciseTimeStamp()));\n\t\t\titem->AddColumn(\"N\", _Nstr);\n\t\t\titem->AddColumn(\"RowIndex\", rowIndex);\n\t\t}\n\t\titem->AddColumn(\"TIMESTAMP\", new MdsValue(_qibase));\n\t}\n\n\t_batch->AddRow(*item);\n\tdelete item;\n}\n\n// Let the batch know we're done writing for now\nvoid\nBatchWriter::Done()\n{\n\t_batch->Flush();\n}\n\nconst std::string Identity::_name { \"Identity\" };\n\n// Add identity columns to a CanonicalEntity\nvoid\nIdentity::Process(CanonicalEntity *item)\n{\n\tstd::vector<std::string> identValues;\n\n\tfor (const auto &iter : *(_idvec)) {\n\t\titem->AddColumn(iter.first, iter.second);\n\t\tidentValues.push_back(iter.second);\n\t}\n\n\tPipeStage::Process(item);\n}\n\nconst std::string BuildSchema::_name { \"BuildSchema\" };\n\n// Track which event schemas have been pushed to the appropriate central SchemasTable\n\n// This unordered set tracks the pushed schemas. The key is a string with these components\n// separated by single forward slashes (\"/\"):\n//\tMDS account moniker (*not* XStore account name)\n//\tFull table name (augmented by namespace prefix and NDay suffix as appropriate)\n//\tMD5 checksum of the canonicalized schema\n// This cache is global and never reset (except by agent restart).\nstd::unordered_set<std::string> BuildSchema::_pushedSchemas;\n\n// The \"target\" metadata tells us where the corresponding SchemasTable should be. The\n// \"fixed\" flag, if true, claims that all events sent to this stage will have exactly\n// the same schema. When it is fixed, it need only be computed once at startup and,\n// if the table rolls every N days, at the beginning of each N day period.\nBuildSchema::BuildSchema(MdsdConfig *config, const MdsEntityName &target, bool fixed)\n\t: _target(target), _schemaIsFixed(fixed), _schemaRequired(false), _lastFullName()\n{\n\t// In order to upload MDS schema metadata, we must use the target's credentials to write to\n\t// an arbitrary table. Local and File table have no credentials at all.\n\tconst Credentials *creds = target.GetCredentials();\n\tif (creds && creds->accessAnyTable()) {\n\t\t// We need to write the schema. 
All we need to do is get a Batch pointer to which we\n\t\t// can write the SchemasTable entry.\n\t\t_schemaRequired = true;\n\t\tMdsEntityName schemaTarget { config, creds };\n\t\t_batch = config->GetBatch(schemaTarget, 60);\n\t\t_moniker = creds->Moniker();\n\t\t_agentIdentity = config->AgentIdentity();\n\t}\n}\n\nvoid\nBuildSchema::Process(CanonicalEntity *item)\n{\n\tif (item && _schemaRequired) {\n\t\tTrace trace(Trace::XTable, \"Pipe::BuildSchema::Process\");\n\n\t\t// This preamble does its best to bail out of schema writing as early and cheaply\n\t\t// as possible. We're silent from a tracing standpoint when taking the bailouts.\n\t\tstd::string fullName = _target.Name();\n\t\tif (_schemaIsFixed && (fullName == _lastFullName)) {\n\t\t\t// Schema is constant, and we've already written it for this tablename\n\t\t\t// (Example: schemas defined by <Schema>)\n\t\t\t// State for this is managed below\n\t\t\tgoto done;\n\t\t}\n\n\t\t// Construct the key used to see if we've pushed this schema already\n\t\tauto metadata = MdsSchemaMetadata::GetOrMake(_target, item);\n\t\tif (!metadata) {\n\t\t\tgoto done;\n\t\t}\n\t\tstd::string key = _moniker + \"/\" + fullName + \"/\" + metadata->GetMD5();\n\t\tif (_pushedSchemas.count(key)) {\n\t\t\t// We've already written it for this schema and tablename\n\t\t\t// (Example: schema computed from an OMI reply and written to a 10day table)\n\t\t\tgoto done;\n\t\t}\n\n\t\t// OK, push the metadata and record it\n\t\tCanonicalEntity schemaCE { 12 };\n\n\t\tstd::string physicalTableName = _target.PhysicalTableName();\n\t\tstd::string rowkey = physicalTableName + \"___\" + metadata->GetMD5();\n\t\tstd::string N = MdsdUtil::ZeroFill(physicalTableName.size() % 10, 19);\n\t\tstd::string pkey = N + \"___\" + MdsdUtil::ZeroFill(MdsTime::FakeTimeStampTicks, 19);\n\t\ttrace.NOTE(\"Schema row: pkey \" + pkey + \" rowkey \" + rowkey);\n\t\tutility::datetime timestamp1601;\n\t\ttimestamp1601 = timestamp1601 + 1;\n\n\t\tschemaCE.AddColumn(\"PartitionKey\", pkey);\n\t\tschemaCE.AddColumn(\"RowKey\", rowkey);\n\t\tschemaCE.AddColumn(\"TIMESTAMP\", new MdsValue(timestamp1601));\n\t\tschemaCE.AddColumn(\"N\", N);\n\t\tschemaCE.AddColumn(\"PhysicalTableName\", physicalTableName);\n\t\tschemaCE.AddColumn(\"MD5Hash\", metadata->GetMD5());\n\t\tschemaCE.AddColumn(\"Schema\", metadata->GetXML());\n\t\tschemaCE.AddColumn(\"Uploader\", _agentIdentity);\n\t\tschemaCE.AddColumn(\"UploadTS\", new MdsValue(MdsTime::Now()));\n\t\tschemaCE.AddColumn(\"Reserved1\", \"\");\n\t\tschemaCE.AddColumn(\"Reserved2\", \"\");\n\t\tschemaCE.AddColumn(\"Reserved3\", \"\");\n\n\t\t_batch->AddRow(schemaCE);\n\t\t_pushedSchemas.insert(key);\n\n\t\t// If the input to this pipeline is always the same (i.e. fixed schema), then\n\t\t// we only have to do this once (or, perhaps, once every N days).\n\t\tif (_schemaIsFixed) {\n\t\t\t// Manage the state required by the bailout-early preamble\n\t\t\tif (_target.IsConstant()) {\n\t\t\t\t_schemaRequired = false;\t// Never have to do it again\n\t\t\t} else {\n\t\t\t\t_lastFullName = fullName;\n\t\t\t}\n\t\t}\n\t\ttrace.NOTE(\"Finished; passing item to next stage\");\n\t}\n\n\tdone:\n\tPipeStage::Process(item);\n}\n\n\n// End of namespace\n}\n\n// vim: se ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/PipeStages.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _PIPESTAGES_HH_\n#define _PIPESTAGES_HH_\n\n#include \"Pipeline.hh\"\n#include \"IdentityColumns.hh\"\n#include \"MdsEntityName.hh\"\n#include \"RowIndex.hh\"\n#include <string>\n#include <unordered_set>\n#include <unordered_map>\n\nclass Batch;\n\n// Used by Pipe::Unpivot to implement <MapName> transforms\nstruct ColumnTransform\n{\npublic:\n\tstd::string Name;\n\tdouble Scale;\n\n\tColumnTransform(std::string name, double scale = 1.0) : Name(name), Scale(scale) {}\n};\n\n// Pipe stages must implement the Process method.\n// Pipe stages that retain data must implement the Done method.\n// Pipe stages must implement a constructor, which can have any parameters that might be required.\n\nnamespace Pipe {\n\nclass Unpivot : public PipeStage\n{\npublic:\n\tUnpivot(const std::string &valueName, const std::string &nameName, const std::string &columns,\n\t\tstd::unordered_map<std::string, ColumnTransform>&& transforms);\n\t// ~Unpivot() {}\n\n\tvoid Process(CanonicalEntity *);\n\tconst std::string& Name() const { return _name; }\n\nprivate:\n\tstatic const std::string _name;\n\tconst std::string _valueName;\n\tconst std::string _nameName;\n\tstd::unordered_set<std::string> _columns;\n\tstd::unordered_map<std::string, ColumnTransform> _transforms;\n};\n\n// BatchWriter class is the final stage in a pipe and is responsible for getting the CanonicalEntity ready for\n// consumption by the sink that lies behind the batch. The principal task is managing the PartitionKey and\n// RowKey that are needed by some, but not all, sinks.\n// If Start() is called, then Done() must be called. If no call Start() is made, there's no need to call Done.\n// StoreType::XTable expects Start/Done pairs so it can correctly generate partition keys.\nclass BatchWriter : public PipeStage\n{\npublic:\n\tBatchWriter(Batch * b, const ident_vect_t *idvec, unsigned int pcount, StoreType::Type storeType);\n\n\tvoid Process(CanonicalEntity *);\n\tconst std::string& Name() const { return _name; }\n\tvoid Start(const MdsTime QIBase) { _qibase = QIBase; }\n\tvoid AddSuccessor(PipeStage *) { throw std::logic_error(\"BatchWriter stage may not have a successor stage\"); }\n\tvoid Done();\n\nprivate:\n\tstatic const std::string _name;\n\tBatch *_batch;\n\tconst ident_vect_t * _idvec;\n\tstd::string _identString;\n\tStoreType::Type _storeType;\n\tstd::string _Nstr;\n\n\tMdsTime _qibase;\n};\n\n// Add \"Identity\" columns to the CanonicalEntity and pass it along\nclass Identity : public PipeStage\n{\npublic:\n\tIdentity(const ident_vect_t * idvec) : _idvec(idvec) {}\n\n\tvoid Process(CanonicalEntity *);\n\tconst std::string& Name() const { return _name; }\n\nprivate:\n\tstatic const std::string _name;\n\tconst ident_vect_t * _idvec;\n};\n\n// Build the MDS server-side schema based on the CanonicalEntity. 
If the combination of schema and full table name\n// (with NDay suffix as appropriate) has not yet been pushed to the matching SchemasTable, arrange for that to happen.\nclass BuildSchema : public PipeStage\n{\npublic:\n\tBuildSchema(MdsdConfig *config, const MdsEntityName &target, bool schemaIsFixed);\n\n\tvoid Process(CanonicalEntity *);\n\tconst std::string& Name() const { return _name; }\n\nprivate:\n\tstatic const std::string _name;\n\tconst MdsEntityName _target;\n\tbool\t\t_schemaIsFixed;\n\tbool\t\t_schemaRequired;\n\tstd::string\t_lastFullName;\n\tstd::string\t_moniker;\n\tstd::string\t_agentIdentity;\n\tBatch*\t\t_batch;\n\n\tstatic std::unordered_set<std::string> _pushedSchemas;\n};\n\n\n}\n\n#endif // _PIPESTAGES_HH_\n\n// vim: se ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Pipeline.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Pipeline.hh\"\n\nPipeStage::PipeStage()\n  :  _next(nullptr)\n{\n}\n\nPipeStage::~PipeStage()\n{\n\tif (_next) {\n\t\tdelete _next;\n\t\t_next = nullptr;\n\t}\n}\n\nvoid\nPipeStage::AddSuccessor(PipeStage *next)\n{\n\t_next = next;\n}\n\nvoid\nPipeStage::Start(const MdsTime QIBase)\n{\n\tif (_next) {\n\t\t_next->Start(QIBase);\n\t}\n}\n\nvoid\nPipeStage::Process(CanonicalEntity *item)\n{\n\tif (item != nullptr) {\n\t\tif (_next) {\n\t\t\t_next->Process(item);\n\t\t} else {\n\t\t\t// Drop on floor; leak the memory, if any.\n\t\t}\n\t}\n}\n\nvoid\nPipeStage::Done()\n{\n\tif (_next) {\n\t\t_next->Done();\n\t}\n}\n\n// vim: se ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Pipeline.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _PIPELINE_HH_\n#define _PIPELINE_HH_\n\n#include \"MdsTime.hh\"\n#include <string>\n\nclass CanonicalEntity;\n\n// You must override Name()\n// If you override Start() you must finish by calling PipeStage::Start(QIbase)\n// If you override Process() and want to pass an item down the pipe, use PipeStage::Process(item)\n// If you override Done() you may call PipeStage::Process(item) and must finish by calling PipeStage::Done()\n\nclass PipeStage\n{\npublic:\n\tvirtual ~PipeStage();\n\n\tvirtual void AddSuccessor(PipeStage *next);\n\tvirtual void Start(const MdsTime QIbase);\n\tvirtual void Process(CanonicalEntity *);\n\tvirtual const std::string& Name() const = 0;\n\tvirtual void Done();\n\nprotected:\n\tPipeStage();\n\nprivate:\n\tPipeStage *_next;\n};\n\t\n\n\n\n\n\n#endif // _PIPELINE_HH_\n"
  },
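  A minimal sketch of the PipeStage contract spelled out in the Pipeline.hh header comment (override Name(); forward items with PipeStage::Process()). The class below is hypothetical and not part of the tree:

      #include "Pipeline.hh"

      // Hypothetical stage: counts the items that flow through, then forwards them.
      class CountingStage : public PipeStage
      {
      public:
          void Process(CanonicalEntity *item) override
          {
              if (item) {
                  ++_count;
              }
              PipeStage::Process(item);   // pass the item down the pipe, per the contract
          }

          const std::string& Name() const override { return _name; }

          unsigned long Count() const { return _count; }

      private:
          static const std::string _name;   // would be defined as "CountingStage" in a .cc file
          unsigned long _count = 0;
      };

  Start() and Done() are not overridden here, so the base-class versions simply propagate down the chain built by AddSuccessor().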
  {
    "path": "Diagnostic/mdsd/mdsd/PoolMgmt.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _POOLMGMT_HH_\n#define _POOLMGMT_HH_\n\n#include <string>\n#include <unordered_set>\n#include <boost/pool/pool_alloc.hpp>\n#include <iostream>\n\n\nclass PoolMgmt\n{\npublic:\n\ttypedef std::basic_string<char, std::char_traits<char>, boost::fast_pool_allocator<char>> PoolString;\n\n\tstruct PoolStringHasher\n  \t{\n      \tstd::size_t operator()(const PoolString& k) const\n      \t{\n         \tsize_t h = std::hash<std::string>()(k.data());\n         \treturn h;\n      \t}\n  \t};\n\n\tstruct PoolStringEqualTo\n\t{\n\t    bool operator()(const PoolString& p1, const PoolString& p2) const \n\t    {\n\t        return (0 == std::strcmp(p1.data(), p2.data()));\n\t    }\n\t};  \t\n\n\t// This will release all memory blocks that aren’t used at the moment.\n\t// The memory will be returned to OS.\n\tstatic void ReleaseMemory()\n\t{\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(char)>::release_memory();\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(int)>::release_memory();\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(PoolString)>::release_memory();\n\t}\n\n\t// This will release all memory blocks including those currently used. \n\t// The memory will be returned to OS.\n\tstatic void PurgeMemory()\n\t{\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(char)>::purge_memory();\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(int)>::purge_memory();\n\t\tboost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(PoolString)>::purge_memory();\t\t\n\t}\n};\n\ntypedef std::unordered_set<PoolMgmt::PoolString, PoolMgmt::PoolStringHasher, PoolMgmt::PoolStringEqualTo, boost::fast_pool_allocator<PoolMgmt::PoolString>> PoolStringUnorderedSet;\n\n\n#endif\n\n// vim: se sw=8 :\n"
  },
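  A minimal usage sketch for the pooled-string set defined above (hypothetical, not in the tree): the strings and the set both draw from boost's singleton pools, and ReleaseMemory() hands unused blocks back once the containers are gone.

      #include "PoolMgmt.hh"

      int main()
      {
          {
              PoolStringUnorderedSet names;
              names.emplace("cpu_total");
              names.emplace("mem_free");
              // Equality goes through PoolStringEqualTo, i.e. strcmp on the buffers.
              bool found = names.count(PoolMgmt::PoolString("cpu_total")) > 0;
              (void)found;
          }   // set and strings destroyed here; their blocks stay in the pools

          PoolMgmt::ReleaseMemory();   // return the now-unused pool blocks to the OS
          return 0;
      }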
  {
    "path": "Diagnostic/mdsd/mdsd/Priority.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Priority.hh\"\n\n#include <map>\n#include \"Utility.hh\"\n\nstatic std::map<std::string, time_t> priorityMap {\n\t{ \"high\", 60 },\n\t{ \"medium\", 300 }, { \"normal\", 300 }, { \"default\", 300 },\n\t{ \"low\", 900 } };\n\nPriority::Priority(const std::string & name)\n{\n\tconst auto &iter = priorityMap.find(MdsdUtil::to_lower(name));\n\tif (iter == priorityMap.end()) {\n\t\t_duration = priorityMap[\"default\"];\n\t} else {\n\t\t_duration = iter->second;\n\t}\n}\n\nbool\nPriority::Set(const std::string & name)\n{\n\tconst auto &iter = priorityMap.find(MdsdUtil::to_lower(name));\n\tif (iter == priorityMap.end()) {\n\t\treturn false;\n\t}\n\n\t_duration = iter->second;\n\treturn true;\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Priority.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _PRIORITY_HH_\n#define _PRIORITY_HH_\n\n#include <string>\n#include <ctime>\n\nclass Priority\n{\npublic:\n\tPriority(const std::string & name);\n\tPriority() : _duration(300) {}\n\t~Priority() {}\n\n\tbool Set(const std::string & name);\n\n\ttime_t Duration() const { return _duration; }\n\nprivate:\n\ttime_t _duration;\n};\n\n#endif // _PRIORITY_HH_\n\n// vim: se sw=8 :\n"
  },
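  Tying Priority.hh and Priority.cc together, a small assertion-style sketch of the lookup behavior (not part of the tree):

      #include "Priority.hh"
      #include <cassert>

      int main()
      {
          Priority p;                   // defaults to 300 seconds ("normal")
          assert(p.Duration() == 300);

          assert(p.Set("HIGH"));        // names are lowercased before lookup
          assert(p.Duration() == 60);

          assert(!p.Set("urgent"));     // unknown name: Set() reports failure...
          assert(p.Duration() == 60);   // ...and keeps the previous duration
          return 0;
      }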
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerBase.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolHandlerBase.hh\"\n\nstd::mutex ProtocolHandlerBase::_static_lock;\nstd::unordered_map<std::string, SchemaCache::IdType> ProtocolHandlerBase::_key_id_map;\n\n\nSchemaCache::IdType\nProtocolHandlerBase::schema_id_for_key(const std::string& key)\n{\n    std::lock_guard<std::mutex> lock(_static_lock);\n\n    auto it = _key_id_map.find(key);\n\n    if (it != _key_id_map.end()) {\n        return it->second;\n    } else {\n        auto id = SchemaCache::Get().GetId();\n        _key_id_map.insert(std::make_pair(key, id));\n        return id;\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerBase.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_HANDLER_BASE_HH\n#define _PROTOCOL_HANDLER_BASE_HH\n\n#include \"SchemaCache.hh\"\n#include <mutex>\n#include <unordered_map>\n\n/*\n * This class exists to eliminate duplicate code shared by the ProtocolHandler classes.\n *\n * The subclasses (e.g. ProtocolHandlerBond) are not, nor intended to be, thread safe.\n * The ProtocolListener classes allocate a separate instance per connection where each connection\n * has a separate thread.\n */\nclass ProtocolHandlerBase\n{\nprotected:\n    virtual ~ProtocolHandlerBase() = default;\n\n    std::unordered_map<uint64_t, SchemaCache::IdType> _id_map;\n\n    static SchemaCache::IdType schema_id_for_key(const std::string& key);\n    static std::mutex _static_lock;\n    static std::unordered_map<std::string, SchemaCache::IdType> _key_id_map;\n};\n\n\n#endif //_PROTOCOL_HANDLER_BASE_HH\n"
  },
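  Both concrete handlers repeat the same two-level lookup: a per-connection _id_map in front of the process-wide _key_id_map behind schema_id_for_key(). A condensed sketch of that pattern (the subclass and method below are hypothetical):

      #include "ProtocolHandlerBase.hh"

      class ProtocolHandlerDemo : public ProtocolHandlerBase
      {
      public:
          SchemaCache::IdType MapSchemaId(uint64_t wireId, const std::string& schemaKey)
          {
              auto it = _id_map.find(wireId);
              if (it != _id_map.end()) {
                  return it->second;            // already resolved on this connection
              }
              // Falls through to the static map, taken under _static_lock.
              auto id = schema_id_for_key(schemaKey);
              _id_map.insert(std::make_pair(wireId, id));
              return id;
          }
      };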
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerBond.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolHandlerBond.hh\"\n\n#include <cassert>\n\n#include \"Logger.hh\"\n#include \"MdsValue.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Trace.hh\"\n#include \"LocalSink.hh\"\n#include \"Utility.hh\"\n\nextern \"C\" {\n#include <unistd.h>\n}\n\nProtocolHandlerBond::~ProtocolHandlerBond()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerBond::Destructor\");\n    close(_fd);\n    Logger::LogInfo(std::string(\"ProtocolHandlerBond: Connection on \") + std::to_string(_fd) +  \" closed\");\n}\n\nvoid ProtocolHandlerBond::Run()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerBond::Run\");\n    while(true)\n    {\n        try\n        {\n            mdsdinput::Message msg;\n            mdsdinput::Ack ack;\n\n            // Read message\n            _io.ReadMessage(msg);\n\n            // Process message\n            ack.msgId = msg.msgId;\n            ack.code = handleEvent(msg);\n\n            // Ack message\n            _io.WriteAck(ack);\n        }\n        catch (mdsdinput::eof_exception)\n        {\n            Logger::LogInfo(std::string(\"ProtocolHandlerBond: EOF on \") + std::to_string(_fd));\n            return;\n        }\n        catch (mdsdinput::msg_too_large_error)\n        {\n            Logger::LogWarn(std::string(\"ProtocolHandlerBond: Received oversized message on \") + std::to_string(_fd));\n            return;\n        }\n        catch (std::exception& ex)\n        {\n            Logger::LogError(std::string(\"ProtocolHandlerBond: Unexpected exception while processing messages on \") + std::to_string(_fd) + \": \" + ex.what());\n            return;\n        }\n    }\n}\n\nclass FieldReceiver\n{\npublic:\n    FieldReceiver(CanonicalEntity& ce)\n        : _ce(ce)\n    {}\n\n    void BoolField(const std::string& name, bool value)\n    {\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(value));\n    }\n\n    void Int32Field(const std::string& name, int32_t value)\n    {\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(static_cast<long>(value)));\n    }\n\n    void Int64Field(const std::string& name, int64_t value)\n    {\n        // The explicit cast is necessary. 
Without it, the value will get treated as mt_int32.\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(static_cast<long long>(value)));\n    }\n\n    void DoubleField(const std::string& name, double value)\n    {\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(value));\n    }\n\n    void TimeField(const std::string& name, const mdsdinput::Time& value, bool isTimestampField)\n    {\n        MdsTime time(value.sec, value.nsec/1000);\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(time));\n        if (isTimestampField)\n        {\n            _ce.SetPreciseTime(time);\n        }\n    }\n\n    void StringField(const std::string& name, const std::string& value)\n    {\n        _ce.AddColumnIgnoreMetaData(name, new MdsValue(value));\n    }\nprivate:\n    CanonicalEntity& _ce;\n};\n\nmdsdinput::ResponseCode\nProtocolHandlerBond::handleEvent(const mdsdinput::Message& msg)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerBond::handleEvent\");\n\n    TRACEINFO(trace, \"Received msg {MsgId: \" << msg.msgId << \", Source: \" << msg.source << \"}\");\n\n    auto sink = LocalSink::Lookup(msg.source);\n    if (!sink)\n    {\n        Logger::LogWarn(\"Received an event from source \\\"\" + msg.source + \"\\\" not used elsewhere in the active configuration\");\n        return mdsdinput::ACK_INVALID_SOURCE;\n    }\n\n    // This check may be overly restrictive.\n    // Perhaps we should allow it if the message's dynamically defined schema matches the predefined schema.\n    if (sink->SchemaId() != 0)\n    {\n        Logger::LogWarn(\"ProtocolHandlerBond: Received an event from source \\\"\" + msg.source + \"\\\" that is not valid for dynamic schema input\");\n        return mdsdinput::ACK_INVALID_SOURCE;\n    }\n\n    auto ce = std::make_shared<CanonicalEntity>();\n    ce->SetPreciseTime(MdsTime::Now());\n\n    FieldReceiver fr(*ce);\n    auto responseCode = _decoder.Decode(msg, fr);\n    if (mdsdinput::ACK_SUCCESS == responseCode)\n    {\n        SchemaCache::IdType schemaId;\n        auto it = _id_map.find(msg.schemaId);\n        if (it != _id_map.end())\n        {\n            schemaId = it->second;\n        } else {\n            schemaId = schema_id_for_key(_decoder.GetSchemaKey(msg.schemaId));\n            _id_map.insert(std::make_pair(msg.schemaId, schemaId));\n            TRACEINFO(trace, \"Mapped connection schemaId (\" << msg.schemaId << \") to SchemaCache id (\" << schemaId << \")\");\n        }\n\n        ce->SetSchemaId(schemaId);\n\n        TRACEINFO(trace, \"Message added to LocalSink with schemaId (\" << schemaId << \")\");\n        sink->AddRow(ce);\n    }\n\n    return responseCode;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerBond.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_HANDLER_BOND_HH_\n#define _PROTOCOL_HANDLER_BOND_HH_\n\n#include \"ProtocolHandlerBase.hh\"\n#include \"MdsdInputMessageIO.h\"\n#include \"MdsdInputMessageDecoder.h\"\n\n/*\n * This class is not, nor is it intended to be, thread safe.\n *\n * ProtocolListenerBond allocates a separate instance of this class per connection\n * and each connection is handled by a separate thread.\n */\nclass ProtocolHandlerBond: public ProtocolHandlerBase\n{\npublic:\n    explicit ProtocolHandlerBond(int fd)\n        : _fd(fd), _fdio(fd), _io(_fdio)\n    {}\n\n    ~ProtocolHandlerBond();\n\n    void Run();\n\nprivate:\n    mdsdinput::ResponseCode handleEvent(const mdsdinput::Message& msg);\n\n    int _fd;\n    mdsdinput::FDIO _fdio;\n    mdsdinput::MessageIO<mdsdinput::FDIO> _io;\n    mdsdinput::MessageDecoder _decoder;\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_HANDLER_BOND_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerJSON.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n#include \"ProtocolHandlerJSON.hh\"\n\n#include \"Logger.hh\"\n#include \"MdsValue.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Trace.hh\"\n#include \"LocalSink.hh\"\n#include \"Utility.hh\"\n\nextern \"C\" {\n#include <unistd.h>\n}\n\nProtocolHandlerJSON::~ProtocolHandlerJSON()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::Destructor\");\n    close(_fd);\n    Logger::LogInfo(std::string(\"ProtocolHandlerJSON: Connection on \") + std::to_string(_fd) +  \" closed\");\n}\n\nvoid ProtocolHandlerJSON::Run()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::Run\");\n    msg_data_t msg_data;\n    while(true)\n    {\n        try\n        {\n            mdsdinput::Ack ack;\n\n            // Read message\n            size_t msg_size = readMsgSize();\n            readMsgData(msg_data, msg_size);\n\n            // Process message\n            ack = handleMsg(msg_data);\n\n            // Ack message\n            writeAck(ack.msgId, ack.code);\n        }\n        catch (mdsdinput::eof_exception)\n        {\n            Logger::LogInfo(std::string(\"ProtocolHandlerJSON: EOF on \") + std::to_string(_fd));\n            return;\n        }\n        catch (mdsdinput::msg_too_large_error)\n        {\n            Logger::LogWarn(std::string(\"ProtocolHandlerJSON: Received oversized message on \") + std::to_string(_fd));\n            return;\n        }\n        catch (std::exception& ex)\n        {\n            Logger::LogError(std::string(\"ProtocolHandlerJSON: Unexpected exception while processing messages on \") + std::to_string(_fd) + \": \" + ex.what());\n            return;\n        }\n    }\n}\n\nsize_t ProtocolHandlerJSON::readMsgSize()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::readMsgSize\");\n    char sbuf[8];\n    size_t sidx = 0;\n\n    do {\n        ssize_t n = read(_fd, &sbuf[sidx], 1);\n        if (n < 0)\n        {\n            if (errno != EINTR)\n            {\n                throw std::system_error(errno, std::system_category());\n            }\n        }\n        else if (n == 0)\n        {\n            throw mdsdinput::eof_exception();\n        }\n        else\n        {\n            if (sbuf[sidx] == '\\n')\n            {\n                break;\n            }\n            sidx++;\n        }\n    } while (sidx < sizeof(sbuf));\n\n    if (sidx == sizeof(sbuf))\n    {\n        throw mdsdinput::msg_too_large_error(\"ProtocolHandlerJSON: Message size string is too long\");\n    }\n\n    sbuf[sidx] = 0;\n\n    size_t size = std::stoul(sbuf);\n    if (size == 0 || size > MAX_MSG_DATA_SIZE)\n    {\n        throw std::runtime_error(\"Invalid message size\");\n    }\n\n    return size;\n}\n\nvoid ProtocolHandlerJSON::readMsgData(msg_data_t& msg_data, size_t size)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::readMsgData\");\n\n    char* ptr = &msg_data[0];\n    size_t idx = 0;\n    do {\n        ssize_t n = read(_fd, ptr, size - idx);\n        if (n < 0)\n        {\n            if (errno != EINTR)\n            {\n                throw std::system_error(errno, std::system_category());\n            }\n        }\n        else if (n == 0)\n        {\n            throw mdsdinput::eof_exception();\n        }\n        else\n        {\n            idx += n;\n            ptr += n;\n        }\n    } while (idx < size);\n    msg_data[size] = 0;\n}\n\nvoid ProtocolHandlerJSON::writeAck(uint64_t msgId, 
mdsdinput::ResponseCode rcode)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::writeAck\");\n\n    std::ostringstream out;\n    out << msgId << \":\" << rcode << std::endl;\n    auto str = out.str();\n    ssize_t n = write(_fd, str.c_str(), str.size());\n    if (n < 0)\n    {\n        throw std::system_error(errno, std::system_category());\n    }\n    else if (n < static_cast<ssize_t>(str.size()))\n    {\n        throw mdsdinput::eof_exception();\n    }\n}\n\nmdsdinput::Ack ProtocolHandlerJSON::decodeMsg(msg_data_t& msg_data, std::string& source, CanonicalEntity& ce)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::decodeMsg\");\n\n    mdsdinput::Ack ack;\n    rapidjson::Document d;\n    d.ParseInsitu(&msg_data[0]);\n\n    ack.code = mdsdinput::ACK_DECODE_ERROR;\n\n    // Build/fetch schema\n    if (!d.IsArray())\n    {\n        throw std::runtime_error(\"Invalid JSON document: Was not an array\");\n    }\n    if (d.Size() != 5)\n    {\n        std::ostringstream msg;\n        msg << \"Invalid JSON document: Array size invalid: Expected 5, got \" << d.Size();\n        throw std::runtime_error(msg.str());\n    }\n\n    const rapidjson::Value& jsource = d[0];\n    const rapidjson::Value& jmsgId = d[1];\n    const rapidjson::Value& jschemaId = d[2];\n    const rapidjson::Value& jschema = d[3];\n    const rapidjson::Value& jmsgdata = d[4];\n\n    if (!jsource.IsString())\n    {\n        throw std::runtime_error(\"Invalid JSON document: source (0) is not a String\");\n    }\n\n    if (!jmsgId.IsNumber())\n    {\n        throw std::runtime_error(\"Invalid JSON document: msgId (1) is not a Number\");\n    }\n\n    if (!jschemaId.IsNumber())\n    {\n        throw std::runtime_error(\"Invalid JSON document: schemaId (2) is not a Number\");\n    }\n\n    if (!jmsgdata.IsArray())\n    {\n        throw std::runtime_error(\"Invalid JSON document: data (4) is not an Array\");\n    }\n\n    if (!jschema.IsNull() && !jschema.IsArray())\n    {\n        throw std::runtime_error(\"Invalid JSON document: schema (3) is not an Array\");\n    }\n\n    auto schema_id = jschemaId.GetUint64();\n\n    std::shared_ptr<mdsdinput::SchemaDef> schema;\n    if (!jschema.IsNull())\n    {\n        bool hasTimestampIndex = false;\n        uint32_t timestampIndex;\n        schema = std::make_shared<mdsdinput::SchemaDef>();\n\n        for (rapidjson::Value::ConstValueIterator it = jschema.Begin(); it != jschema.End(); ++it)\n        {\n            if (it == jschema.Begin() && !it->IsArray())\n            {\n                // If the first element of the array is not an array, not null, and is an unsigned integer\n                // then use it as the timestamp index.\n                if (!it->IsNull() && it->IsUint())\n                {\n                    hasTimestampIndex = true;\n                    timestampIndex = static_cast<uint32_t>(it->GetUint64());\n                }\n            }\n            else\n            {\n                if (!it->IsArray() || it->Size() != 2)\n                {\n                    throw std::runtime_error(\"Invalid Schema\");\n                }\n                const rapidjson::Value &name = (*it)[0];\n                const rapidjson::Value &ft = (*it)[1];\n\n                if (!name.IsString() || !ft.IsString())\n                {\n                    throw std::runtime_error(\"Invalid Schema\");\n                }\n                mdsdinput::FieldDef fd;\n                fd.name = name.GetString();\n                if (!ToEnum(fd.fieldType, 
ft.GetString()))\n                {\n                    throw std::runtime_error(\"Invalid Schema\");\n                }\n                schema->fields.push_back(fd);\n            }\n        }\n\n        if (hasTimestampIndex)\n        {\n            if (timestampIndex < schema->fields.size())\n            {\n                schema->timestampFieldIdx.set(timestampIndex);\n            }\n        }\n\n        if (!_schema_cache->AddSchemaWithId(schema, schema_id))\n        {\n            ack.code = mdsdinput::ACK_DUPLICATE_SCHEMA_ID;\n            return ack;\n        }\n    }\n    else\n    {\n        try\n        {\n            schema = _schema_cache->GetSchema(schema_id);\n        }\n        catch(std::out_of_range)\n        {\n            ack.code = mdsdinput::ACK_UNKNOWN_SCHEMA_ID;\n            return ack;\n        }\n    }\n\n    if (schema->fields.size() != jmsgdata.Size())\n    {\n        std::ostringstream msg;\n        msg << \"Invalid message data: Array size invalid: Expected \" << schema->fields.size() << \", got \" << jmsgdata.Size();\n        throw std::runtime_error(msg.str());\n    }\n\n    ack.msgId = jmsgId.GetInt64();\n    source = std::string(jsource.GetString(), jsource.GetStringLength());\n\n    //\n    for (int i = 0; i < (int)schema->fields.size(); ++i)\n    {\n        mdsdinput::FieldDef fd = schema->fields.at(i);\n        const rapidjson::Value& val = jmsgdata[i];\n        switch (fd.fieldType)\n        {\n            case mdsdinput::FT_INVALID:\n                ack.code = mdsdinput::ACK_DECODE_ERROR;\n                return ack;\n            case mdsdinput::FT_BOOL:\n                if (!val.IsBool())\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                ce.AddColumnIgnoreMetaData(fd.name, new MdsValue(val.GetBool()));\n                break;\n            case mdsdinput::FT_INT32:\n                if (!val.IsInt())\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                ce.AddColumnIgnoreMetaData(fd.name, new MdsValue(static_cast<long>(val.GetInt())));\n                break;\n            case mdsdinput::FT_INT64:\n                if (!val.IsInt64())\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                // The explicit cast is necessary. 
Without it, the value will get treated as mt_int32.\n                ce.AddColumnIgnoreMetaData(fd.name, new MdsValue(static_cast<long long>(val.GetInt64())));\n                break;\n            case mdsdinput::FT_DOUBLE:\n                if (!val.IsNumber())\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                ce.AddColumnIgnoreMetaData(fd.name, new MdsValue(val.GetDouble()));\n                break;\n            case mdsdinput::FT_TIME:\n                if (!val.IsArray() || val.Size() != 2)\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                {\n                    MdsTime time(val[0].GetUint64(), val[1].GetUint()/1000);\n                    ce.AddColumnIgnoreMetaData(fd.name,  new MdsValue(time));\n                    if (!schema->timestampFieldIdx.empty() && static_cast<uint32_t>(i) == *(schema->timestampFieldIdx))\n                    {\n                        ce.SetPreciseTime(time);\n                    }\n                }\n                break;\n            case mdsdinput::FT_STRING:\n                if (!val.IsString())\n                {\n                    throw std::runtime_error(\"Invalid Message data\");\n                }\n                ce.AddColumnIgnoreMetaData(fd.name, new MdsValue(std::string(val.GetString(), val.GetStringLength())));\n                break;\n            default:\n                throw std::runtime_error(\"Invalid field type in schema\");\n        }\n    }\n\n    SchemaCache::IdType mdsdSchemaId;\n    auto it = _id_map.find(schema_id);\n    if (it != _id_map.end())\n    {\n        mdsdSchemaId = it->second;\n    } else {\n        mdsdSchemaId = schema_id_for_key(_schema_cache->GetSchemaKey(schema_id));\n        _id_map.insert(std::make_pair(schema_id, mdsdSchemaId));\n        TRACEINFO(trace, \"Mapped connection schemaId (\"+std::to_string(schema_id)+\") to SchemaCache id (\"+std::to_string(mdsdSchemaId)+\")\");\n    }\n\n    ce.SetSchemaId(mdsdSchemaId);\n\n    ack.code = mdsdinput::ACK_SUCCESS;\n\n    return ack;\n}\n\n\nmdsdinput::Ack\nProtocolHandlerJSON::handleMsg(msg_data_t& msg_data)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolHandlerJSON::handleMsg\");\n\n    mdsdinput::Ack ack;\n    std::string source;\n\n    auto ce = std::make_shared<CanonicalEntity>();\n    ce->SetPreciseTime(MdsTime::Now());\n\n    try\n    {\n        ack = decodeMsg(msg_data, source, *ce);\n    }\n    catch(std::exception& ex)\n    {\n        std::ostringstream strm;\n        strm << \"ProtocolHandlerJSON: Error decoding message '\";\n        for(auto c: msg_data) {\n            strm << c;\n        }\n        strm << \"' from fd \" << _fd << \": \" << ex.what();\n\n        Logger::LogWarn(strm);\n        ack.code = mdsdinput::ACK_DECODE_ERROR;\n    }\n\n    if (ack.code == mdsdinput::ACK_SUCCESS)\n    {\n\n        auto sink = LocalSink::Lookup(source);\n        if (!sink)\n        {\n            Logger::LogWarn(\"ProtocolHandlerJSON: Received an event from source \\\"\" + source +\n                            \"\\\" not used elsewhere in the active configuration\");\n            ack.code = mdsdinput::ACK_INVALID_SOURCE;\n        }\n        else\n        {\n            TRACEINFO(trace, \"Message added to LocalSink\");\n            sink->AddRow(ce);\n        }\n    }\n\n    return ack;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolHandlerJSON.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_HANDLER_JSON_HH_\n#define _PROTOCOL_HANDLER_JSON_HH_\n\n#include \"ProtocolHandlerBase.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"MdsdInputMessageIO.h\"\n#include \"MdsdInputMessageDecoder.h\"\n#include \"MdsdInputSchemaCache.h\"\n#include <array>\n\n#include \"rapidjson/document.h\"\n#include \"rapidjson/stringbuffer.h\"\n\n\n/*\n * This class is not, nor is it intended to be, thread safe.\n *\n * ProtocolListenerDynamicJSON allocates a separate instance of this class per connection\n * and each connection is handled by a separate thread.\n */\nclass ProtocolHandlerJSON: public ProtocolHandlerBase\n{\npublic:\n    static constexpr size_t MAX_MSG_DATA_SIZE = 128 * 1024-1;\n    typedef std::array<char, MAX_MSG_DATA_SIZE+1> msg_data_t;\n\n    explicit ProtocolHandlerJSON(int fd)\n        : _fd(fd), _schema_cache(std::make_shared<mdsdinput::SchemaCache>())\n    {}\n\n    ~ProtocolHandlerJSON();\n\n    void Run();\n\nprivate:\n    size_t readMsgSize();\n    void readMsgData(msg_data_t& msg_data, size_t size);\n\n    void writeAck(uint64_t msgId, mdsdinput::ResponseCode rcode);\n\n    mdsdinput::Ack decodeMsg(msg_data_t& msg_data, std::string& source, CanonicalEntity& ce);\n\n    mdsdinput::Ack handleMsg(msg_data_t& msg_data);\n\n    int _fd;\n    std::shared_ptr<mdsdinput::SchemaCache> _schema_cache;\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_HANDLER_JSON_HH_\n"
  },
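  Read together with ProtocolHandlerJSON.cc above, the wire format is: a decimal payload size terminated by '\n' (readMsgSize), then that many bytes holding a 5-element JSON array [source, msgId, schemaId, schema-or-null, data] (decodeMsg), answered by a "msgId:code" line (writeAck). A client-side framing sketch under those assumptions -- the field-type token below is a guess, and the real spellings are whatever mdsdinput's ToEnum accepts:

      #include <sstream>
      #include <string>

      // Builds one frame: a decimal byte count (at most 8 digits), '\n', then the JSON.
      std::string FrameMessage(const std::string& payload)
      {
          std::ostringstream frame;
          frame << payload.size() << "\n" << payload;
          return frame.str();
      }

      int main()
      {
          // [source, msgId, schemaId, schema, data]. "FT_STRING" is hypothetical.
          std::string payload = R"(["syslog",1,1,[["msg","FT_STRING"]],["hello world"]])";
          std::string frame = FrameMessage(payload);
          // After write(fd, frame.data(), frame.size()), the handler replies with a
          // line "msgId:code\n" -- e.g. "1:0\n" if ACK_SUCCESS is 0.
          (void)frame;
          return 0;
      }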
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListener.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListener.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n\n#include <cstring>\n#include <map>\n#include <boost/algorithm/string.hpp>\n#include <chrono>\n\nextern \"C\" {\n#include <sys/socket.h>\n#include <sys/un.h>\n#include <sys/stat.h>\n#include <netinet/in.h>\n#include <poll.h>\n\nextern void StopProtocolListenerMgr();\n}\n\nProtocolListener::~ProtocolListener()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListener::handleEvent\");\n    Stop();\n}\n\nvoid\nProtocolListener::openListener()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListener::openListener\");\n\n    int fd = MdsdUtil::CreateAndBindUnixSocket(_file_path);\n\n    // Allow processes under non-root UID (e.g., rsyslogd on Ubuntu) to send msg to this\n    mode_t mode = 0666;\n    if (-1 == chmod(_file_path.c_str(), mode)) {\n        close(fd);\n        throw std::system_error(errno, std::system_category(),\n            \"chmod(\" + _file_path + \", \" + std::to_string(mode) + \")\");\n    }\n\n    _listenfd = fd;\n}\n\nvoid\nProtocolListener::Start()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListener::Start\");\n\n    std::unique_lock<std::mutex> lock(_lock);\n\n    if (_thread.get_id() != std::thread::id())\n    {\n        return;\n    }\n\n    openListener();\n\n    if (listen(_listenfd, 10))\n    {\n        throw std::system_error(errno, std::system_category(), \"listen()\");\n    }\n\n    std::thread thread([this](){this->run();});\n    _thread.swap(thread);\n}\n\nvoid\nProtocolListener::Stop()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListener::Stop\");\n\n    std::unique_lock<std::mutex> lock(_lock);\n\n    if (_listenfd != -1)\n    {\n        close(_listenfd);\n        _listenfd = -1;\n        _thread.detach();\n    }\n}\n\nbool\nProtocolListener::stopCheck()\n{\n    std::lock_guard<std::mutex> lock(_lock);\n    return _listenfd == -1 || std::this_thread::get_id() != _thread.get_id();\n}\n\nvoid\nProtocolListener::run()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListener::run\");\n\n    int lfd;\n    {\n        std::lock_guard<std::mutex> lock(_lock);\n        lfd = _listenfd;\n    }\n    while(!stopCheck()) {\n        struct pollfd fds[1];\n        fds[0].fd = lfd;\n        fds[0].events = POLLIN;\n        fds[0].revents = 0;\n\n        int r = poll(&fds[0], 1, 1000);\n        if (r < 0)\n        {\n            if (errno == EINTR)\n            {\n                continue;\n            }\n\n            if (!stopCheck())\n            {\n                // Log all other errors and return.\n                Logger::LogError(std::string(\"ProtocolListener(\" + _protocol + \"): poll() returned an unexpected error: \") + std::strerror(errno));\n\n                // Initiate a clean process exit.\n                StopProtocolListenerMgr();\n                // After calling StopProtocolListenerMgr() the only safe thing to do is return.\n            }\n\n            return;\n        }\n        if (r == 1)\n        {\n            int newfd = accept(lfd, NULL, 0);\n            if (newfd > 0)\n            {\n                if (!stopCheck())\n                {\n                    HandleConnection(newfd);\n                }\n                else\n                {\n                    close(newfd);\n                }\n            }\n            else\n            {\n                // If accept was interrupted, or the connection was 
reset (RST)\n                // before it could be accepted, then just continue.\n                if (errno == EINTR || errno == ECONNABORTED)\n                {\n                    continue;\n                }\n\n                // If the per-process (EMFILE) or system (ENFILE) descriptor limit is reached\n                // then sleep for a while in the hope that the situation will improve.\n                if (errno == EMFILE || errno == ENFILE)\n                {\n                    Logger::LogError(std::string(\"ProtocolListener(\") + _protocol + \"): descriptor limit reached: \" + std::strerror(errno));\n                    Logger::LogWarn(std::string(\"ProtocolListener(\" + _protocol + \"): waiting 1 minute before trying to accept new connections\"));\n                    std::this_thread::sleep_for(std::chrono::seconds(60));\n                    continue;\n                }\n\n                if (!stopCheck())\n                {\n                    // Log all other errors and return.\n                    Logger::LogError(std::string(\"ProtocolListener(\" + _protocol + \"): accept() returned an unexpected error: \") + std::strerror(errno));\n\n                    // Other errors indicate (probably) fatal conditions.\n                    // Initiate a clean process exit.\n                    StopProtocolListenerMgr();\n                    // After calling StopProtocolListenerMgr() the only safe thing to do is return.\n                }\n\n                return;\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListener.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_HH_\n#define _PROTOCOL_LISTENER_HH_\n\n#include <thread>\n#include <mutex>\n#include <string>\n\nclass ProtocolListener\n{\npublic:\n    virtual ~ProtocolListener();\n\n    std::string Protocol() { return _protocol; }\n\n    void Start();\n    void Stop();\n\n    std::string FilePath() { return _file_path; };\n\nprotected:\n    ProtocolListener(const std::string& prefix, const std::string& protocol)\n            : _prefix(prefix), _protocol(protocol), _listenfd(-1)\n    {\n        _file_path = _prefix + \"_\" + _protocol + \".socket\";\n    }\n\n    virtual void openListener();\n    virtual void HandleConnection(int fd) = 0;\n\n    std::string _prefix;\n    std::string _protocol;\n\n    std::string _file_path;\n\n    std::mutex _lock;\n    int _listenfd;\n    std::thread _thread;\n\n    bool stopCheck();\n    void run();\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_HH_\n"
  },
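  The base class owns the socket setup (openListener binds <prefix>_<protocol>.socket) and the poll/accept loop; a subclass only supplies HandleConnection(). A hypothetical minimal listener, not part of the tree:

      #include "ProtocolListener.hh"

      extern "C" {
      #include <unistd.h>
      }

      // Hypothetical: listens on <prefix>_echo.socket and drops each connection.
      class ProtocolListenerEcho : public ProtocolListener
      {
      public:
          ProtocolListenerEcho(const std::string& prefix)
              : ProtocolListener(prefix, "echo")
          {}

      protected:
          void HandleConnection(int fd) override
          {
              close(fd);   // a real handler would hand fd off to a worker thread
          }
      };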
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerBond.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListenerBond.hh\"\n#include \"ProtocolHandlerBond.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n\n#include <sstream>\n\nstatic\nvoid\nhandler(int fd)\n{\n    ProtocolHandlerBond h(fd);\n\n    h.Run();\n}\n\nvoid\nProtocolListenerBond::HandleConnection(int fd)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerBond::HandleConnection\");\n\n    std::thread thread(handler, fd);\n    std::ostringstream out;\n    out << \"ProtocolListenerBond: Created BOND thread \" << thread.get_id() << \" for fd \" << fd;\n    thread.detach();\n    Logger::LogInfo(out.str());\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerBond.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_BOND_HH_\n#define _PROTOCOL_LISTENER_BOND_HH_\n\n#include \"ProtocolListener.hh\"\n\nclass ProtocolListenerBond : public ProtocolListener\n{\npublic:\n    ProtocolListenerBond(const std::string& prefix)\n        : ProtocolListener(prefix, \"bond\")\n    {}\n\nprotected:\n    virtual void HandleConnection(int fd);\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_BOND_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerDynamicJSON.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListenerDynamicJSON.hh\"\n#include \"ProtocolHandlerJSON.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n\nstatic\nvoid\nhandler(int fd)\n{\n    ProtocolHandlerJSON h(fd);\n\n    h.Run();\n}\n\nvoid\nProtocolListenerDynamicJSON::HandleConnection(int fd)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerDynamicJSON::HandleConnection\");\n\n    std::thread thread(handler, fd);\n    std::ostringstream out;\n    out << \"ProtocolListenerDynamicJSON: Created Dynamic JSON thread \" << thread.get_id() << \" for fd \" << fd;\n    Logger::LogInfo(out.str());\n    thread.detach();\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerDynamicJSON.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_DYNAMIC_JSON_HH_\n#define _PROTOCOL_LISTENER_DYNAMIC_JSON_HH_\n\n#include \"ProtocolListener.hh\"\n\nclass ProtocolListenerDynamicJSON : public ProtocolListener\n{\npublic:\n    ProtocolListenerDynamicJSON(const std::string& prefix)\n        : ProtocolListener(prefix, \"djson\")\n    {}\n\nprotected:\n    virtual void HandleConnection(int fd);\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_DYNAMIC_JSON_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerJSON.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListenerJSON.hh\"\n#include \"StreamListener.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n\nstatic\nvoid\nhandler(int fd)\n{\n    StreamListener::handler(new StreamListener(fd));\n}\n\nvoid\nProtocolListenerJSON::HandleConnection(int fd)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerJSON::HandleConnection\");\n\n    std::thread thread(handler, fd);\n    std::ostringstream out;\n    out << \"ProtocolListenerJSON: Created JSON thread \" << thread.get_id() << \" for fd \" << fd;\n    Logger::LogInfo(out.str());\n    thread.detach();\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerJSON.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_JSON_HH_\n#define _PROTOCOL_LISTENER_JSON_HH_\n\n#include \"ProtocolListener.hh\"\n\nclass ProtocolListenerJSON : public ProtocolListener\n{\npublic:\n    ProtocolListenerJSON(const std::string& prefix)\n        : ProtocolListener(prefix, \"json\")\n    {}\n\nprotected:\n    virtual void HandleConnection(int fd);\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_JSON_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerMgr.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListenerMgr.hh\"\n#include \"ProtocolListenerDynamicJSON.hh\"\n#include \"ProtocolListenerJSON.hh\"\n#include \"ProtocolListenerTcpJSON.hh\"\n#include \"ProtocolListenerBond.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n\nextern \"C\" {\n#include <unistd.h>\n}\n\nstatic MdsdUtil::LockedFile pidPortFile;\n\nProtocolListenerMgr::~ProtocolListenerMgr() {\n    Trace trace(Trace::EventIngest, \"ProtocolListenerMgr::Destructor\");\n}\n\nvoid\nProtocolListenerMgr::Init(const std::string& prefix, int port, bool retry_random)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerMgr::Init\");\n\n    TRACEINFO(trace, \"Prefix: \" + prefix + \", Port: \" + std::to_string(port));\n\n    if (nullptr == _mgr)\n    {\n        _mgr = new ProtocolListenerMgr(prefix, port, retry_random);\n    }\n}\n\nProtocolListenerMgr* ProtocolListenerMgr::_mgr = nullptr;\n\nProtocolListenerMgr*\nProtocolListenerMgr::GetProtocolListenerMgr()\n{\n    return _mgr;\n}\n\nbool\nProtocolListenerMgr::Start()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerMgr::Start\");\n\n    std::unique_lock<std::mutex> lock(_lock);\n    if (_stop)\n    {\n        bool failed = false;\n        _stop = false;\n        pidPortFile.Open(_prefix + \".pidport\");\n        pidPortFile.WriteLine(std::to_string(getpid()));\n\n        _bond_listener.reset(new ProtocolListenerBond(_prefix));\n        _djson_listener.reset(new ProtocolListenerDynamicJSON(_prefix));\n        _json_listener.reset(new ProtocolListenerJSON(_prefix));\n        _tcp_json_listener.reset(new ProtocolListenerTcpJSON(_prefix, _port, _retry_random));\n\n        try\n        {\n            _bond_listener->Start();\n        }\n        catch (std::system_error& ex)\n        {\n            _bond_listener.release();\n            Logger::LogError(std::string(\"ProtocolListenerMgr: BOND Listener failed to start: \") + ex.what());\n            failed = true;\n        }\n\n        if (!failed)\n        {\n            try\n            {\n                _djson_listener->Start();\n            }\n            catch (std::system_error &ex)\n            {\n                _djson_listener.release();\n                Logger::LogError(std::string(\"ProtocolListenerMgr: Dynamic JSON Listener failed to start: \") + ex.what());\n                failed = true;\n            }\n        }\n\n        if (!failed)\n        {\n            try\n            {\n                _json_listener->Start();\n            }\n            catch (std::system_error &ex)\n            {\n                _json_listener.release();\n                Logger::LogError(std::string(\"ProtocolListenerMgr: JSON Listener failed to start: \") + ex.what());\n                failed = true;\n            }\n        }\n\n        if (!failed)\n        {\n            try\n            {\n                _tcp_json_listener->Start();\n                pidPortFile.WriteLine(std::to_string(static_cast<ProtocolListenerTcpJSON *>(_tcp_json_listener.get())->Port()));\n            }\n            catch (std::system_error &ex)\n            {\n                _tcp_json_listener.release();\n                Logger::LogError(std::string(\"ProtocolListenerMgr: TCP JSON Listener failed to start: \") + ex.what());\n                failed = true;\n            }\n        }\n\n        // One of the listeners failed to start. 
Stop the manager so things get cleaned up before process exit.\n        if (failed)\n        {\n            _lock.unlock();\n            Stop();\n            return false;\n        }\n    }\n    return true;\n}\n\nvoid\nProtocolListenerMgr::Stop()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerMgr::Stop\");\n\n    std::lock_guard<std::mutex> lock(_lock);\n    if (!_stop) {\n        try\n        {\n            if (_bond_listener)\n            {\n                _bond_listener->Stop();\n                unlink(_bond_listener->FilePath().c_str());\n                _bond_listener.release();\n            }\n            if (_djson_listener)\n            {\n                _djson_listener->Stop();\n                unlink(_djson_listener->FilePath().c_str());\n                _djson_listener.release();\n            }\n            if (_json_listener)\n            {\n                _json_listener->Stop();\n                unlink(_json_listener->FilePath().c_str());\n                _json_listener.release();\n            }\n            if (_tcp_json_listener)\n            {\n                _tcp_json_listener->Stop();\n                _tcp_json_listener.release();\n            }\n        }\n        catch(std::exception& ex) {\n            Logger::LogError(\"Error: ProtocolListenerMgr::Stop() unexpected exception while stopping listeners: \" + std::string(ex.what()));\n        }\n        catch(...) {\n            Logger::LogError(\"Error: ProtocolListenerMgr::Stop() unknown exception while stopping listeners.\");\n        }\n\n        try {\n            pidPortFile.Remove();\n        }\n        catch(std::exception& ex) {\n            Logger::LogError(\"Error: ProtocolListenerMgr::Stop() unexpected exception while trying to remove pid-port file: \" + std::string(ex.what()));\n        }\n        catch(...) {\n            Logger::LogError(\"Error: ProtocolListenerMgr::Stop() unknown exception while trying to remove pid-port file.\");\n        }\n        _stop = true;\n        _cond.notify_all();\n    }\n}\n\nvoid\nProtocolListenerMgr::Wait()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerMgr::Wait\");\n\n    std::unique_lock<std::mutex> lock(_lock);\n\n    // Wait for stop\n   _cond.wait(lock, [this]{return this->_stop;});\n}\n\nextern \"C\"\nvoid\nStopProtocolListenerMgr()\n{\n    auto plmgmt = ProtocolListenerMgr::GetProtocolListenerMgr();\n    if (plmgmt != nullptr)\n    {\n        plmgmt->Stop();\n    }\n}\n\nextern \"C\" void\nTruncateAndClosePidPortFile()\n{\n    try {\n        pidPortFile.TruncateAndClose();\n    }\n    catch(std::exception& ex) {\n        Logger::LogError(\"Error: TruncateAndClosePidPortFile() unexpected exception: \" + std::string(ex.what()));\n    }\n    catch(...) {\n        Logger::LogError(\"Error: TruncateAndClosePidPortFile() unknown exception.\");\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerMgr.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_MGMT_HH_\n#define _PROTOCOL_LISTENER_MGMT_HH_\n\n#include \"ProtocolListener.hh\"\n\n#include <string>\n#include <mutex>\n#include <condition_variable>\n#include <unordered_set>\n\nclass ProtocolListenerMgr\n{\npublic:\n    ~ProtocolListenerMgr();\n\n    static void Init(const std::string& prefix, int port, bool retry_random);\n    static ProtocolListenerMgr* GetProtocolListenerMgr();\n\n    bool Start();\n    void Stop();\n    void Wait();\n\nprivate:\n    ProtocolListenerMgr(const std::string& prefix, int port, bool retry_random)\n        : _prefix(prefix), _port(port), _retry_random(retry_random), _stop(true)\n    {}\n\n    static ProtocolListenerMgr* _mgr;\n\n    std::string _prefix;\n    int _port;\n    bool _retry_random;\n    std::mutex _lock;\n    std::condition_variable _cond;\n    bool _stop;\n    std::unique_ptr<ProtocolListener> _bond_listener;\n    std::unique_ptr<ProtocolListener> _djson_listener;\n    std::unique_ptr<ProtocolListener> _json_listener;\n    std::unique_ptr<ProtocolListener> _tcp_json_listener;\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_MGMT_HH_\n"
  },
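  The intended lifecycle of the manager, as a sketch (the prefix is illustrative; port 0 makes the TCP JSON listener bind a random loopback port, and retry_random allows falling back to a random port when the requested one is busy):

      #include "ProtocolListenerMgr.hh"

      int main()
      {
          ProtocolListenerMgr::Init("/var/run/mdsd/default", 0, true);

          auto mgr = ProtocolListenerMgr::GetProtocolListenerMgr();
          if (mgr && mgr->Start()) {
              // Blocks until Stop() runs, e.g. via StopProtocolListenerMgr() from a
              // signal handler or a listener's fatal-error path.
              mgr->Wait();
          }
          return 0;
      }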
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerTcpJSON.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"ProtocolListenerTcpJSON.hh\"\n#include \"StreamListener.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n\nstatic\nvoid\nhandler(int fd)\n{\n    StreamListener::handler(new StreamListener(fd));\n}\n\nvoid\nProtocolListenerTcpJSON::openListener()\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerTcpJSON::openListener\");\n\n    int fd = socket(AF_INET, SOCK_STREAM, 0);\n    if (-1 == fd)\n    {\n        throw std::system_error(errno, std::system_category(), \"socket(AF_INET, SOCK_STREAM)\");\n    }\n    MdsdUtil::FdCloser fdCloser(fd);\n\n    int reuseaddr = 1;\n    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr)))\n    {\n        throw std::system_error(errno, std::system_category(), \"setsockopt(SO_REUSEADDR)\");\n    }\n\n    struct {\n        int l_onoff;\n        int l_linger;\n    } linger { 0, 0 };\n    if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &linger, sizeof(linger)))\n    {\n        throw std::system_error(errno, std::system_category(), \"setsockopt(SO_LINGER)\");\n    }\n\n    if (_port == 0)\n    {\n        Logger::LogInfo(std::string(\"ProtocolListenerTcpJSON: Binding to a random port\"));\n    }\n\n    struct sockaddr_in loopback;\n    loopback.sin_family = AF_INET;\n    loopback.sin_port = htons(_port);\n    loopback.sin_addr.s_addr = htonl(INADDR_LOOPBACK);\n    if (bind(fd, (struct sockaddr *)&loopback, sizeof(loopback)))\n    {\n        // If the first bind attempt was to a random port, then it doesn't matter what the errno is.\n        // Just throw the exception. Trying, again, on a random port is also likely to fail.\n        if (errno != EADDRINUSE || !_retry_random || _port == 0)\n        {\n            throw std::system_error(errno, std::system_category(),\n                                    std::string(\"bind(AF_INET, \") + std::to_string(_port) + \")\");\n        }\n\n        Logger::LogWarn(\"ProtocolListenerTcpJSON: Port \" + std::to_string(_port) +\n                        \" is already in use. Will try a random port.\");\n\n        loopback.sin_port = 0;\n        if (bind(fd, (struct sockaddr *) &loopback, sizeof(loopback)))\n        {\n            throw std::system_error(errno, std::system_category(),\n                                    std::string(\"bind(AF_INET, 0)\"));\n        }\n    }\n\n    socklen_t len = sizeof(loopback);\n    if (getsockname(fd, (struct sockaddr*)&loopback, &len))\n    {\n        throw std::system_error(errno, std::system_category(), \"getsockname()\");\n    }\n    auto _requested_port = _port;\n    _port = (int)ntohs(loopback.sin_port);\n\n    if (_requested_port != _port)\n    {\n        Logger::LogWarn(std::string(\"ProtocolListenerTcpJSON: Listener port is \") + std::to_string(_port));\n    }\n\n    fdCloser.Release();\n\n    _listenfd = fd;\n}\n\nvoid\nProtocolListenerTcpJSON::HandleConnection(int fd)\n{\n    Trace trace(Trace::EventIngest, \"ProtocolListenerTcpJSON::HandleConnection\");\n\n    std::thread thread(handler, fd);\n    std::ostringstream out;\n    out << \"ProtocolListenerTcpJSON: Created TCP JSON thread \" << thread.get_id() << \" for fd \" << fd;\n    thread.detach();\n    Logger::LogInfo(out.str());\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/ProtocolListenerTcpJSON.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _PROTOCOL_LISTENER_TCP_JSON_HH_\n#define _PROTOCOL_LISTENER_TCP_JSON_HH_\n\n#include \"ProtocolListener.hh\"\n\nclass ProtocolListenerTcpJSON : public ProtocolListener\n{\npublic:\n    ProtocolListenerTcpJSON(const std::string& prefix, int port, bool retry_random)\n            : ProtocolListener(prefix, \"json\"), _port(port), _retry_random(retry_random)\n    {\n        _file_path = _prefix + \".pidport\";\n    }\n\n    int Port() { return _port; };\n\nprotected:\n    virtual void openListener();\n    virtual void HandleConnection(int fd);\n\n    int _port;\n    bool _retry_random;\n};\n\n// vim: set ai sw=8:\n#endif // _PROTOCOL_LISTENER_TCP_JSON_HH_\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/RowIndex.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"RowIndex.hh\"\n#include <ctime>\n#include <climits>\n#include <mutex>\n\nthread_local unsigned long long RowIndex::_index = ULLONG_MAX;\nunsigned long long RowIndex::_baseValue = 0;\nstd::mutex RowIndex::_mutex;\n\nunsigned long long\nRowIndex::Get()\n{\n\tif (_index == ULLONG_MAX) {\n\t\tunsigned long long now = (((unsigned long long) time(0)) & 0xfffff) << 32;\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\t_index = _baseValue + now;\n\t\t_baseValue += 1ULL << 54;\n\t}\n\n\treturn _index++;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/RowIndex.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _ROWINDEX_HH_\n#define _ROWINDEX_HH_\n\n#include <mutex>\n\nclass RowIndex\n{\npublic:\n\tstatic unsigned long long Get();\nprivate:\n\tstatic thread_local unsigned long long _index;\n\tstatic unsigned long long _baseValue;\n\tstatic std::mutex _mutex;\n\n\tRowIndex();\n};\n\n#endif //_ROWINDEX_HH_\n\n// vim: se sw=8 :\n"
  },
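  Usage is a single call; the interesting property is the one documented in RowIndex.cc -- consecutive calls on one thread increment, and different threads draw from disjoint ranges. A trivial sketch:

      #include "RowIndex.hh"
      #include <iostream>

      int main()
      {
          auto a = RowIndex::Get();
          auto b = RowIndex::Get();   // b == a + 1 on the same thread
          std::cout << a << " " << b << "\n";
          return 0;
      }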
  {
    "path": "Diagnostic/mdsd/mdsd/SaxParserBase.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"SaxParserBase.hh\"\n\n#include <cassert>\n#include <exception>\n#include <sstream>\nextern \"C\" {\n#include <stdarg.h>\n#include <stdio.h>\n}\n\n///////////////////////////////////////////////////////////////////////\n// SAX callback dispatchers that are registered for every SaxParserBase\n// instance, which will call the actual callbacks in the instance.\n\nstatic void OnStartDocumentCallback(void* userData)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n\tparser->OnStartDocument();\n}\n\nstatic void OnEndDocumentCallback(void* userData)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n\tparser->OnEndDocument();\n}\n\nstatic void OnCommentCallback(void* userData, const xmlChar* comment)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n    const std::string commentStr((comment == nullptr) ? \"\" : reinterpret_cast<const char*>(comment));\n\n    parser->OnComment(commentStr);\n}\n\nstatic void OnStartElementCallback(\n    void*           userData,\n    const xmlChar*  localname,\n    const xmlChar** attributes\n)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n    std::string name(reinterpret_cast<const char*>(localname));\n    SaxParserBase::AttributeMap attrs;\n\n    while (attributes != nullptr && *attributes != nullptr) {\n    \tauto key = reinterpret_cast<const char*>(attributes[0]);\n    \tauto value = reinterpret_cast<const char*>(attributes[1]);\n\n    \tauto retval = attrs.emplace(key, value);\n    \tif (!retval.second) {\n    \t    std::ostringstream oss;\n    \t    oss << \"An extra instance of attribute \\\"\" << key\n    \t        << \"\\\" in element \\\"\" << name << \"\\\" was seen and ignored\";\n    \t    parser->OnWarning(oss.str());\n    \t}\n    \tattributes += 2;\n    }\n\n    parser->OnStartElement(name, attrs);\n}\n\nstatic void OnCharactersCallback(\n\tvoid*          userData,\n\tconst xmlChar* chars,\n\tint len\n)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n    const std::string charsStr(reinterpret_cast<const char*>(chars), static_cast<size_t>(len));\n\n    parser->OnCharacters(charsStr);\n}\n\nstatic void OnEndElementCallback(\n    void*          userData,\n    const xmlChar* localname\n)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n    const std::string name(reinterpret_cast<const char*>(localname));\n\n    parser->OnEndElement(name);\n}\n\nstatic constexpr size_t MESSAGE_BUFFER_SIZE = 512;\n\nstatic void OnWarningCallback(void* userData, const char* msg, ...)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n\tassert(nullptr != parser);\n\n\tchar buf[MESSAGE_BUFFER_SIZE];\n\tva_list arglist;\n\n\tva_start(arglist, msg);\n\tvsnprintf(buf, MESSAGE_BUFFER_SIZE, msg, arglist);\n\tva_end(arglist);\n\n\tconst std::string warning(buf);\n\n\tparser->OnWarning(warning);\n}\n\nstatic void OnErrorCallback(void* userData, const char* msg, ...)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n\tassert(nullptr != parser);\n\n\tchar buf[MESSAGE_BUFFER_SIZE];\n\tva_list arglist;\n\n\tva_start(arglist, msg);\n\tvsnprintf(buf, MESSAGE_BUFFER_SIZE, msg, arglist);\n\tva_end(arglist);\n\n\tconst std::string 
error(buf);\n\n\tparser->OnError(error);\n}\n\nstatic void OnFatalErrorCallback(void* userData, const char* msg, ...)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n\tassert(nullptr != parser);\n\n\tchar buf[MESSAGE_BUFFER_SIZE];\n\tva_list arglist;\n\n\tva_start(arglist, msg);\n\tvsnprintf(buf, MESSAGE_BUFFER_SIZE, msg, arglist);\n\tva_end(arglist);\n\n\tconst std::string fatalError(buf);\n\n\tparser->OnFatalError(fatalError);\n}\n\nstatic void OnCDataBlockCallback(\n\tvoid*          userData,\n\tconst xmlChar* chars,\n\tint len\n)\n{\n\tauto parser = static_cast<SaxParserBase*>(userData);\n    assert(nullptr != parser);\n\n    const std::string cdata(reinterpret_cast<const char*>(chars), static_cast<size_t>(len));\n\n    parser->OnCDataBlock(cdata);\n}\n\n///////////////// End of SAX callback dispatchers /////////////////////\n\n\n///////////////////////////////////////////////////////////////////////\n// Helper function to get the xmlSAXHandler pointer with the callback\n// dispatcher functions already registered.\n\nstatic xmlSAXHandler* GetSaxHandler()\n{\n\tstatic xmlSAXHandler saxHandler = {\n\t\t    nullptr, // internalSubset;\n\t\t    nullptr, // isStandalone;\n\t\t    nullptr, // hasInternalSubset;\n\t\t    nullptr, // hasExternalSubset;\n\t\t    nullptr, // resolveEntity;\n\t\t    nullptr, // getEntity;\n\t\t    nullptr, // entityDecl;\n\t\t    nullptr, // notationDecl;\n\t\t    nullptr, // attributeDecl;\n\t\t    nullptr, // elementDecl;\n\t\t    nullptr, // unparsedEntityDecl;\n\t\t    nullptr, // setDocumentLocator;\n\t\t    OnStartDocumentCallback, // startDocument;\n\t\t    OnEndDocumentCallback,   // endDocument;\n\t\t    OnStartElementCallback,  // startElement;\n\t\t    OnEndElementCallback,    // endElement;\n\t\t    nullptr, // reference;\n\t\t    OnCharactersCallback,    // characters;\n\t\t    nullptr, // ignorableWhitespace;\n\t\t    nullptr, // processingInstruction;\n\t\t    OnCommentCallback,       // comment;\n\t\t    OnWarningCallback,       // warning;\n\t\t    OnErrorCallback,         // error;\n\t\t    OnFatalErrorCallback,    // fatalError; /* unused error() get all the errors */\n\t\t    nullptr, // getParameterEntity;\n\t\t    OnCDataBlockCallback,    // cdataBlock;\n\t\t    nullptr, // externalSubset;\n\t\t    0,       // initialized;\n\t\t    /* The following fields are extensions available only on version 2 */\n\t\t    nullptr, // _private;\n\t\t    nullptr, // startElementNs;\n\t\t    nullptr, // endElementNs;\n\t\t    nullptr  // serror;\n\t};\n\n\treturn &saxHandler;\n}\n\n///////////////////////////////////////////////////////////////////////\n// SaxParserBase implementation\n\nSaxParserBase::SaxParserBase()\n    : m_ctxt(nullptr)\n{\n    xmlSAXHandlerPtr saxHander = GetSaxHandler();\n\n    m_ctxt = xmlCreatePushParserCtxt(saxHander, NULL, NULL, 0, NULL);\n\n    if (m_ctxt == nullptr)\n    {\n        throw SaxParserBaseException(\"Failed to create Xml parser context\");\n    }\n\n    // The instance pointer should be saved so that the callback\n    // dispatcher functions can route the calls to the proper\n    // SaxParserBase instance.\n    m_ctxt->userData = this;\n}\n\nSaxParserBase::~SaxParserBase()\n{\n\tif (m_ctxt != nullptr)\n\t{\n\t\txmlFreeParserCtxt(m_ctxt);\n\t}\n}\n\n#define MAX_SAX_CHUNK_SIZE 1024\n\nvoid SaxParserBase::Parse(const std::string & doc)\n{\n    if (m_ctxt == nullptr) {\n        throw SaxParserBaseException(\"Xml parser context wasn't created correctly at the construction time\");\n    }\n\n    const char* buf 
= doc.c_str();\n    size_t totalLen = doc.length();\n    size_t remainingLen = totalLen;\n\n    while (remainingLen > 0) {\n        const size_t chunkSize = std::min((size_t)MAX_SAX_CHUNK_SIZE, remainingLen);\n        const int terminate = (chunkSize == remainingLen);\n        int result = xmlParseChunk(m_ctxt, buf, (int)chunkSize, terminate);\n        if (result) {\n            const size_t offsetBegin = totalLen - remainingLen;\n            const size_t offsetEnd = offsetBegin + chunkSize;\n\n            std::ostringstream oss;\n\n            oss << \"xmlParseChunk error between offset \" << offsetBegin\n                << \" and \" << offsetEnd << \" (return code = \" << result << \")\";\n            this->OnError(oss.str());\n\n            return;\n        }\n        remainingLen -= chunkSize;\n        buf += chunkSize;\n    }\n}\n\nvoid SaxParserBase::ParseChunk(const std::string& chunk, bool terminate)\n{\n\tif (m_ctxt == nullptr) {\n\t\tthrow SaxParserBaseException(\"Xml parser context wasn't created correctly at construction time\");\n\t}\n\n\tint result = xmlParseChunk(m_ctxt, chunk.c_str(), (int)chunk.length(), terminate);\n\tif (result) {\n\t\tstd::ostringstream oss;\n\n\t\toss << \"xmlParseChunk error (return code = \" << result << \")\";\n\t\tthis->OnError(oss.str());\n\t}\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/SaxParserBase.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _SAXPARSERBASE_HH_\n#define _SAXPARSERBASE_HH_\n\n#include <libxml/parser.h>\n\n#include <string>\n#include <unordered_map>\n#include <stdexcept>\n\nclass SaxParserBaseException : public std::runtime_error\n{\npublic:\n\tSaxParserBaseException(const std::string& message)\n\t\t: std::runtime_error(message)\n\t{}\n\n    SaxParserBaseException(const char* message)\n        : std::runtime_error(message)\n    {}\n};\n\n/// <summary>\n/// A simple base class for a specific SAX parser. User of this class\n/// will derive a subclass from this and override necessary On...() methods\n/// to achieve their desired SAX parsing. Currently not supporting all\n/// the callbacks that LibXML2 supports. Should add all of them gradually.\n/// This base class's callback handler methods are all empty so that users\n/// don't have to implement all those methods, when they need only a few\n/// of them.\n/// </summary>\nclass SaxParserBase\n{\npublic:\n\ttypedef std::unordered_map<std::string, std::string> AttributeMap;\n\n\tSaxParserBase();\n\tvirtual ~SaxParserBase();\n\n\t// Callbacks for various SAX parsing events\n\tvirtual void OnStartDocument() {}\n\tvirtual void OnEndDocument() {}\n\tvirtual void OnComment(const std::string& comment) {}\n\tvirtual void OnStartElement(const std::string& name, const AttributeMap& attributes) {}\n\tvirtual void OnCharacters(const std::string& chars) {}\n\tvirtual void OnEndElement(const std::string& name) {}\n\n\tvirtual void OnWarning(const std::string& text) {}\n\tvirtual void OnError(const std::string& text) {}\n\tvirtual void OnFatalError(const std::string& text) {}\n\n\tvirtual void OnCDataBlock(const std::string& text) {}\n\n\t/// <summary>\n\t/// Parse an entire XML document passed as a string.\n\t/// <param name=\"doc\">The entire XML document passed as a string</param>\n\t/// </summary>\n    void Parse(const std::string & doc);\n\n    /// <summary>\n    /// Parse a chunk of XML document passed as a string.\n    /// This is needed so that a subclass don't have to use the\n    /// libxml's C API to do the chunk parsing. We wanted to separate\n    /// all I/Os from this class, so we'd need to provide this for\n    /// any subclass.\n    /// <param name=\"chunk\">The XML chunk to be parsed, passed as a string</param>\n    /// <param name=\"terminate\">Indicates whether the passed chunk is the last one\n    /// in the whole XML document</param>\n    void ParseChunk(std::string chunk, bool terminate = false);\n\nprivate:\n\txmlParserCtxtPtr\tm_ctxt;\n};\n\n#endif // _SAXPARSERBASE_HH_\n"
  },
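A minimal sketch of how SaxParserBase is meant to be consumed, per the header above. The subclass, the element name, and the handler bodies are illustrative assumptions, not code from this repository:

```cpp
#include "SaxParserBase.hh"
#include <iostream>

// Hypothetical subclass: collects the text of every <name> element.
class NameCollector : public SaxParserBase
{
public:
    void OnStartElement(const std::string& name, const AttributeMap&) override
    {
        if (name == "name") { _inName = true; }
    }
    void OnCharacters(const std::string& chars) override
    {
        if (_inName) { _current += chars; }
    }
    void OnEndElement(const std::string& name) override
    {
        if (name == "name") {
            std::cout << "name: " << _current << '\n';
            _current.clear();
            _inName = false;
        }
    }
    void OnError(const std::string& text) override
    {
        std::cerr << "parse error: " << text << '\n';
    }

private:
    bool _inName = false;
    std::string _current;
};

int main()
{
    NameCollector c;
    // Either hand over the whole document at once...
    c.Parse("<people><name>alice</name><name>bob</name></people>");
    // ...or stream it: c.ParseChunk(part1); c.ParseChunk(part2, true);
}
```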
  {
    "path": "Diagnostic/mdsd/mdsd/SchemaCache.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"SchemaCache.hh\"\n#include \"Trace.hh\"\n#include <sstream>\n#include \"Crypto.hh\"\n\nstd::mutex SchemaCache::_mutex;\nSchemaCache* SchemaCache::_singleton;\n\nSchemaCache& SchemaCache::Get()\n{\n\tTrace trace(Trace::SchemaCache, \"SchemaCache::Get\");\n\n\t// Double-check locking to ensure we instantiate the singleton exactly once.\n\t// We already needed the mutex for other purposes, so do this manually instead\n\t// of using std::call_once and std::once_flag.\n\tif (_singleton == nullptr) {\n\t\t_mutex.lock();\n\t\tif (_singleton == nullptr) {\n\t\t\t_singleton = new SchemaCache();\n\t\t\ttrace.NOTE(\"Allocating singleton cache\");\n\t\t}\n\t\t_mutex.unlock();\n\t}\n\n\treturn *_singleton;\n}\n\n// Store the id, move the schema string from the argument into the object, compute the MD5 hash.\n// If the caller(s) all along the line enabled move semantics, we should wind up with the schema inside\n// this object without any copying.\nSchemaCache::Info::Info(SchemaCache::IdType id, std::string schema)\n\t: _id(id), _schema(std::move(schema)), _md5(Crypto::MD5HashString(_schema))\n{\n}\n\nstd::map<SchemaCache::IdType, SchemaCache::CachedType> &\nSchemaCache::Select(Kind kind)\n{\n\tswitch(kind)\n\t{\n\tcase XTable:\n\t\treturn _XTableCache;\n\tcase Bond:\n\t\treturn _BondCache;\n\tdefault:\n\t\tthrow std::invalid_argument(\"Access to SchemaCache of unknown kind\");\n\t}\n}\n\nbool\nSchemaCache::IsCached(SchemaCache::IdType id, Kind kind) noexcept\n{\n\tTrace trace(Trace::SchemaCache, \"SchemaCache::IsCached\");\n\ttry {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\tbool found = (Select(kind).count(id) > 0);\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Cache(\" << kind;\n\t\t\tif (found) {\n\t\t\t\tmsg << \") did \";\n\t\t\t} else {\n\t\t\t\tmsg << \") did not \";\n\t\t\t}\n\t\t\tmsg << \"contain key \" << id;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t\treturn found;\n\t} catch (std::exception& ex) {\n\t\t// We don't cache anything for unknown kinds of schemas\n\t\ttrace.NOTE(std::string(\"Exception caught: \") + ex.what());\n\t\treturn false;\n\t}\n}\n\nSchemaCache::CachedType\nSchemaCache::Find(SchemaCache::IdType id, Kind kind)\n{\n\tauto& cache = Select(kind);\n\n\t// Lock the map down long enough to copy the result\n\tstd::unique_lock<std::mutex> lock(_mutex);\n\tauto it = cache.find(id);\n\tlock.unlock();\n\n\tif (it == cache.end()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"SchemaCache(\" << kind << \") does not contain id \" << id;\n\t\tthrow std::runtime_error(msg.str());\n\t}\n\n\treturn it->second;\n}\n\nvoid\nSchemaCache::Evict(SchemaCache::IdType id, Kind kind) noexcept\n{\n\t// Select is not nothrow, but the only exception it throws is one we want\n\t// to ignore (invalid kind). 
std::map::erase is nothrow.\n\ttry {\n\t\tstd::lock_guard<std::mutex> lock(_mutex);\n\t\t(void) Select(kind).erase(id);\n\t}\n\tcatch (...)\n\t{\n\t}\n}\n\n// Create an info structure by moving the schema into it.\nvoid\nSchemaCache::Insert(SchemaCache::IdType id, Kind kind, std::string schema)\n{\n\tTrace trace(Trace::SchemaCache, \"SchemaCache::Insert\");\n\n\tauto entry = std::make_shared<SchemaCache::Info>(id, std::move(schema));\n\tauto & cache = Select(kind);\n\tstd::lock_guard<std::mutex> lock(_mutex);\n\tcache[id] = std::move(entry);\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Added id \" << id << \" to cache of type \" << kind;\n\t\ttrace.NOTE(msg.str());\n\t}\n}\n\nstd::ostream&\noperator<<(std::ostream& strm, SchemaCache::Kind kind)\n{\n\tswitch(kind)\n\t{\n\tcase SchemaCache::Kind::XTable:\n\t\tstrm << \"XTable\";\n\t\tbreak;\n\tcase SchemaCache::Kind::Bond:\n\t\tstrm << \"Bond\";\n\t\tbreak;\n\tdefault:\n\t\tstrm << \"!Unknown!\";\n\t\tbreak;\n\t}\n\treturn strm;\n}\n\n#ifdef ENABLE_TESTING\n\nvoid\nTEST__SchemaCache_Reset()\n{\n\tif (SchemaCache::_singleton) {\n\t\tdelete SchemaCache::_singleton;\n\t\tSchemaCache::_singleton = nullptr;\n\t}\n}\n\nstd::map<SchemaCache::IdType, SchemaCache::CachedType>&\nTEST__SchemaCache_Select(SchemaCache::Kind kind)\n{\n\treturn SchemaCache::Get().Select(kind);\n}\n\n#endif // ENABLE_TESTING\n\n// vim: set ai sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/SchemaCache.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _SCHEMACACHE_HH_\n#define _SCHEMACACHE_HH_\n#pragma once\n\n#include <mutex>\n#include <memory>\n#include <atomic>\n#include <map>\n#include <string>\n#include <cstdint>\n#include <iostream>\n#include \"Crypto.hh\"\n\n/*\n\tThe notion of \"schema\" is tied to the CanonicalEvent. Each row in an MDS destination\n\t(table, Bond blob, etc.) can have its own schema.\n\tThe generator of a CanonicalEvent (JSON, OMI, query engine) can \"know\" the schema.\n\tPipeStages that alter the CE can likewise \"know\" the altered schema.\n\n\tSo...\n\n\to Tag the CE (at instantiation) with its schema id as \"known\" by the source/generator.\n\to PipeStage augmentors should map from input schema id to output schema id (the new,\n\t  altered schema produced by the augmenting stage). These are general-purpose augmentor (e.g.\n\t  \"Add identity columns\") and should literally keep a map of <inputID, outputID>.\n\to A unique augmenter (e.g. a configured query) should gets its own schema ID when the config\n\t  is processed. If the augmentor performs an identity transformation, it should pass along\n\t  the input schema ID(s). If it does its own projection, it should have its own (new) ID.\n*/\n\nclass SchemaCache\n{\npublic:\n\t/////////////  Types    //////////\n\n\t// The id type\n\tusing IdType = unsigned long long;\n\n\t// The actual data kept in the cache.\n\t// The result of the Schema() method shares lifetime with the Info object; if you want it to live longer\n\t// you'll need to copy it. Same for the MD5Hash returned by the Hash() method.\n\tclass Info {\n\tpublic:\n\t\tInfo(SchemaCache::IdType id, std::string schema);\n\n\t\tSchemaCache::IdType\tId() const { return _id; }\n\t\tconst std::string&\tSchema() const { return _schema; }\n\t\tconst Crypto::MD5Hash &\tHash() const { return _md5; }\n\n\tprivate:\n\t\tSchemaCache::IdType\t_id;\n\t\tstd::string\t\t_schema;\n\t\tCrypto::MD5Hash\t\t_md5;\n\t};\n\n\t// The kinds of schema we can store\n\tenum Kind { Unknown, XTable, Bond };\n\n\t// The value type is a shared pointer to the Info object.\n\t// When we return the shared_ptr to clients, the ref count on the actual object is\n\t// managed for us. When the last shared_ptr is deleted, the underlying object is cleaned up.\n\tusing CachedType = std::shared_ptr<SchemaCache::Info>;\n\n\t/////////////  Methods  //////////\n\t\n\t// Return the singleton instance of the SchemaCache\n\tstatic SchemaCache& Get();\n\n\t// Allocate a new schema ID and return it. Using an atomic_long, so no locking needed.\n\tSchemaCache::IdType GetId() noexcept { return _nextId++; }\n\n\t// Check to see if a schema of a given kind has been cached for a given ID\n\tbool IsCached(SchemaCache::IdType id, SchemaCache::Kind kind) noexcept;\n\n\t// Return the cached schema of that kind for that id. Throws if none is cached.\n\tCachedType Find(SchemaCache::IdType id, SchemaCache::Kind kind);\n\n\t// Remove a cached schema. Silent if nothing is cached for the id/kind\n\tvoid Evict(SchemaCache::IdType id, SchemaCache::Kind kind) noexcept;\n\n\t// Insert a schema. 
Discard the currently cached schema, if any.\n\t// The schema is moved into the Info object, if possible.\n\tvoid Insert(SchemaCache::IdType id, SchemaCache::Kind kind, std::string schema);\n\n#ifdef ENABLE_TESTING\n\tfriend void TEST__SchemaCache_Reset();\n\tfriend std::map<SchemaCache::IdType, SchemaCache::CachedType>& TEST__SchemaCache_Select(Kind kind);\n#endif\n\n\t//////////  Stream IO  //////////\n\tfriend std::ostream& operator<<(std::ostream&, Kind);\n\nprivate:\n\t// Default constructor is private and used by the static accessor. Copy construction\n\t// and assignment are not allowed (which also suppresses the implicit moves).\n\tSchemaCache() : _nextId(1) {}\n\tSchemaCache(const SchemaCache &) = delete;\n\tSchemaCache& operator=(const SchemaCache &) = delete;\n\n\tstatic SchemaCache *\t_singleton;\t// Points to the singleton instance of this class\n\t// As a static, it is zero-initialized, which is the correct bit pattern for nullptr\n\n\tstatic std::mutex\t_mutex;\t\t// Protects access to the cache\n\n\tstd::atomic_ullong\t_nextId;\t// Next schema ID to use\n\n\t// We only have two kinds of schemas, so give each kind its own map and provide a simple\n\t// method to get a reference to the map for any particular kind. This is moderately scalable;\n\t// the Select method is a fast switch() on the Kind. At a certain point, it may become\n\t// smarter to change to a single map whose key is pair<Kind, IdType>. Eliminate Select() and\n\t// simply build the right key wherever it's used.\n\tstd::map<SchemaCache::IdType, SchemaCache::CachedType> _BondCache;\n\tstd::map<SchemaCache::IdType, SchemaCache::CachedType> _XTableCache;\n\n\t// Return a reference to the map which caches the desired schema type\n\tstd::map<SchemaCache::IdType, SchemaCache::CachedType>& Select(Kind kind);\n\n};\n\n#ifdef ENABLE_TESTING\nvoid TEST__SchemaCache_Reset();\nstd::map<SchemaCache::IdType, SchemaCache::CachedType>& TEST__SchemaCache_Select(SchemaCache::Kind kind);\n#endif // ENABLE_TESTING\n\n#endif // _SCHEMACACHE_HH_\n\n// vim: se ai sw=8 :\n"
  },
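The intended call pattern for the cache, stitched together from the methods declared above (the schema text here is a stand-in, not a real MDS schema):

```cpp
#include "SchemaCache.hh"

void Example()
{
    auto& cache = SchemaCache::Get();

    // A generator grabs a process-unique id once per schema...
    SchemaCache::IdType id = cache.GetId();

    // ...and caches the serialized schema under that id and kind.
    cache.Insert(id, SchemaCache::XTable, "<Schema>...</Schema>");

    // Writers later look the schema up; Find() throws if nothing is cached,
    // so guard with IsCached() (or catch the exception).
    if (cache.IsCached(id, SchemaCache::XTable)) {
        SchemaCache::CachedType info = cache.Find(id, SchemaCache::XTable);
        const std::string& schema = info->Schema(); // lifetime tied to 'info'
        const Crypto::MD5Hash& md5 = info->Hash();  // ditto
        (void)schema;
        (void)md5;
    }
}
```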
  {
    "path": "Diagnostic/mdsd/mdsd/Signals.c",
    "content": "/*\n   Copyright (c) Microsoft Corporation. All rights reserved.\n   Licensed under the MIT license.\n*/\n\n#define _XOPEN_SOURCE\n\n#include <unistd.h>\n#include <stdlib.h>\n#include <signal.h>\n#include <execinfo.h>\n\n#define STACK_DEPTH 50\n\n#ifdef DOING_MEMCHECK\nextern void RunFinalCleanup();\n#endif\n\nextern void LogStackTrace(int, void**, int);\nextern void LogAbort();\n\nextern void CatchSigChld(int signo);\nextern void CleanupExtensions();\nextern void SetCoreDumpLimit();\nextern void TruncateAndClosePidPortFile();\nextern void StopProtocolListenerMgr();\n\n/* Signals on which we want to backtrace */\nstatic int signalsToBacktrace[] = { SIGSEGV, SIGFPE, SIGILL, SIGTRAP, SIGBUS, SIGSTKFLT, SIGXFSZ };\n\nstatic int\nCatchAndMaskAll(int sig, void(*handler)(int))\n{\n    struct sigaction sa;\n    sa.sa_handler = handler;\n    sigfillset(&sa.sa_mask);\n    sa.sa_flags = 0;\n    return sigaction(sig, &sa, 0);\n}\n\nstatic void\nSetBacktraceSignalHandler(void (*backtraceHandler)())\n{\n    int i;\n    for (i = 0; i < sizeof(signalsToBacktrace) / sizeof(int); i++) {\n        CatchAndMaskAll(signalsToBacktrace[i], backtraceHandler);\n    }\n}\n\nstatic void\nResetBacktraceSignalHandlersToDefault()\n{\n    int i;\n    for (i = 0; i < sizeof(signalsToBacktrace) / sizeof(int); i++) {\n        signal(signalsToBacktrace[i], SIG_DFL);\n    }\n    signal(SIGABRT, SIG_DFL); // We need to reset the SIGABRT handler to default as well, so that our own SIGABRT handler will not be called on this path.\n}\n\nvoid\nCatchSigUsr1(int signo)\n{\n\tchar msg[] = \"In SIGUSR1 handler\\n\";\n\tstatic int FirstTime = 1;\n#ifdef DOING_MEMCHECK\n\twrite(2, msg, sizeof(msg));\n\tif (FirstTime) {\n\t\tRunFinalCleanup();\n\t\tFirstTime = 0;\n\t\t/* Let all registered atexit() handlers run */\n\t\texit(1);\n\t} else {\n\t\t/* What, still here? Die, dang it! */\n\t\t_exit(1);\n\t}\n#else\n\texit(1);\n#endif\n}\n\nvoid\nCatchSigUsr2(int signo)\n{\n    extern void RotateLogs();\n\n    RotateLogs();\n}\n\nvoid\nCatchSigHup(int signo)\n{\n    extern void LoadNewConfiguration();\n\n    LoadNewConfiguration();\n}\n\nvoid\nEmitStackTrace(int signo)\n{\n    void *stack[STACK_DEPTH];\n    int count = backtrace(stack, STACK_DEPTH);\n    LogStackTrace(signo, stack, count);\n}\n\nvoid\nCatchFatal(int signo)\n{\n    // Code below can easily raise another signal\n    // (SIGABRT, most likely), which shouldn't be handled by this handler\n    // again, so we have to reset the handler to default. And we do that\n    // for all signals on which we may want to dump stack trace.\n    ResetBacktraceSignalHandlersToDefault();\n\n    EmitStackTrace(signo);\n\n    CleanupExtensions();\n\n    TruncateAndClosePidPortFile();\n}\n\nvoid\nCatchFatalAndExit(int signo)\n{\n    CatchFatal(signo);\n    _exit(signo);\n}\n\nvoid\nCatchFatalAndAbort(int signo)\n{\n    CatchFatal(signo);\n    abort();\n}\n\nvoid\nCatchTerm(int signo)\n{\n    CleanupExtensions();\n    StopProtocolListenerMgr();\n\n    // The Main thread will exit once ProtocolListenerMgr has stopped.\n\n    /*\n    struct sigaction sa_dfl;\n    sa_dfl.sa_handler = SIG_DFL;\n    sigemptyset(&sa_dfl.sa_mask);\n    sa_dfl.sa_restorer = 0;\n    sa_dfl.sa_flags = 0;\n    sigaction(signo, &sa_dfl, 0);\n    raise(signo);\n    */\n}\n\nvoid\nCatchSigAbort(int signo)\n{\n    LogAbort();\n    // If the SIGABRT signal is ignored, or caught by a handler that returns, the abort() function\n    // will still terminate the process. 
It does this by restoring the default disposition for\n    // SIGABRT and then raising the signal for a second time.\n}\n\nvoid BlockSignals()\n{\n    sigset_t ss;\n    sigemptyset(&ss);\n    sigaddset(&ss, SIGHUP);\n    sigaddset(&ss, SIGALRM);\n    sigprocmask(SIG_BLOCK, &ss, NULL);\n}\n\nvoid\nSetSignalCatchers(int coreDumpAtFatal)\n{\n    CatchAndMaskAll(SIGUSR1, CatchSigUsr1);\n    CatchAndMaskAll(SIGUSR2, CatchSigUsr2);\n\n    CatchAndMaskAll(SIGINT, CatchTerm);\n    CatchAndMaskAll(SIGTERM, CatchTerm);\n    CatchAndMaskAll(SIGQUIT, CatchTerm);\n\n    CatchAndMaskAll(SIGHUP, CatchSigHup);\n\n    // SIGABRT shouldn't try to backtrace, because of a glibc bug (https://sourceware.org/bugzilla/show_bug.cgi?id=16159)\n    // so catch it with a different handler, where it'll just log the event in mdsd.err and really abort.\n    CatchAndMaskAll(SIGABRT, CatchSigAbort);\n\n    void (*backtraceHandler)(int);  /* signal handlers take the signal number */\n    if (coreDumpAtFatal) {\n        SetCoreDumpLimit();\n        backtraceHandler = CatchFatalAndAbort;\n    }\n    else {\n        backtraceHandler = CatchFatalAndExit;\n    }\n\n    SetBacktraceSignalHandler(backtraceHandler);\n    signal(SIGPIPE, SIG_IGN);\n\n    struct sigaction sa_chld;\n    sa_chld.sa_handler = CatchSigChld;\n    sigemptyset(&sa_chld.sa_mask);\n    sa_chld.sa_flags = SA_NOCLDSTOP;\n    sigaction(SIGCHLD, &sa_chld, 0);\n}\n"
  },
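Signals.c captures the raw frames with backtrace() and hands them to an external LogStackTrace(). The real logger lives elsewhere in mdsd; purely as an illustrative sketch (the function name here is hypothetical), a minimal version could write the symbolized frames straight to stderr:

```cpp
#include <execinfo.h>
#include <unistd.h>

// Hypothetical stand-in for mdsd's LogStackTrace(), which is defined
// elsewhere. backtrace_symbols_fd() writes directly to the descriptor and,
// unlike backtrace_symbols(), does not call malloc() -- a property worth
// having inside a signal handler.
void ExampleLogStackTrace(int signo, void** stack, int count)
{
    (void)signo;    // a real implementation would log the signal number too
    backtrace_symbols_fd(stack, count, STDERR_FILENO);
}
```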
  {
    "path": "Diagnostic/mdsd/mdsd/StoreType.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"StoreType.hh\"\n\n#include <map>\n#include <string>\n#include \"Utility.hh\"\nextern \"C\" {\n#include <limits.h>\n}\n\nnamespace StoreType {\n\n// Names should be all lower case, since from_string canonicalizes to lower case\n// before searching the map\nstatic std::map<std::string, StoreType::Type> typeMap {\n\t{ \"local\", StoreType::Type::Local },\n\t{ \"xtable\", StoreType::Type::XTable },\n\t{ \"central\", StoreType::Type::XTable },\n\t{ \"jsonblob\", StoreType::Type::XJsonBlob },\n\t{ \"centraljson\", StoreType::Type::XJsonBlob },  // For parity with WAD...\n\t{ \"file\", StoreType::Type::File }\n};\n\nstatic std::map<StoreType::Type, size_t> nameLengthLimit {\n\t{ StoreType::Type::None, 0 },\n\t{ StoreType::Type::XTable, 63 },\n\t{ StoreType::Type::XJsonBlob, PATH_MAX /* No explicit limit we've heard about this. */ },\n\t{ StoreType::Type::Local, 255 },\n\t{ StoreType::Type::File, PATH_MAX }\n};\n\nstatic std::map<StoreType::Type, bool> needsSchemaGeneration {\n\t{ StoreType::Type::None, false },\n\t{ StoreType::Type::XTable, true },\n\t{ StoreType::Type::XJsonBlob, false },\n\t{ StoreType::Type::Local, false },\n\t{ StoreType::Type::File, false }\n};\n\nType\nfrom_string(const std::string & n)\n{\n\tconst auto &iter = typeMap.find(MdsdUtil::to_lower(n));\n\tif (iter == typeMap.end()) {\n\t\treturn None;\n\t} else {\n\t\treturn iter->second;\n\t}\n}\n\nsize_t\nmax_name_length(StoreType::Type t)\n{\n\tconst auto & iter = StoreType::nameLengthLimit.find(t);\n\tif (iter == StoreType::nameLengthLimit.end()) {\n\t\treturn 0;\n\t} else {\n\t\treturn iter->second;\n\t}\n}\n\nbool\nDoSchemaGeneration(StoreType::Type storetype)\n{\n\tconst auto &iter = needsSchemaGeneration.find(storetype);\n\tif (iter == needsSchemaGeneration.end()) {\n\t\tthrow std::domain_error(\"Don't know if schema generation is needed for StoreType \" + std::to_string(storetype));\n\t}\n\t\n\treturn iter->second;\n}\n\nbool\nDoAddIdentityColumns(StoreType::Type storetype)\n{\n\treturn (storetype != StoreType::Local);\n}\n\n};\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/StoreType.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _STORETYPE_HH_\n#define _STORETYPE_HH_\n\n#include <string>\n\nnamespace StoreType {\n\nenum Type { None, XTable, Bond, XJsonBlob, Local, File };\n\nStoreType::Type from_string(const std::string &);\n\nsize_t max_name_length(StoreType::Type t);\n\nbool DoSchemaGeneration(StoreType::Type storetype);\n\nbool DoAddIdentityColumns(StoreType::Type storetype);\n\n};\n\n#endif // _STORETYPE_HH_\n"
  },
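A few spot checks that restate the lookup tables in StoreType.cc as code (matching is case-insensitive, and unknown names map to None):

```cpp
#include "StoreType.hh"
#include <cassert>

int main()
{
    assert(StoreType::from_string("XTable") == StoreType::XTable);  // case-insensitive
    assert(StoreType::from_string("central") == StoreType::XTable); // WAD-era alias
    assert(StoreType::from_string("bogus") == StoreType::None);     // unknown -> None

    assert(StoreType::max_name_length(StoreType::XTable) == 63);
    assert(StoreType::DoSchemaGeneration(StoreType::XTable));
    assert(!StoreType::DoAddIdentityColumns(StoreType::Local));
}
```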
  {
    "path": "Diagnostic/mdsd/mdsd/StreamListener.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\nextern \"C\" {\n#include <unistd.h>\n#include <pthread.h>\n#include <sys/select.h>\n}\n#include <cerrno>\n#include <cctype>\n#include <cstring>\n#include <cstdlib>\n#include <cstdio>\n#include <sstream>\n\n#include \"StreamListener.hh\"\n#include \"MdsTime.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n\nint\nReadFromSocket(int fd, char * buffer, size_t amount)\n{\n  fd_set readfds;\n\n  while (1) {\n    FD_ZERO(&readfds);\n    FD_SET(fd, &readfds);\n    auto res = select(fd+1, &readfds, 0, 0, 0);\n    if (0 == res) {\n      // Spurious wakeup; they happen, honest.\n      continue;\n    }\n    if (-1 == res) {\n      auto saved_errno = errno;\n      throw std::system_error(saved_errno, std::system_category(), \"StreamListener select() failed.\");\n    }\n\n    int len = read(fd, buffer, amount);\n    if (-1 == len) {\n      // Something unusual happened\n      auto saved_errno = errno;\n      if (EINTR == errno || EWOULDBLOCK == errno || EAGAIN == errno) {\n        continue;\n      }\n      throw std::system_error(saved_errno, std::system_category(), \"StreamListener read() failed.\");\n    }\n\n    // If we got here, then we read some data or hit eof; either way, we're done\n    return len;\n  }\n}\n\nvoid *\nStreamListener::ProcessLoop()\n{\n  Trace trace(Trace::EventIngest, \"StreamListener::ProcessLoop\");\n\n  const int msgbuflen=1024;\n  char msgbuf[msgbuflen];\n\n  if (-1 == fcntl(fd(), F_SETFL, O_NONBLOCK)) {\n    auto saved_errno = errno;\n    Logger::LogError(std::string(\"StreamListener failed to set O_NONBLOCK: \").append(MdsdUtil::GetErrnoStr(saved_errno)));\n    return 0;\n  }\n\n  buflen = 256 * 1024;\n  trigger = 3 * (buflen>>2);    // 75% full\n  buffer = (char *)malloc(buflen + 1);  // Always room to turn \"byte array\" into \"string\"\n  if (0 == buffer) {\n    Logger::LogError(\"Initial buffer alloc out of memory\");\n    return(0);\n  }\n  current = buffer;\n\n  // buffer points to the beginning of the allocated buffer.\n  // buflen is the usable size of the buffer (which was allocated with 1 extra byte for a terminal NUL).\n  // current points to the location at which we might try to write into the buffer.\n  // When the buffer is empty, current==buffer\n  // When the buffer is full, current==(buffer+buflen), a valid address at which a single byte can be written.\n\n  while (1) {\n    // Invariant: unparsed data in buffer is less than the threshold for expanding the buffer\n    auto inuse = current - buffer;    // How far we were in the old buffer\n\n    if (inuse >= trigger) {\n      // Sanity check: no legal message is bigger than N MiB\n      if (inuse > 4*1024*1024) {\n        std::ostringstream msg;\n        msg << \"Buffered incomplete JSON data (\" << inuse << \" bytes) exceeds max; probable desync. 
Buffer head:\\n[[\";\n        msg << MdsdUtil::StringNCopy(buffer, 1024) << \"]]\\nDropping connection.\";\n        Logger::LogError(msg.str());\n        return(0);\n      }\n\n      // Resize the buffer\n      TRACEINFO(trace, \"Reallocate ingest buffer; was (buflen \" << buflen << \", trigger \" << trigger << \")\");\n      if (trace.IsActive() && trace.IsAlsoActive(Trace::IngestContents)) {\n        TRACEINFO(trace, \"Old buffer start: [[\" << MdsdUtil::StringNCopy(buffer, 1024) << \"]]\");\n      }\n      // Realloc through a temporary so a failure doesn't orphan the old\n      // buffer, which the destructor still needs to free.\n      char * newbuf = (char *)realloc(buffer, 2 * buflen + 1);\n      if (0 == newbuf) {\n        snprintf(msgbuf, msgbuflen, \"Buffer realloc(%zu) out of memory\", 2 * buflen + 1);\n        Logger::LogError(msgbuf);\n        return(0);\n      }\n      buffer = newbuf;\n      current = buffer + inuse;\n      buflen *= 2;\n      trigger *= 2;\n      TRACEINFO(trace, \"Now (buflen \" << buflen << \", trigger \" << trigger << \")\");\n    }\n\n    int len;\n    try {\n      len = ReadFromSocket(fd(), current, (buflen - inuse));\n    }\n    catch (const std::exception& e) {\n      Logger::LogError(e.what());\n      return 0;\n    }\n\n    if (0 == len) {     // End of file - closed socket.\n      snprintf(msgbuf, msgbuflen, \"End of file on thread %llx - exiting thread\", (unsigned long long)pthread_self());\n      trace.NOTE(msgbuf);\n      return 0;\n    }\n\n    // OK, I have some characters. Question is - do I have at least one valid\n    // JSON object? Best we can do is guess.\n    // If the last character is a backslash, it's not safe to hand the buffer\n    // to the parser; if the object is actually incomplete, the backslash will\n    // escape the NUL terminator and the parser will go off the edge of the buffer.\n    // Did I receive a right-brace in the most recent receive? If not, then\n    // I can't possibly have a valid object; go read more.\n    // If I saw a right brace, I *might* have a valid object; try to parse it.\n    // If I get a NULL back from the parser, I have no valid object; go read more.\n    // If I got a valid pointer, then I had at least one valid object, but they've\n    // been parsed; the pointer tells me where the next object might begin, so\n    // shuffle it to the top of the buffer and go read more.\n\n    const char * cursor = current + len - 1;    // Last character read\n    if (*cursor == '\\\\') {\n      // Not safe to parse, and there has to be more coming; go read more.\n      current += len;\n      continue;\n    }\n    while (cursor >= current) {\n      if (*cursor == '}') break;\n      cursor--;\n    }\n    if (cursor < current) {\n      // Nope, can't be an object; go read more.\n      current += len;\n      continue;\n    }\n\n    // Found a right brace. I might have valid objects. Parse the full buffer.\n    *(current+len) = '\\0';\n    try {\n      cursor = Listener::ParseBuffer(buffer, current+len);\n    }\n    catch (const Listener::exception &e) {\n      std::ostringstream msg;\n      msg << MdsTime() << \": closing connection due to JSON parse error: \" << e.what();\n      Logger::LogError(msg.str());\n      return(0);\n    }\n\n    if (0 == cursor) {\n      // Nope, no object; go read more.\n      current += len;\n      continue;\n    }\n\n    // OK, processed something. 
cursor points to the next possible start of object\n    // (I can rely on ParseBuffer to have clobbered any trailing whitespace.)\n    if (cursor == current+len) {\n      current = buffer;         // Processed everything; nothing remains\n    } else {\n      int delta = current + len - cursor;       // Remaining unprocessed characters\n      (void) memmove(buffer, cursor, delta);\n      current = buffer + delta;\n    }\n  }\n  /* NOTREACHED */\n}\n\n// vim: set ai sw=2 expandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/StreamListener.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _STREAMLISTENER_HH_\n#define _STREAMLISTENER_HH_\n\n#include <cstdlib>\n#include <cstddef>\n\n#include \"Listener.hh\"\n\n/// <summary>Listens for JSON-encoded events on a TCP socket</summary>\nclass StreamListener : public Listener\n{\nprivate:\n  StreamListener(const StreamListener&);\t\t// Do not define; copy construction forbidden\n  StreamListener& operator=(const StreamListener &);\t// Ditto for assignment\n\n  char * buffer = nullptr;\t// Data received from client\n  size_t buflen = 0;\t    // Size of buffer\n  ptrdiff_t trigger = 0;\t// Offset into buffer of leftover data that causes an increase in buffer size\n  char * current = nullptr;\t// Point at which new data will be added\n\npublic:\n  StreamListener(int fd) : Listener(fd) {}\n  virtual ~StreamListener() { if (buffer) free(buffer); }\n\n  void * ProcessLoop();\n};\n\n// vim: set ai sw=2\n#endif // _STREAMLISTENER_HH_\n"
  },
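The framing heuristic in ProcessLoop is the subtle part of this listener, so here is a distilled, standalone restatement. The names are illustrative; parseBuffer stands in for Listener::ParseBuffer, the trailing-backslash guard and the resize logic are omitted, and one spare byte past the end is assumed, as ProcessLoop guarantees:

```cpp
#include <cstddef>
#include <cstring>

// Returns the number of leftover (unparsed) bytes now sitting at the front
// of 'buffer'. 'current' points at where the latest read landed and 'len'
// is how many bytes that read produced.
size_t ConsumeCompleteObjects(char* buffer, char* current, size_t len,
                              const char* (*parseBuffer)(char*, char*))
{
    // Scan backwards through the new bytes for a '}'; without one there
    // cannot be a complete JSON object, so keep everything and read more.
    const char* cursor = current + len - 1;
    while (cursor >= current && *cursor != '}') { --cursor; }
    if (cursor < current) { return (current + len) - buffer; }

    *(current + len) = '\0';                      // NUL-terminate for the parser
    cursor = parseBuffer(buffer, current + len);  // nullptr: no complete object yet
    if (cursor == nullptr) { return (current + len) - buffer; }

    // Shuffle the unparsed tail to the top of the buffer (the memmove step).
    size_t delta = (current + len) - cursor;
    std::memmove(buffer, cursor, delta);
    return delta;
}
```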
  {
    "path": "Diagnostic/mdsd/mdsd/Subscription.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Subscription.hh\"\n#include \"Batch.hh\"\n#include \"Credentials.hh\"\n#include \"MdsEntityName.hh\"\n#include \"PipeStages.hh\"\n#include \"LocalSink.hh\"\n#include \"Trace.hh\"\n\nSubscription::Subscription(LocalSink *sink, const MdsEntityName &target, Priority pr, const MdsTime& interval)\n\t: ITask(interval), _sink(sink), _target(target), _priority(pr), _head(nullptr), _tail(nullptr)\n{\n\tTrace trace(Trace::ConfigLoad, \"Subscription constructor(table ref)\");\n\n\tcommon_constructor();\n}\n\nSubscription::Subscription(LocalSink *sink, MdsEntityName &&target, Priority pr, const MdsTime& interval)\n\t: ITask(interval), _sink(sink), _target(target), _priority(pr), _head(nullptr), _tail(nullptr)\n{\n\tTrace trace(Trace::ConfigLoad, \"Subscription constructor(table move)\");\n\n\tcommon_constructor();\n}\n\nvoid\nSubscription::common_constructor()\n{\n\tTrace trace(Trace::ConfigLoad, \"Subscription common constructor path\");\n\n\t_sink->SetRetentionPeriod(interval());\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Retention period \" << _sink->RetentionSeconds();\n\t\ttrace.NOTE(msg.str());\n\t}\n\n}\n\n// Initial start time is a few seconds past the end of the current interval\nMdsTime\nSubscription::initial_start()\n{\n        Trace trace(Trace::EventIngest, \"Subscription::initial_start\");\n\n        MdsTime start;  // Default constructor sets it to \"now\"\n\n        start += interval();\n        start = start.Round(interval().to_time_t());\n        start += MdsTime(2 + random()%5, random()%1000000);\n\n        if (trace.IsActive()) {\n                std::ostringstream msg;\n                msg << \"Initial time for event: \" << start;\n                trace.NOTE(msg.str());\n        }\n\n        return start;\n}\n\nvoid\nSubscription::AddStage(PipeStage *stage)\n{\n        if (! _tail) {\n                // This is the first stage in the pipeline; set the head to point here\n                _head = stage;\n        } else {\n                // There's already a pipeline; make the old tail point to the newly-added stage\n                _tail->AddSuccessor(stage);\n        }\n        // Either way, we have a new tail in the pipeline\n        _tail = stage;\n}\n\n// Pull everything in the sink on the interval [start, start+duration)\n// For each event, call _head->Process(new CanonicalEntity(event))\nvoid\nSubscription::execute(const MdsTime& startTime)\n{\n        Trace trace(Trace::EventIngest, \"Subscription::execute\");\n\n        if (trace.IsActive()) {\n                std::ostringstream msg;\n                msg << \"Start time \" << startTime << \", end time \" << startTime + interval();\n                trace.NOTE(msg.str());\n        }\n\n\t_head->Start(startTime);\n\ttry {\n\t\t_sink->Foreach(startTime, interval(), [this](const CanonicalEntity& ce){ _head->Process(new CanonicalEntity(ce)); });\n\t}\n\tcatch (std::exception & ex) {\n\t\ttrace.NOTE(std::string(\"Exception leaked: \") + ex.what());\n\t}\n\ttrace.NOTE(\"All lines processed\");\n\t_sink->Flush();\t\t// Tell the sink to do its housekeeping\n        _head->Done();\n}\n\nstd::ostream&\noperator<<(std::ostream& os, const Subscription& sub)\n{\n\tos << &sub << \" (Event \" << sub._target << \", interval \" << sub._priority.Duration() << \")\";\n\n\treturn os;\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/Subscription.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _SUBSCRIPTION_HH_\n#define _SUBSCRIPTION_HH_\n\n#include <iostream>\n#include \"Priority.hh\"\n#include \"MdsEntityName.hh\"\n#include \"CanonicalEntity.hh\"\n#include \"Pipeline.hh\"\n#include \"ITask.hh\"\n\nclass LocalSink;\n\nclass Subscription : public ITask\n{\n\tfriend std::ostream& operator<<(std::ostream& os, const Subscription& sub);\n\npublic:\n\t//Subscription(const std::string &ev, bool, const MdsdConfig*, const std::string &acct, StoreType::Type, Priority);\n\tSubscription(LocalSink *sink, const MdsEntityName& target, Priority, const MdsTime& interval);\n\tSubscription(LocalSink *sink, MdsEntityName&& target, Priority, const MdsTime& interval);\n\t~Subscription() { if (_head) delete _head; _head = nullptr; }\n\n\tvoid AddStage(PipeStage *stage);\n\n\tconst MdsEntityName& target() const { return _target; }\n\tPriority priority() const { return _priority; }\n\ttime_t Duration() const { return interval().to_time_t(); }\n\nprotected:\n\t// Returns the time at which the first call should be made\n\tMdsTime initial_start();\n\n\t// Invoked regularly to process data for the interval() seconds beginning at this time\n\tvoid execute(const MdsTime&);\n\nprivate:\n\tSubscription();\n\tvoid common_constructor();\n\n\tLocalSink *_sink;\n\tconst MdsEntityName _target;\n\tconst Priority _priority;\n\n\t// Ingest processing pipeline. When the subscription is deleted, the destructor must tear\n\t// down the pipeline. The teardown is recursive; delete the head, and it'll delete its\n\t// successor before finishing up.\n        PipeStage *_head;\n        PipeStage *_tail;\n};\n\nstd::ostream& operator<<(std::ostream& os, const Subscription& sub);\n\n#endif // _SUBSCRIPTION_HH_\n\n// vim: set ai sw=8 :\n"
  },
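How a pipeline gets assembled onto a Subscription, per AddStage() above. The two factory functions are hypothetical stand-ins for mdsd's real PipeStage implementations, which are configured elsewhere:

```cpp
#include "Subscription.hh"

// Hypothetical factories; declared only, standing in for real PipeStage
// implementations built during config load.
PipeStage* MakeIdentityStage();
PipeStage* MakeUploaderStage();

Subscription* BuildSubscription(LocalSink* sink, const MdsEntityName& target,
                                Priority pr, const MdsTime& interval)
{
    auto* sub = new Subscription(sink, target, pr, interval);
    sub->AddStage(MakeIdentityStage()); // first stage: becomes both _head and _tail
    sub->AddStage(MakeUploaderStage()); // chained onto the old tail via AddSuccessor()
    return sub; // ~Subscription() deletes _head, tearing down the chain recursively
}
```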
  {
    "path": "Diagnostic/mdsd/mdsd/TableColumn.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"TableColumn.hh\"\n\nvoid\nTableColumn::AppendXmlSchemaElement(std::string& xmlbody) const\n{\n\txmlbody += \"<Column name=\\\"\";\n\txmlbody += _name;\n\txmlbody += \"\\\" type=\\\"\";\n\txmlbody += _mdstype;\n\txmlbody += \"\\\"></Column>\";\n}\n\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/TableColumn.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _TABLECOLUMN_HH_\n#define _TABLECOLUMN_HH_\n\n#include \"MdsValue.hh\"\n\nclass TableColumn\n{\npublic:\n\tTableColumn(const std::string& n, const std::string& t, typeconverter_t& c)\n\t\t: _name(n), _mdstype(t), _converter(c) {}\n\t~TableColumn() {}\n\tconst std::string& Name() const { return _name; }\n\tconst std::string& MdsType() const { return _mdstype; }\n\t\n\t/// <summary>Append to the body the MDS XML \"schema\" definition element for this column</summary>\n\t/// <param name=\"xmlbody\">The XML body to which the generated element should be appended</param>\n\tvoid AppendXmlSchemaElement(std::string& xmlbody) const;\n\n\t/// <summary>Convert a cJSON object to the configured MDS type</summary>\n\t/// <param name=\"in\">The cJSON entity to be converted</param>\n\t/// <returns>Pointer to a newly-allocated MdsValue. Returns 0 if the conversion failed.</returns>\n\tMdsValue* Convert(cJSON * in) const { return _converter(in); }\nprivate:\n\tTableColumn();\n\n\tconst std::string _name;\n\tconst std::string _mdstype;\n\tconst typeconverter_t _converter;\n};\n\n#endif //_TABLECOLUMN_HH_\n\n// vim: se sw=8 :\n"
  },
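For a concrete sense of the output, this mirrors what AppendXmlSchemaElement emits; the column name and MDS type are illustrative values, and the converter (irrelevant to the XML) is omitted:

```cpp
#include <iostream>
#include <string>

int main()
{
    // Equivalent to TableColumn("CounterName", "mt:wstr", conv)
    // followed by AppendXmlSchemaElement(xmlbody):
    std::string xmlbody;
    xmlbody += "<Column name=\"";
    xmlbody += "CounterName";
    xmlbody += "\" type=\"";
    xmlbody += "mt:wstr";
    xmlbody += "\"></Column>";

    std::cout << xmlbody << '\n';
    // Output: <Column name="CounterName" type="mt:wstr"></Column>
}
```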
  {
    "path": "Diagnostic/mdsd/mdsd/TableSchema.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"TableSchema.hh\"\n#include \"TableColumn.hh\"\n#include \"Engine.hh\"\n#include <functional>\n#include <string>\n\nTableSchema::~TableSchema()\n{\n\tfor (TableColumn * tblcol : _columns) {\n\t\tdelete tblcol;\n\t}\n}\n\nTableSchema::ErrorCode\nTableSchema::AddColumn(const std::string& name, const std::string& srctype, const std::string& mdstype)\n{\n\tif (! _legal_types.count(srctype)) return BadSrcType;\n\tif (! _legal_mdstypes.count(mdstype)) return BadMdsType;\n\n\tfor (TableColumn * tblcol : _columns) {\n\t\tif (tblcol->Name() == name) return DupeColumn;\n\t}\n\n\ttypeconverter_t converter;\n\tif (! Engine::GetEngine()->GetConverter(srctype, mdstype, converter)) {\n\t\treturn NoConverter;\n\t}\n\n\tauto newcolumn = new TableColumn(name, mdstype, converter);\n\t_columns.push_back(newcolumn);\n\treturn Ok;\n}\n\nvoid\nTableSchema::PushColumnInfo(std::back_insert_iterator<std::vector<std::pair<std::string, std::string> > > inserter)\nconst\n{\n\tfor (const auto & tblcol : _columns) {\n\t\t*(inserter++) = std::make_pair(tblcol->Name(), tblcol->MdsType());\n\t}\n}\n\nstd::set<std::string> TableSchema::_legal_types = {\n\t\"bool\",\n\t\"int\",\n\t\"str\",\n\t\"double\",\n\t\"int-timet\",\n\t\"double-timet\",\n\t\"str-rfc3339\",\n\t\"str-rfc3194\"\n};\n\nstd::set<std::string> TableSchema::_legal_mdstypes = {\n\t\"mt:bool\",\n\t\"mt:wstr\",\n\t\"mt:float64\",\n\t\"mt:int32\",\n\t\"mt:int64\",\n\t\"mt:utc\"\n};\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/TableSchema.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _TABLESCHEMA_HH_\n#define _TABLESCHEMA_HH_\n\n#include \"MdsValue.hh\"\n#include \"TableColumn.hh\"\n#include <vector>\n#include <set>\n#include <utility>\n#include <string>\n#include <iterator>\n\nclass TableSchema\n{\npublic:\n\tTableSchema(const std::string& n) : _name(n) {}\n\t~TableSchema();\n\n\tenum ErrorCode {\n\t\tOk = 0,\n\t\tNoConverter = 1,\n\t\tDupeColumn = 2,\n\t\tBadSrcType = 3,\n\t\tBadMdsType = 4\n\t};\n\n\t/// <summary>Add a column to this schema.</summary>\n\t/// <param name=\"n\">Name of the column</param>\n\t/// <param name=\"srctype\">The JSON type for the column, as the data arrives in an event</param>\n\t/// <param name=\"mdstype\">The MDS type for the column in MDS</param>\n\tErrorCode AddColumn(const std::string& n, const std::string& srctype, const std::string& mdstype);\n\n\t// Act kinda like a container; allow iterators on the vector of TableColumn*.\n\ttypedef std::vector<TableColumn*>::iterator iterator;\n\ttypedef std::vector<TableColumn*>::const_iterator const_iterator;\n\n\titerator begin() { return _columns.begin(); }\n\tconst_iterator begin() const { return _columns.begin(); }\n\titerator end() { return _columns.end(); }\n\tconst_iterator end() const { return _columns.end(); }\n\n\tsize_t Size() const { return _columns.size(); }\n\n\t/// <summary>Push pairs of [column name, column typename] into a vector</summary>\n\tvoid PushColumnInfo(std::back_insert_iterator<std::vector<std::pair<std::string, std::string> > >) const;\n\n\tconst std::string& Name() const { return _name; }\n\nprivate:\n\tTableSchema();\n\n\tconst std::string _name;\n\tstd::vector<TableColumn*> _columns;\n\n\tstatic std::set<std::string> _legal_types;\n\tstatic std::set<std::string> _legal_mdstypes;\n};\n\n#endif //_TABLESCHEMA_HH_\n"
  },
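The AddColumn() contract restated as checks. This assumes the Engine has a converter registered for ("str", "mt:wstr"), which is not shown in these files; the column names and the illegal types are illustrative:

```cpp
#include "TableSchema.hh"
#include <cassert>

int main()
{
    TableSchema schema("MyEvent");

    assert(schema.AddColumn("Msg", "str", "mt:wstr") == TableSchema::Ok);
    assert(schema.AddColumn("Msg", "str", "mt:wstr") == TableSchema::DupeColumn);
    assert(schema.AddColumn("N", "uint128", "mt:int64") == TableSchema::BadSrcType);
    assert(schema.AddColumn("N", "int", "mt:guid") == TableSchema::BadMdsType);

    assert(schema.Size() == 1); // only the first AddColumn succeeded
}
```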
  {
    "path": "Diagnostic/mdsd/mdsd/TermHandler.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Logger.hh\"\n#include <cstdlib>\n\nextern \"C\" { void EmitStackTrace(int signo); }\n\n// Log uncaught exception before terminate the process.\nvoid TerminateHandler()\n{\n    try { \n        throw;\n    }\n    catch(const std::exception& e) { \n        Logger::LogError(\"Error: mdsd is terminated with exception: \" + std::string(e.what()));\n    }\n    catch(...) {\n        Logger::LogError(\"Error: mdsd is terminated with unknown exception.\");\n    }\n    EmitStackTrace(0);\n    abort();\n}\n"
  },
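TermHandler.cc contains only the handler body; the registration site isn't in this file. Presumably it is wired up early in startup with std::set_terminate, along these lines:

```cpp
#include <exception>

void TerminateHandler(); // defined in TermHandler.cc

void InstallTerminateHandler()
{
    // After this, any uncaught exception is logged (with a stack trace)
    // before the process aborts.
    std::set_terminate(TerminateHandler);
}
```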
  {
    "path": "Diagnostic/mdsd/mdsd/Version.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Version.hh\"\n#include <string>\n\n#define QUOTE(x) #x\n#define VAL(x) QUOTE(x)\n\n#define STATIC_VER VAL(MAJOR) \".\" VAL(MINOR) \".\" VAL(PATCH) \"+\" VAL(BUILD_NUMBER)\n\nnamespace Version\n{\nconst std::string Version(STATIC_VER);\n}\n// vim: se sw=8\n"
  },
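The QUOTE/VAL pair in Version.cc is the standard two-level stringification idiom; a single level would stringize the macro's name rather than its value:

```cpp
#define QUOTE(x) #x      // stringizes without expanding: QUOTE(MAJOR) -> "MAJOR"
#define VAL(x) QUOTE(x)  // expands first, then stringizes: VAL(MAJOR) -> "1"

// With MAJOR 1, MINOR 6, PATCH 100 (and BUILD_NUMBER supplied by the build
// system rather than by Version.hh), the adjacent string literals in
// STATIC_VER concatenate into something like "1.6.100+42" -- the build
// number here is illustrative.
```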
  {
    "path": "Diagnostic/mdsd/mdsd/Version.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _VERSION_HH_\n#define _VERSION_HH_\n\n#define MAJOR 1\n#define MINOR 6\n#define PATCH 100\n\n#include <string>\n\nnamespace Version\n{\nextern const std::string Version;\n}\n\n#endif //_VERSION_HH_\n\n// vim: se sw=8\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobBlockCountsMgr.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XJsonBlobBlockCountsMgr.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include <stdexcept>\n#include <memory>\n#include <system_error>\n#include <sstream>\n#include <cpprest/filestream.h>\n#include <cpprest/containerstream.h>\n#include <stdio.h>\n\n\nXJsonBlobBlockCountsMgr&\nXJsonBlobBlockCountsMgr::GetInstance()\n{\n    static XJsonBlobBlockCountsMgr s_instance;\n    return s_instance;\n}\n\n\nvoid\nXJsonBlobBlockCountsMgr::SetPersistDir(const std::string& persistDir, bool mdsdConfigValidationOnly)\n{\n    Trace trace(Trace::JsonBlob, \"XJsonBlobBlockCountsMgr::SetPersistDir\");\n\n    TRACEINFO(trace, \"persistDir=\\\"\" << persistDir << \"\\\"\");\n\n    if (persistDir.empty()) {\n        throw std::invalid_argument(\"persistDir can't be empty.\");\n    }\n    m_persistDir = persistDir;\n    m_mdsdConfigValidationOnly = mdsdConfigValidationOnly;\n}\n\n\nvoid\nXJsonBlobBlockCountsMgr::CreatePersistDirIfNotDone()\n{\n    Trace trace(Trace::JsonBlob, \"XJsonBlobBlockCountsMgr::CreatePersistDirIfNotDone\");\n\n    if (m_persistDirCreated || m_mdsdConfigValidationOnly) {\n        return;\n    }\n\n    if (m_persistDir.empty()) {\n        throw std::runtime_error(\"Jsonblob block counts persist dir is not set.\");\n    }\n\n    MdsdUtil::CreateDirIfNotExists(m_persistDir, 01755);\n\n    m_persistDirCreated = true;\n}\n\n\npplx::task<size_t>\nXJsonBlobBlockCountsMgr::ReadBlockCountAsync(\n        const std::string& containerName,\n        const std::string& blobName) const\n{\n    Trace trace(Trace::JsonBlob, \"XJsonBlobBlockCountsMgr::ReadBlockCountAsync\");\n\n    if (m_mdsdConfigValidationOnly) {\n        throw std::runtime_error(\"XJsonBlobBlockCountsMgr::ReadBlockCountAsync: Can't be called when mdsd config validation only\");\n    }\n\n    if (containerName.empty()) {\n        throw std::invalid_argument(\"XJsonBlobBlockCountsMgr::ReadBlockCountAsync: containerName can't be empty.\");\n    }\n    if (blobName.empty()) {\n        throw std::invalid_argument(\"XJsonBlobBlockCountsMgr::ReadBlockCountAsync: blobName can't be empty.\");\n    }\n\n    std::string file_path(m_persistDir);\n    file_path.append(\"/\").append(containerName);\n\n    // If there's no block-count file, then the block count is just 0.\n    if (!MdsdUtil::IsRegFileExists(file_path)) {\n        return pplx::task_from_result((size_t)0);\n    }\n\n    return concurrency::streams::fstream::open_istream(file_path)\n    .then([=](concurrency::streams::istream inFile) -> pplx::task<size_t>\n    {\n        concurrency::streams::container_buffer<std::string> streamBuffer;\n        return inFile.read_to_end(streamBuffer)\n        .then([=](size_t bytesRead) -> pplx::task<size_t>\n        {\n            if (bytesRead == 0 && inFile.is_eof()) {\n                // Invalid file format. 
Treat it silently as 0 block count.\n                return pplx::task_from_result((size_t)0);\n            }\n\n            std::istringstream iss(streamBuffer.collection());\n            std::string blobNameInFile;\n            iss >> blobNameInFile;\n\n            if (blobNameInFile != blobName) {\n                // Persisted block count is for a previous blob, so the block count for the current blob should be 0.\n                return pplx::task_from_result((size_t)0);\n            }\n\n            size_t blockCountInFile;\n            iss >> blockCountInFile;\n            return pplx::task_from_result(blockCountInFile);\n        })\n        .then([=](size_t blockCount) -> pplx::task<size_t>\n        {\n            return inFile.close()\n            .then([=]() -> pplx::task<size_t>\n            {\n                return pplx::task_from_result(blockCount);\n            });\n        });\n    });\n}\n\n\npplx::task<void>\nXJsonBlobBlockCountsMgr::WriteBlockCountAsync(\n        const std::string& containerName,\n        const std::string& blobName,\n        const size_t blockCount) const\n{\n    Trace trace(Trace::JsonBlob, \"XJsonBlobBlockCountsMgr::WriteBlockCountAsync\");\n\n    if (m_mdsdConfigValidationOnly) {\n        throw std::runtime_error(\"XJsonBlobBlockCountsMgr::WriteBlockCountAsync: Can't be called when mdsd config validation only\");\n    }\n\n    if (containerName.empty()) {\n        throw std::invalid_argument(\"XJsonBlobBlockCountsMgr::WriteBlockCountAsync: containerName can't be empty.\");\n    }\n    if (blobName.empty()) {\n        throw std::invalid_argument(\"XJsonBlobBlockCountsMgr::WriteBlockCountAsync: blobName can't be empty.\");\n    }\n    if (blockCount == 0) {\n        throw std::invalid_argument(\"XJsonBlobBlockCountsMgr::WriteBlockCountAsync: 0 blockCount is not allowed.\");\n    }\n\n    // m_persistDir + \"/\" + containerName is the full file path.\n    // blobName and blockCount are the only content in the file.\n    // First write to a tmp file path and then rename it to the correct path\n    std::string file_path(m_persistDir);\n    file_path.append(\"/\").append(containerName);\n    std::string file_path_tmp(file_path);\n    file_path_tmp.append(\".tmp\");\n\n    return concurrency::streams::fstream::open_ostream(file_path_tmp)\n    .then([=](concurrency::streams::ostream outFile) -> pplx::task<void>\n    {\n        std::string content(blobName);\n        content.append(\"\\n\").append(std::to_string(blockCount)).append(\"\\n\");\n        return outFile.print(content)\n        .then([=](size_t) -> pplx::task<void>\n        {\n            return outFile.close();\n        });\n    })\n    .then([=]() -> pplx::task<void>\n    {\n        if (-1 == rename(file_path_tmp.c_str(), file_path.c_str())) {\n            auto errnum = errno;\n            std::error_code ec(errnum, std::system_category());\n            throw std::runtime_error(std::string(\"XJsonBlobBlockCountsMgr::WriteBlockCountAsync: \"\n                    \"rename(\").append(file_path_tmp).append(\", \").append(file_path).append(\") failed. \"\n                            \"Reason: \").append(ec.message()));\n        }\n\n        return pplx::task_from_result();\n    });\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobBlockCountsMgr.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __XJSONBLOBBLOCKCOUNTSMGR_HH__\n#define __XJSONBLOBBLOCKCOUNTSMGR_HH__\n\n#include <string>\n#include <cpprest/pplx/pplxtasks.h>\n\n// Singleton pattern\nclass XJsonBlobBlockCountsMgr\n{\npublic:\n    static XJsonBlobBlockCountsMgr& GetInstance();\n\n    XJsonBlobBlockCountsMgr(const XJsonBlobBlockCountsMgr&) = delete;\n    XJsonBlobBlockCountsMgr(XJsonBlobBlockCountsMgr&&) = delete;\n    XJsonBlobBlockCountsMgr& operator=(const XJsonBlobBlockCountsMgr&) = delete;\n    XJsonBlobBlockCountsMgr& operator=(XJsonBlobBlockCountsMgr&&) = delete;\n\n    // Called from main() after mdsd_prefix is determined.\n    void SetPersistDir(const std::string& persistDir, bool mdsdConfigValidationOnly);\n\n    // Called from XJsonBlobSink::XJsonBlobSink()\n    void CreatePersistDirIfNotDone();\n\n    pplx::task<size_t> ReadBlockCountAsync(const std::string& containerName, const std::string& blobName) const;\n\n    pplx::task<void> WriteBlockCountAsync(const std::string& containerName, const std::string& blobName, const size_t blockCount) const;\n\nprivate:\n    XJsonBlobBlockCountsMgr() : m_persistDirCreated(false), m_mdsdConfigValidationOnly(false) {}\n    ~XJsonBlobBlockCountsMgr() {}\n\n    bool m_persistDirCreated;\n    bool m_mdsdConfigValidationOnly;\n    std::string m_persistDir;   // e.g., \"/var/run/mdsd/default_jsonblob_block_counts\"\n};\n\n#endif // __XJSONBLOBBLOCKCOUNTSMGR_HH__\n"
  },
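From the read/write pair above, the persisted state per container is a tiny two-line text file, named after the container and kept under the persist dir: the blob name on the first line, the block count on the second. Illustrative contents (the blob name mimics the example in XJsonBlobRequest.cc below; the count is made up):

```
resourceId=test_resource_id/i=agentIdentityHash/y=2015/m=05/d=03/h=00/m=00/PT1H.json
17
```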
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobRequest.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XJsonBlobRequest.hh\"\n#include \"XJsonBlobBlockCountsMgr.hh\"\n#include <string>\n#include <map>\n#include \"MdsTime.hh\"\n#include \"Constants.hh\"\n#include \"Crypto.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n#include \"AzureUtility.hh\"\n#include \"Version.hh\"\n#include <cassert>\n#include <type_traits>\n#include <cstring>\n#include <stdexcept>\n#include <iomanip>\n#include <chrono>\n\n#include <stdafx.h>\n#include <was/storage_account.h>\n#include <was/blob.h>\n#include <was/common.h>\n#include <wascore/streams.h>\n\nXJsonBlobRequest::XJsonBlobRequest(\n        const XJsonBlobSink::RequestInfo& info,\n        const MdsTime& blobBaseTime,\n        const std::string& blobIntervalISO8601Duration,\n        const std::string& containerName,\n        const std::string& reqId,\n        const std::shared_ptr<BlockListT>& blocklist)\n\t: _info(info), _blobBaseTime(blobBaseTime),\n\t  _containerName(containerName), _requestId(reqId), _totalDataBytes(0),\n\t  _blockList(blocklist)\n{\n\tTrace trace(Trace::JsonBlob, \"XJBR::XJBR, reqId=\" + _requestId);\n\n\tif (blobIntervalISO8601Duration.empty()) {\n\t\tthrow std::invalid_argument(\"Empty string param (blobIntervalISO8601Duration)\");\n\t}\n\tif (containerName.empty()) {\n\t\tthrow std::invalid_argument(\"Empty string param (containerName)\");\n\t}\n\tif (reqId.empty()) {\n\t\tthrow std::invalid_argument(\"Empty string param (reqId)\");\n\t}\n\tif (!blocklist) {\n\t\tthrow std::invalid_argument(\"Null blocklist\");\n\t}\n\n\t// Blob name example: resourceId=test_resource_id/i=agentIdentityHash/y=2015/m=05/d=03/h=00/m=00/name=PT1H.json\n\tstd::stringstream blobnamestr;\n\tif (!_info.primaryPartitionField.empty()) {\n\t    blobnamestr << _info.primaryPartitionField << '/';\n\t}\n\tif (!_info.agentIdentityHash.empty()) {\n\t\tblobnamestr << _info.agentIdentityHash << '/';\n\t}\n\tblobnamestr << blobBaseTime.to_strftime(\"y=%Y/m=%m/d=%d/h=%H/m=%M/\");\n\tif (!_info.partitionFields.empty())\t{\n\t    blobnamestr << _info.partitionFields << '/';\n\t}\n\tblobnamestr << blobIntervalISO8601Duration <<\".json\";\n\t_blobName = blobnamestr.str();\n\tTRACEINFO(trace, \"Preliminary blobname \" << _blobName);\n}\n\n\nXJsonBlobRequest::~XJsonBlobRequest()\n{\n    // Just to see that this fire-and-forget object is really destructed.\n    Trace trace(Trace::JsonBlob, \"XJBR::~XJBR, reqId=\" + _requestId);\n}\n\n\nstatic const std::string jsonRowSeparator(\",\\n\");\n\nvoid\nXJsonBlobRequest::AddJsonRow(std::string&& jsonRow)\n{\n    Trace trace(Trace::JsonBlob, \"XJBR::AddJsonRow\");\n\n    if (jsonRow.empty()) {\n        TRACEINFO(trace, \"Empty jsonRow string passed. Nothing to do. 
Return\");\n        return;\n    }\n\n    if (!_dataset.empty()) {\n        _totalDataBytes += jsonRowSeparator.length();\n    }\n    _totalDataBytes += jsonRow.size();\n    _dataset.emplace_back(std::move(jsonRow));\n\n    TRACEINFO(trace, \"# rows in dataset = \" << _dataset.size() << \", total data bytes = \" << _totalDataBytes);\n}\n\n\nstatic std::string\nGetStorageExceptionDetails(const azure::storage::storage_exception& e)\n{\n    std::ostringstream oss;\n    oss << \"Storage exception: \" << e.what();\n\n    azure::storage::request_result result = e.result();\n    azure::storage::storage_extended_error err = result.extended_error();\n    if (!err.message().empty()) {\n        oss << \", Extended info: \" << err.message();\n    }\n\n    oss << \", HTTP status code: \" << std::to_string(result.http_status_code());\n\n    return oss.str();\n}\n\n\nclass XJBRAsyncTaskError : public std::runtime_error\n{\npublic:\n    XJBRAsyncTaskError(const std::string& taskName, const std::string& message)\n        : std::runtime_error(message), _taskName(taskName) {}\n\n    std::string GetTaskName() const { return _taskName; }\n\nprivate:\n    std::string _taskName;\n};\n\n// Used to synchronize access to a BlockList. A std::shared_ptr<BlockListOwner> should stay alive\n// while exclusive access to the BlockList is needed across tasks/threads. The wrapping \n// std::shared_ptr<BlockListOwner> will provide copy counting, and will only deconstruct\n// the BlockListOwner when the last instance of the wrapping std::shared_ptr<BlockListOwner> is \n// deconstructed.\nstruct BlockListOwner\n{\n    std::shared_ptr<BlockListT> _blockList;\n    const std::string _ownerName;\n    const std::string _requestId;\n\n    BlockListOwner(std::shared_ptr<BlockListT> blockList, const std::string& ownerName, const std::string& requestId) : \n        _blockList(blockList), _ownerName(ownerName), _requestId(requestId)\n    {\n        Trace trace(Trace::JsonBlob, \"BlockListOwner::BlockListOwner\");\n        TRACEINFO(trace, \"Attempting to set block list owner for \" << _requestId << \" to \" << _ownerName);\n        _blockList->LockIfOwnedByNoneThenSetOwner(_ownerName);\n        TRACEINFO(trace, \"Set block list owner for \" << _requestId << \" to \" << _ownerName);\n    }\n    ~BlockListOwner()\n    {\n        Trace trace(Trace::JsonBlob, \"BlockListOwner::~BlockListOwner\");\n        TRACEINFO(trace, \"Resetting block list owner for \" << _requestId << \" (currently \" << _ownerName << \")\");\n        _blockList->ResetOwnerAndNotify();\n    }\n};\n\n\n/*static*/ void\nXJsonBlobRequest::Send(\n        std::shared_ptr<XJsonBlobRequest> req,\n        const std::string& connString)\n{\n\tTrace trace(Trace::JsonBlob, \"XJBR::Send id=\" + req->_requestId);\n\n\tif (!req) {\n        Logger::LogWarn(\"XJBR::Send(): Null request was passed. This shouldn't happen. Returning anyway...\");\n        return;\n\t}\n\n\tif (req->_dataset.empty()) {\n\t    Logger::LogWarn(\"Nothing to upload to the XJsonBlob blob \" + req->_blobName + \". 
Returning...\");\n\t    return;\n\t}\n\n\ttry\t{\n\t    TRACEINFO(trace, \"Get reference to container/blob \" << req->_containerName << \"/\" << req->_blobName);\n\t    auto cloudStorageAccount = azure::storage::cloud_storage_account::parse(connString);\n\n\t    // The endpoint URL and storage account are not really needed, but just for informational purpose...\n\t    auto endpointURL = cloudStorageAccount.blob_endpoint().primary_uri().to_string();\n\t    std::string storageAccountName = MdsdUtil::GetStorageAccountNameFromEndpointURL(endpointURL);\n\n\t    TRACEINFO(trace, \"Storage endpoint URL: \" << endpointURL << \", extracted storage account name: \"\n\t            << storageAccountName << \", requestId: \" << req->_requestId);\n\n\t    req->_blobRef =\n\t            cloudStorageAccount\n\t            .create_cloud_blob_client()\n\t            .get_container_reference(req->_containerName)\n\t            .get_block_blob_reference(req->_blobName);\n\n        // Start only when the mutex is not owned by any other request.\n        // Owner name really doesn't matter as long as it's non-empty. \n        // requestId is only for logging.\n        auto blockListOwner = std::make_shared<BlockListOwner>(req->_blockList, req->_blobName, req->_requestId);\n\n        XJsonBlobRequest::UploadNewBlockAsync(req)\n        .then([req]() -> pplx::task<void>\n        {\n            // This is a value-based continuation, so if the previous task throws,\n            // this task is not executed, so no need to do wait on prev_task.\n\n            return XJsonBlobRequest::UploadBlockListAsync(req);\n        })\n        .then([req]() -> pplx::task<void>\n        {\n            // Another value-based continuation\n\n            return XJsonBlobBlockCountsMgr::GetInstance().WriteBlockCountAsync(req->_containerName, req->_blobName, req->_blockList->get().size());\n        })\n        // Copy capture the BlockListOwner so that it stays alive through this\n        // continuation task.\n        .then([req, blockListOwner](pplx::task<void> prev_task)\n        {\n            // This is a task-based continuation, so this task will be executed\n            // even if any previous task throws.\n\n            Trace trace(Trace::JsonBlob, \"XJBR::Send final continuation task, req id=\" + req->_requestId);\n\n            try {\n                // Wait, to handle prev async task exceptions right away\n                prev_task.wait();\n\n                // There were no exceptions if we reached this point.\n                if (trace.IsActive()) {\n                    TRACEINFO(trace, \"Added new block to blob [\" << req->_blobName << \"]. Now there are \"\n                            << req->_blockList->get().size() << \" blocks in the blob.\");\n                }\n            }\n            catch (const XJBRAsyncTaskError& e) {\n                Logger::LogError(e.GetTaskName().append(\": \").append(e.what()));\n            }\n            catch (const std::exception& e) {\n                Logger::LogError(std::string(\"[XJBR::UploadBlockListCompletion]: \").append(e.what()));\n            }\n            catch (...) 
{\n                // Don't leak any exception from this async function body\n                Logger::LogError(\"[XJBR::UploadBlockListCompletion]: Unknown exception\");\n            }\n        });\n\t}\n\tcatch (const azure::storage::storage_exception& e) {\n        Logger::LogError(\"Storage exception generated while starting async blob write: \" + GetStorageExceptionDetails(e));\n\t}\n\tcatch (const std::exception& e) {\n\t    Logger::LogError(std::string(\"Exception generated while starting async blob write: \").append(e.what()));\n\t}\n\tcatch (...) {\n\t    Logger::LogError(\"Unknown exception generated while starting async blob write\");\n\t}\n}\n\nstatic std::string\nGetBase64HashString(const std::string& content)\n{\n    azure::storage::core::hash_provider provider = azure::storage::core::hash_provider::create_md5_hash_provider();\n    provider.write((const unsigned char*)content.c_str(), content.length());\n    provider.close();\n    return provider.hash();\n}\n\n\nstatic constexpr size_t maxBlocksInBlob = 50000;\n\nstatic const std::string first_block_id = utility::conversions::to_base64(0);\nstatic const std::string first_block_content = \"{\\\"records\\\":[\\n\";\nstatic const std::string last_block_id = utility::conversions::to_base64(maxBlocksInBlob - 1); // 49999\nstatic const std::string last_block_content = \"\\n]}\";\n\n\n/*static*/ pplx::task<void>\nXJsonBlobRequest::UploadNewBlockAsync(\n        const std::shared_ptr<XJsonBlobRequest>& req)\n{\n    if (!req) {\n        throw std::invalid_argument(\"Null shared_ptr<XJsonBlobRequest>\");\n    }\n\n    // Construct the trace only after the null check above, since it dereferences req.\n    Trace trace(Trace::JsonBlob, \"XJBR::UploadNewBlock id=\" + req->_requestId);\n\n    // Handy references\n    auto& blobRef = req->_blobRef;\n    auto& blockList = req->_blockList->get();\n    auto& blobName = req->_blobName;\n    auto& newBlockId = req->_newBlockId;\n    auto& newBlockContent = req->_newBlockContent;\n\n    if (blockList.size() >= maxBlocksInBlob) {\n        std::ostringstream ss;\n        ss << \"Can't add any more blocks to blob \" << blobName\n           << \". There are already the maximum number of blocks (\" << blockList.size() << \") in the blob.\";\n        throw XJBRAsyncTaskError(\"XJBR::UploadNewBlockAsync\", ss.str());\n    }\n\n    if (!blockList.empty() && blockList.size() < 2) {\n        throw XJBRAsyncTaskError(\"XJBR::UploadNewBlockAsync\",\n                \"Blob format error: No first/last blocks in \" + blobName + \".  Returning...\");\n    }\n\n    if (!blockList.empty()\n            && (blockList.front().id() != first_block_id || blockList.back().id() != last_block_id)) {\n        throw XJBRAsyncTaskError(\"XJBR::UploadNewBlockAsync\", \"Blob format error: First block id (\"\n                + blockList.front().id() + \") or last block id (\"\n                + blockList.back().id() + \") is incorrect in \" + blobName + \". Returning.\");\n    }\n\n    std::vector<pplx::task<void>> blockUploadTasks; // maximum 3 uploads\n\n    if (blockList.empty()) {\n        TRACEINFO(trace, \"Blob \" << blobName << \" is empty. 
Adding first/last blocks.\");\n\n        auto first_block_stream = concurrency::streams::bytestream::open_istream(first_block_content);\n        auto taskUploadFirstBlock = blobRef.upload_block_async(first_block_id, first_block_stream, GetBase64HashString(first_block_content));\n        blockUploadTasks.push_back(taskUploadFirstBlock);\n\n        auto last_block_stream = concurrency::streams::bytestream::open_istream(last_block_content);\n        auto taskUploadLastBlock = blobRef.upload_block_async(last_block_id, last_block_stream, GetBase64HashString(last_block_content));\n        blockUploadTasks.push_back(taskUploadLastBlock);\n    }\n\n    // Add the new block. New block's id # is blockList.size() - 2 (first/last) + 1 (new block).\n    // Above is correct only for non-empty block list. Empty block list case needs to be handled\n    // as a special case, as we don't want to update the block list until blocks are really uploaded.\n    size_t newBlockNum = blockList.empty() ? 1 : (blockList.size() - 1);\n    newBlockId = utility::conversions::to_base64(newBlockNum);\n    TRACEINFO(trace, \"Adding a new block (numeric ID=\" << newBlockNum\n            << \", base64 ID=\" << newBlockId << \") to blob \" << blobName);\n\n    // Construct the new block content.\n    newBlockContent.reserve(req->_totalDataBytes + jsonRowSeparator.length());    // + 2 for possible preceding \",\\n\"\n    if (blockList.size() > 2) {\n        // Not the first content block, so prepend \",\"\n        newBlockContent.append(jsonRowSeparator);\n    }\n    bool first = true;\n    for (const auto& row : req->_dataset) {\n        if (first) {\n            first = false;\n        }\n        else {\n            newBlockContent.append(jsonRowSeparator);\n        }\n        newBlockContent.append(row);\n    }\n    auto new_block_stream = concurrency::streams::bytestream::open_istream(newBlockContent);\n    auto taskUploadContentBlock = blobRef.upload_block_async(newBlockId, new_block_stream, GetBase64HashString(newBlockContent));\n    blockUploadTasks.push_back(taskUploadContentBlock);\n\n    return pplx::when_all(blockUploadTasks.begin(), blockUploadTasks.end());\n}\n\n\n/*static*/ pplx::task<void>\nXJsonBlobRequest::UploadBlockListAsync(const std::shared_ptr<XJsonBlobRequest>& req)\n{\n    Trace trace(Trace::JsonBlob, \"XJBR::UploadBlockListAsync, req id=\" + req->_requestId);\n\n    // handy references\n    auto& request = *req;\n    auto& blockList = request._blockList->get();\n\n    // Update block list only after block(s) is/are uploaded successfully\n    if (blockList.empty()) {\n        blockList.emplace_back(azure::storage::block_list_item(first_block_id));\n        blockList.emplace_back(azure::storage::block_list_item(last_block_id));\n    }\n    blockList.insert(blockList.end() - 1, azure::storage::block_list_item(request._newBlockId));\n\n    // Finally upload the block list!\n    return request._blobRef.upload_block_list_async(blockList);\n}\n\n\n/*static*/ void\nXJsonBlobRequest::ReconstructBlockListIfNeeded(std::shared_ptr<XJsonBlobRequest> req)\n{\n    Trace trace(Trace::JsonBlob, \"XJBR::ReconstructBlockListIfNeeded\");\n\n    if (!req->_blockList->get().empty()) {\n        throw std::runtime_error(\"XJBR::ReconstructBlockListIfNeeded: Block list is not empty.\");\n    }\n\n    // Start only when the mutex is not owned by any other request.\n    // Owner name really doesn't matter as long as it's non-empty.\n    auto blockListOwner = std::make_shared<BlockListOwner>(req->_blockList, req->_blobName, 
req->_requestId);\n\n    // Copy capture the BlockListOwner so that it stays alive through the\n    // continuation task.\n    XJsonBlobBlockCountsMgr::GetInstance().ReadBlockCountAsync(req->_containerName, req->_blobName)\n    .then([req, blockListOwner](pplx::task<size_t> prev_task)\n    {\n        Trace trace(Trace::JsonBlob, \"XJBR::ReconstructBlockListIfNeeded continuation\");\n        TRACEINFO(trace, \"In XJBR::ReconstructBlockListIfNeeded continuation.\");\n\n        try {\n            auto blockCount = prev_task.get();\n\n            TRACEINFO(trace, \"Obtained blockCount=\" << blockCount\n                    << \" for container=\" << req->_containerName << \" and blob=\" << req->_blobName);\n\n            if (blockCount == 0) {\n                return;\n            }\n\n            if (blockCount < 3  // A persisted block count is always at least 3 blocks.\n                                // \"{ ...\" for first block, \"}\" for last block, at least one content block.\n                    || blockCount > maxBlocksInBlob) {\n                Logger::LogError(std::string(\"Invalid block count (\").append(std::to_string(blockCount))\n                        .append(\") returned from XJBBlockCountsMgr::ReadBlockCount. \"\n                        \"Valid block count is at least 3 and at most \").append(std::to_string(maxBlocksInBlob))\n                        .append(\". Block list won't be reconstructed.\"));\n                return;\n            }\n\n            // Finally we can reconstruct the block list.\n            auto& blockList = req->_blockList->get();\n            blockList.emplace_back(azure::storage::block_list_item(first_block_id));\n            size_t lastBlockNum = blockCount - 2;\n            for (size_t blockNum = 1; blockNum <= lastBlockNum; blockNum++) {\n                blockList.emplace_back(azure::storage::block_list_item(utility::conversions::to_base64(blockNum)));\n            }\n            blockList.emplace_back(azure::storage::block_list_item(last_block_id));\n        }\n        catch (std::exception& e) {\n            Logger::LogError(std::string(\"Exception thrown from XJBBlockCountsMgr::ReadBlockCount. \"\n                    \"Block list can't be reconstructed. Exception message: \").append(e.what()));\n        }\n        catch (...) {\n            Logger::LogError(\"Unknown exception thrown from XJBBlockCountsMgr::ReadBlockCount. \"\n                    \"Block list can't be reconstructed\");\n        }\n    });\n    TRACEINFO(trace, \"After ReadBlockCountAsync.\");\n}\n\n\n// vim: se sw=8 :\n"
  },
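A note on the block layout implemented in XJsonBlobRequest.cc above: each blob is bracketed by a first block ("{\"records\":[\n", base64 id 0) and a last block ("\n]}", base64 id 49999), with content blocks numbered from 1 in between, so the next content block's numeric id is 1 for an empty block list and blockList.size() - 1 otherwise. Below is a minimal standalone sketch of that numbering, with a hypothetical ToBase64Id() standing in for utility::conversions::to_base64().

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

static constexpr size_t kMaxBlocksInBlob = 50000;

// Stand-in for base64 block ids; the real code uses utility::conversions::to_base64().
std::string ToBase64Id(size_t n) { return "id-" + std::to_string(n); }

// Numeric id of the next content block: 1 for an empty blob (the first/last wrapper
// blocks get ids 0 and 49999), otherwise blockCount - 2 (wrappers) + 1 (new block).
size_t NextContentBlockNum(size_t blockCount) {
    return blockCount == 0 ? 1 : blockCount - 1;
}

int main() {
    std::vector<std::string> blockIds;                          // simulated committed block list
    std::cout << NextContentBlockNum(blockIds.size()) << "\n";  // 1: first content block
    blockIds = { ToBase64Id(0), ToBase64Id(1), ToBase64Id(kMaxBlocksInBlob - 1) };
    std::cout << NextContentBlockNum(blockIds.size()) << "\n";  // 2: next content block
}
```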
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobRequest.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _XJSONBLOBREQUEST_HH_\n#define _XJSONBLOBREQUEST_HH_\n\n#include \"XJsonBlobSink.hh\"\n#include \"MdsTime.hh\"\n#include <string>\n#include <vector>\n#include <memory>\n#include <mutex>\n\n#include <was/storage_account.h>\n#include <was/blob.h>\n#include <was/common.h>\n\nclass XJsonBlobRequest\n{\npublic:\n\tXJsonBlobRequest(\n\t        const XJsonBlobSink::RequestInfo& info,\n\t        const MdsTime& blobBaseTime,\n\t        const std::string& blobIntervalISO8601Duration,\n\t        const std::string& containerName,\n\t        const std::string& reqId,\n\t        const std::shared_ptr<BlockListT>& blocklist);\n\n\t~XJsonBlobRequest();\n\n\tstatic void Send(\n\t        std::shared_ptr<XJsonBlobRequest> req,\n\t        const std::string & connString);\n\n\tconst std::string & UUID() const { return _requestId; }\n\n\tsize_t EstimatedSize() const { return _totalDataBytes; }\n\n\tvoid AddJsonRow(std::string&& jsonRow);\n\n\tstatic void ReconstructBlockListIfNeeded(std::shared_ptr<XJsonBlobRequest> req);\n\nprivate:\n\tstatic pplx::task<void> UploadNewBlockAsync(const std::shared_ptr<XJsonBlobRequest>& req);\n\tstatic pplx::task<void> UploadBlockListAsync(const std::shared_ptr<XJsonBlobRequest>& req);\n\n\tXJsonBlobSink::RequestInfo    _info;\n\n\tstd::string _containerName;\n\tstd::string _blobName;\n\n\tMdsTime _blobBaseTime;  // Base time for the current blob\n\n\t// As we add a new _rowbuf to the collection, we accumulate its size so we know when we hit\n\t// maximum length.\n\n\tsize_t\t\t_totalDataBytes;\n\tstd::vector<std::string> _dataset;\n\n\t// UUID for this request; attached to storage request(s) end-to-end\n\tstd::string _requestId;\n\n\t// Async request handling members\n\tazure::storage::cloud_block_blob _blobRef;\n\tstd::shared_ptr<BlockListT> _blockList;\n\tstd::string _newBlockId;\n\tstd::string _newBlockContent;\n};\n\n#endif // _XJSONBLOBREQUEST_HH_\n\n// vim: se ai sw=8 :\n"
  },
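AddJsonRow(), whose size bookkeeping appears at the top of XJsonBlobRequest.cc, grows EstimatedSize() by the row size plus one separator for every row after the first. A minimal sketch of that accounting, assuming jsonRowSeparator is ",\n" as the comments in XJsonBlobRequest.cc indicate:

```cpp
#include <iostream>
#include <string>
#include <vector>

static const std::string kJsonRowSeparator = ",\n";  // assumed value of jsonRowSeparator

struct DatasetSize {
    std::vector<std::string> rows;
    size_t totalBytes = 0;

    // Mirrors AddJsonRow(): count a separator before every row except the first.
    void Add(std::string&& row) {
        if (!rows.empty()) totalBytes += kJsonRowSeparator.length();
        totalBytes += row.size();
        rows.emplace_back(std::move(row));
    }
};

int main() {
    DatasetSize d;
    d.Add("{\"a\":1}");                   // 7 bytes
    d.Add("{\"b\":2}");                   // +2 (separator) +7
    std::cout << d.totalBytes << "\n";    // 16
}
```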
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobSink.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XJsonBlobSink.hh\"\n#include \"XJsonBlobRequest.hh\"\n#include \"XJsonBlobBlockCountsMgr.hh\"\n\n#include <iterator>\n#include <sstream>\n\n#include \"CanonicalEntity.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"Utility.hh\"\n#include \"AzureUtility.hh\"\n#include \"RowIndex.hh\"\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include \"MdsdMetrics.hh\"\n#include \"StoreType.hh\"\n#include \"Constants.hh\"\n#include \"MdsTime.hh\"\n#include \"CfgOboDirectConfig.hh\"\n\n#include <wascore/basic_types.h>\n\n\nXJsonBlobSink::XJsonBlobSink(MdsdConfig* config, const MdsEntityName &target, const Credentials* c)\n    : IMdsSink(StoreType::Type::XJsonBlob), _template(target), _creds(c)\n    , _namespace(config->Namespace()), _firstReqCreated(false)\n    , _blobBaseTime(0) // Make sure to set _blobBaseTime to a past long time ago when constructing\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::Constructor\");\n\n    if (!config) {\n        throw std::invalid_argument(\"Null MdsdConfig* config\");\n    }\n    if (!c) {\n        throw std::invalid_argument(\"Null Credentials* c\");\n    }\n\n    auto eventName = target.EventName();\n\n    try {\n        auto oboDirectConfig = config->GetOboDirectConfig(eventName); // May throw std::out_of_range if eventName is not a key stored in the map.\n        InitializeForOboDirect(config, oboDirectConfig);\n    }\n    catch (const std::out_of_range& e) {\n        // No OboDirect config. It's LAD JsonBlob sink scenario.\n    \tInitializeForLadWithoutOboDirect(config);\n    }\n\n    // Finally, fill in duration/tenant/role/roleInstance (for metric Json content)\n    _template.duration = config->GetDurationForEventName(eventName);\n    config->GetIdentityValues(_template.tenant, _template.role, _template.roleInstance);\n\n    XJsonBlobBlockCountsMgr::GetInstance().CreatePersistDirIfNotDone();\n}\n\n\nstatic void\nAppendBlobPathComponent(\n\t\tconst std::string& fieldName,\n\t\tconst std::string& fieldNameInBlobPath,\n\t\tMdsdConfig* config,\n\t\tstd::string& blobPathComponentString)\n{\n\tif (fieldName.empty()) {\n\t\tthrow std::invalid_argument(\"AppendBlobPathComponent(): fieldName cannot be empty\");\n\t}\n\n\tauto fieldValue = config->GetOboDirectPartitionFieldValue(fieldName);\n\tif (fieldValue.empty()) {\n\t\tstd::string msg = \"No CentralJson blob path field value found for field name \"\n\t\t\t\t+ fieldName + \". Make sure that your mdsd config XML contains \"\n\t\t\t\t\"OboDirectPartitionField element with the corresponding field name \"\n\t\t\t\t\"attribute in Management/Identity section.\";\n\t\tLogger::LogError(msg);\n\t\tthrow std::runtime_error(msg);\n\t}\n\tif (!blobPathComponentString.empty()) {\n\t\tblobPathComponentString.append(\"/\");\n\t}\n\tblobPathComponentString.append(fieldNameInBlobPath).append(\"=\").append(fieldValue);\n}\n\n\nvoid\nXJsonBlobSink::InitializeForOboDirect(MdsdConfig* config, const std::shared_ptr<mdsd::OboDirectConfig>& oboDirectConfig)\n{\n    _blobIntervalISO8601Duration = oboDirectConfig->timePeriods;\n    _blobIntervalSec = MdsTime::FromIS8601Duration(_blobIntervalISO8601Duration).to_time_t();\n    if (0 == _blobIntervalSec)\n    {\n        //Logger::LogError(\"Invalid ISO8601 duration (\" + blobIntervalISO8601Duration + \") given. This shouldn't happen. 
Default 'PT1H' will be used.\");\n        _blobIntervalSec = 60*60; // 1 hour\n        _blobIntervalISO8601Duration = \"PT1H\";\n    }\n\n    const auto& primaryPartitionFieldName = oboDirectConfig->primaryPartitionField; // handy reference\n    if (!primaryPartitionFieldName.empty())\n    {\n    \t// Compose primaryPartitionField (e.g., \"name1=xxx\")\n    \tAppendBlobPathComponent(primaryPartitionFieldName, primaryPartitionFieldName, config, _template.primaryPartitionField);\n    }\n\n    if (!oboDirectConfig->partitionFields.empty())\n    {\n        // Compose partitionFields (e.g., \"name1=xxx/name2=yyy\")\n        std::istringstream iss(oboDirectConfig->partitionFields); // oboDirectConfig.partitionFields is e.g., 'name1,name2'\n        while (iss.good())\n        {\n            std::string partitionFieldName;\n            getline(iss, partitionFieldName, ',');\n            if (!partitionFieldName.empty()) {\n                AppendBlobPathComponent(partitionFieldName, partitionFieldName, config, _template.partitionFields);\n            }\n        }\n    }\n}\n\n\nvoid\nXJsonBlobSink::InitializeForLadWithoutOboDirect(MdsdConfig* config)\n{\n\t// LAD JsonBlob's interval is fixed to 1 hour.\n    _blobIntervalSec = 60*60; // 1 hour\n    _blobIntervalISO8601Duration = \"PT1H\";\n\n    AppendBlobPathComponent(\"resourceId\", \"resourceId\", config, _template.primaryPartitionField);\n    AppendBlobPathComponent(\"agentIdentityHash\", \"i\", config, _template.agentIdentityHash);\n}\n\n\nvoid\nXJsonBlobSink::ComputeConnString()\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::ComputeConnString\");\n\n    const MdsEntityName& Target = _template.target;\t// Easy to use reference\n\n    // This is pretty easy for XJsonBlob; we currently support shared-key creds only.\n    // expires & eventName don't apply to XJsonBlob (at least yet), so just dummy vars passed.\n\n    MdsTime expires;\n    std::string eventName;\n\n    if (_creds->ConnectionString(Target, Credentials::ServiceType::Blob, eventName, _connString, expires) ) {\n        TRACEINFO(trace, Target << \"=[\" << _connString << \"] expires \" << expires << \"(N/A for XJsonBlob)\");\n    } else {\n        Logger::LogError(\"Error: Couldn't construct connection string for XJsonBlob eventName \" + Target.Basename());\n    }\n}\n\n// The only credentials that need to be validated are \"Shared key\" or an account SAS; if we have a service SAS or Autokey,\n// we'll find out if they work when we try to use them. We can validate shared key credentials\n// by creating the container for the eventName, if it doesn't already exist.  
Since this gets\n// called only during config load, it's reasonable to perform the operation synchronously.\nvoid\nXJsonBlobSink::ValidateAccess()\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::ValidateAccess\");\n\n    auto sasCreds = dynamic_cast<const CredentialType::SAS*>(_creds);\n    if (_creds->Type() == Credentials::SecretType::Key\n            || (sasCreds && sasCreds->IsAccountSas())) {\n        ComputeConnString();\t// Force computation, since this is called at config time\n        // \"Container name will be the concatenation of namespace, event name, and event version if present.\"\n        // \"For example: obodirectnamespacetestevent1ver2v0\"\n        // from https://microsoft.sharepoint.com/teams/SPS-AzMon/Shared Documents/Design Documents/Direct Mode Design.docx?web=1\n        _containerName = MdsdUtil::to_lower(_namespace + _template.target.Basename()); // Azure Storage allows lowercase only in container name\n        MdsdUtil::CreateContainer(_connString, _containerName);\n    }\n}\n\nXJsonBlobSink::~XJsonBlobSink()\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::Destructor\");\n}\n\n// Convert the CanonicalEntity to Json and add it to the accumulated buffer. Flush it\n// if it fills up.\n//\n// Note that AddRow() doesn't keep the CanonicalEntity; we copy anything we need from it.\nvoid\nXJsonBlobSink::AddRow(const CanonicalEntity &row, const MdsTime& qibase)\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::AddRow\");\n\n    TRACEINFO(trace, \"containerName = \" << _containerName << \", blob basetime = \" << _blobBaseTime << \", blob interval (sec) = \" << _blobIntervalSec << \", qibase = \" << qibase);\n\n    // If the query interval is beyond blob base time + blob interval,\n    // we should flush the current block and reset the base time accordingly.\n    if (qibase >= _blobBaseTime + _blobIntervalSec)\n    {\n        Flush();\n        _blobBaseTime = qibase.Round(_blobIntervalSec); // Make sure to round down to the specified blob interval\n        _blockList.reset();\n        TRACEINFO(trace, \"New blob basetime = \" << _blobBaseTime);\n    }\n\n    // If we have no in-progress request, either because we just flushed or because we're just\n    // starting up, make one.\n    if (!_request) {\n        try {\n            std::string requestId = utility::uuid_to_string(utility::new_uuid());\n            if (!_blockList) {\n                _blockList = std::make_shared<BlockListT>();\n            }\n            _request.reset(new XJsonBlobRequest(_template, _blobBaseTime, _blobIntervalISO8601Duration,\n                    _containerName, requestId, _blockList));\n            // This is the only place we create any XJBReq, so we must check if this is the first time\n            // to see if we need to try to reconstruct the block list from a persisted block count file.\n            // If there are other places where XJBReq is created, this must be done there as well...\n            if (!_firstReqCreated) {\n                XJsonBlobRequest::ReconstructBlockListIfNeeded(_request);\n                _firstReqCreated = true;\n            }\n        } catch (std::exception & ex) {\n            std::ostringstream msg;\n            msg << \"Exception (\" << ex.what() << \") caught while creating new XJsonBlobRequest; dropping row\";\n            trace.NOTE(msg.str());\n            Logger::LogError(msg.str());\n            MdsdMetrics::Count(\"Dropped_Entities\");\n            return;\n        }\n    }\n\n    // The XJsonBlobRequest object stores generated json rows that\n    // correspond 
to the rows added so far. The object also contains the metadata needed to\n    // determine the name of the blob when it gets written. (This includes a sequence number;\n    // if the blob fills, we flush it and start accumulating a new one with an\n    // incremented sequence.)\n\n    TRACEINFO(trace, \"Adding row to request ID \" << _request->UUID() << \": \" << row);\n\n    std::string jsonRow;\n    try {\n        jsonRow = row.GetJsonRow(_template.duration, _template.tenant, _template.role, _template.roleInstance);\n    }\n    catch (std::exception& e) {\n        Logger::LogError(e.what());\n        return;\n    }\n\n    _request->AddJsonRow(std::move(jsonRow));\n\n    TRACEINFO(trace, \"Block now contains \" << _request->EstimatedSize() << \" bytes\");\n\n    // If the size of the accumulated data is \"close\" to the maximum size of a JSON blob block,\n    // flush the block and prepare for the next one\n\n    if (_request->EstimatedSize() > _targetBlockSize) {\n        TRACEINFO(trace, \"Size of accumulated rows is larger than block size limit; flushing\");\n        Flush();\n    }\n}\n\n// Flush any data we're holding. We might never have allocated a request, or it might\n// be empty, or we might have data.\n// Post-condition: _request is nullptr. Next call to AddRow() will create a new request on demand.\nvoid\nXJsonBlobSink::Flush()\n{\n    Trace trace(Trace::JsonBlob, \"XJBS::Flush\");\n\n    TRACEINFO(trace, \"Begin XJBS::Flush on containerName = \" << _containerName);\n\n    if (nullptr == _request) {\n        // First time through. Just make the post-condition true\n        TRACEINFO(trace, \"Null _request; no action.\");\n        return;\n    }\n    // XJsonBlob must flush only if there's any data. Since we create requests on demand,\n    // an empty one really shouldn't happen; just drop it to satisfy the post-condition.\n    if (_request->EstimatedSize() == 0) {\n        TRACEINFO(trace, \"No data to flush; no action (deleting).\");\n        _request.reset();\n        return;\n    }\n\n    TRACEINFO(trace, \"Flush() request ID \" + _request->UUID());\n\n    // Detach the request and send it. Send() is fire-and-forget; the request object\n    // is responsible for deleting itself after that point.\n    try {\n        XJsonBlobRequest::Send(std::move(_request), _connString);\n    }\n    catch (std::exception & ex) {\n        trace.NOTE(std::string(\"Exception leaked from XJBR Send: \") + ex.what());\n    }\n}\n// vim: se sw=4 expandtab ts=4 :\n"
  },
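AddRow() in XJsonBlobSink.cc above rolls the blob window forward by rounding qibase down to a multiple of _blobIntervalSec via MdsTime::Round(). A minimal sketch of that round-down on plain time_t values (MdsTime itself isn't among the files shown here, so this is an illustration of the documented behavior, not its implementation):

```cpp
#include <ctime>
#include <iostream>

// Round t down to the nearest multiple of intervalSec, as the comment in AddRow() says.
std::time_t RoundDown(std::time_t t, std::time_t intervalSec) {
    return t - (t % intervalSec);
}

int main() {
    std::time_t hour = 3600;
    std::time_t t = 12 * 3600 + 47 * 60 + 12;           // 12:47:12 since midnight
    std::cout << RoundDown(t, hour) / 3600 << "\n";     // 12, i.e., rounded to 12:00:00
}
```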
  {
    "path": "Diagnostic/mdsd/mdsd/XJsonBlobSink.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _XJSONBLOBSINK_HH_\n#define _XJSONBLOBSINK_HH_\n\n#include \"IMdsSink.hh\"\n#include <string>\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n#include <memory>\n#include <mutex>\n#include <condition_variable>\n\nclass CanonicalEntity;\nclass Credentials;\nclass MdsdConfig;\nclass MdsValue;\nclass XJsonBlobRequest;\nnamespace mdsd {\n\tstruct OboDirectConfig;\n}\nnamespace azure { namespace storage {\n    class block_list_item;\n}}\n\n\n// Thin object wrapper, supporting synchronization across async task threads (with owner name)\ntemplate <typename T>\nclass ObjectWithOwnership\n{\npublic:\n    ObjectWithOwnership() {}\n\n    T& get() { return _object; }\n\n    void LockIfOwnedByNoneThenSetOwner(const std::string& ownerName)\n    {\n        if (ownerName.empty()) {\n            throw std::invalid_argument(\"Passed ownerName is empty in ObjectWithOwnership::LockIfOwnedByNoneThenSetOwner\");\n        }\n\n        std::unique_lock<std::mutex> lock(_mutex);\n        _cv.wait(lock, [this]{ return _ownerName.empty(); });\n        _ownerName = ownerName;\n    }\n\n    // Caller must make sure that the set owner is itself.\n    void ResetOwnerAndNotify()\n    {\n        std::lock_guard<std::mutex> lock(_mutex);\n\n        if (_ownerName.empty()) {\n            throw std::runtime_error(\"Current _ownerName is empty in ObjectWithOwnership::ResetOwnerAndNotify\");\n        }\n\n        _ownerName.clear();\n        _cv.notify_all();\n    }\n\nprivate:\n    T _object;\n    std::mutex _mutex;\n    std::string _ownerName;\n    std::condition_variable _cv;\n};\n\n\nusing BlockListT = ObjectWithOwnership<std::vector<azure::storage::block_list_item>>;\n\n\nclass XJsonBlobSink : public IMdsSink\n{\npublic:\n\n    struct RequestInfo\n    {\n    public:\n        const MdsEntityName target;\t\t// Destination storage container\n        std::string primaryPartitionField;\n            // E.g., \"resourceId=...\". 'resourceId' is obtained from OboDirectConfig.primaryPartitionField,\n            // and '...' needs to be obtained from somewhere else (Portal/LAD config? -- WAD is blocked on this)\n        std::string agentIdentityHash;\n        std::string partitionFields;\n            // E.g., \"resourceId=xxx/subscriptionId=yyy\". 'resourceId' and 'subscriptionId' are obtained from OboDirectConfig.partitionFields,\n            // and 'xxx' and 'yyy' need to be obtained from somewhere else (OBO service? What about LAD scenario?)\n        std::string duration;\t// E.g., \"PT1M\" for metric events. \"\" for non-metric events. 
Will be used by Json construction\n\n        std::string tenant;\t// Tenant name in metric Json content\n        std::string role;   // Role name in metric Json content\n        std::string roleInstance; // RoleInstance name in metric Json content\n\n        RequestInfo(const MdsEntityName& t) : target(t) {}\n    };\n\n    virtual bool IsXJsonBlob() const { return true; }\n\n    XJsonBlobSink(MdsdConfig* config, const MdsEntityName &target, const Credentials* c);\n\n    virtual ~XJsonBlobSink();\n\n    virtual void AddRow(const CanonicalEntity&, const MdsTime&);\n\n    virtual void Flush();\n\n    virtual void ValidateAccess();\n\nprivate:\n    XJsonBlobSink();\n\n    // This code path is currently unused (as we haven't actually\n    // implemented the OboDirect feature), but is in place for the future.\n    void InitializeForOboDirect(MdsdConfig* config, const std::shared_ptr<mdsd::OboDirectConfig>& oboDirectConfig);\n\n    // This is used mostly for the LAD JsonBlob sink scenario.\n    void InitializeForLadWithoutOboDirect(MdsdConfig* config);\n\n    void ComputeConnString();\n\n    RequestInfo _template;\n\n    const Credentials* _creds;\n\n    std::string _namespace;\n\n    std::string _containerName;\n\n    std::shared_ptr<XJsonBlobRequest> _request;\n\n    // Per-blob block list that needs to be persisted across multiple requests,\n    // so keep it here as a shared ptr. XJBS just maintains a pointer (so that\n    // it can be persisted across multiple requests) and all operations on it\n    // are done by XJBR.\n    std::shared_ptr<BlockListT> _blockList;\n\n    // Block list reconstruction from a persisted block count file is needed\n    // only for the first request, so remember whether the first request was created or not.\n    bool _firstReqCreated;\n\n    MdsTime _blobBaseTime;  // Base time for which we're currently building a blob.\n\n    time_t _blobIntervalSec;        // E.g., 1 hour (3600 sec). Fixed interval in seconds for a blob.\n    std::string _blobIntervalISO8601Duration;   // E.g., \"PT1H\". _blobIntervalSec should be computed from this. If this is not a correct ISO8601 string, it should be \"PT1H\" by default.\n\n    // Maintained by ComputeConnString()\n    std::string _connString;\n\n    // Other constants\n    static constexpr size_t _targetBlockSize { 4128768 };\t// 4MB - 64KB\n};\n\n#endif // _XJSONBLOBSINK_HH_\n\n// vim: se sw=4 :\n"
  },
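The ObjectWithOwnership<T> wrapper declared in XJsonBlobSink.hh above serializes access across async tasks: a second would-be owner blocks in LockIfOwnedByNoneThenSetOwner() until the current owner calls ResetOwnerAndNotify(). A small runnable demo using a trimmed copy of the class:

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// Trimmed copy of the ObjectWithOwnership<T> wrapper above, for a standalone demo.
template <typename T>
class ObjectWithOwnership {
public:
    T& get() { return _object; }
    void LockIfOwnedByNoneThenSetOwner(const std::string& owner) {
        std::unique_lock<std::mutex> lock(_mutex);
        _cv.wait(lock, [this] { return _ownerName.empty(); });  // block until unowned
        _ownerName = owner;
    }
    void ResetOwnerAndNotify() {
        std::lock_guard<std::mutex> lock(_mutex);
        _ownerName.clear();
        _cv.notify_all();                                       // wake waiting owners
    }
private:
    T _object;
    std::mutex _mutex;
    std::string _ownerName;
    std::condition_variable _cv;
};

int main() {
    ObjectWithOwnership<std::vector<int>> blocks;
    blocks.LockIfOwnedByNoneThenSetOwner("request-A");     // main thread owns the list
    std::thread b([&] {
        blocks.LockIfOwnedByNoneThenSetOwner("request-B"); // blocks until A releases
        blocks.get().push_back(2);
        blocks.ResetOwnerAndNotify();
    });
    blocks.get().push_back(1);
    blocks.ResetOwnerAndNotify();                          // wakes thread B
    b.join();
    std::cout << blocks.get().size() << "\n";              // 2
}
```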
  {
    "path": "Diagnostic/mdsd/mdsd/XTableConst.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XTableConst.hh\"\n\nunsigned int XTableConstants::_backoffBaseTime = 10;\nunsigned int XTableConstants::_backoffLimit = 3;\n\nint XTableConstants::_sdkRetryPolicyInterval = 3;\nint XTableConstants::_sdkRetryPolicyLimit = 5;\nint XTableConstants::_initialOpTimeout = 30;\nint XTableConstants::_defaultOpTimeout = 30;\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/XTableConst.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _XTABLECONST_HH_\n#define _XTABLECONST_HH_\n\n// \"Constants\" used by XTableSink, DataUploader, etc.\n// These are generally run-time constants. They've been encapsulated in this class so they\n// can be manipulated at run-time by test code, generally to reduce timeouts or retry counts.\n\nclass XTableConstants\n{\npublic:\n    // Getters\n\tstatic unsigned int BackoffBaseTime()  { return _backoffBaseTime; }\n\tstatic unsigned int BackoffLimit()     {return _backoffLimit; }\n\n\tstatic int SDKRetryPolicyInterval() { return _sdkRetryPolicyInterval; }\n\tstatic int SDKRetryPolicyLimit()    { return _sdkRetryPolicyLimit; }\n\tstatic int InitialOpTimeout()       { return _initialOpTimeout; }\n\tstatic int DefaultOpTimeout()       { return _defaultOpTimeout; }\n\n\tstatic unsigned int MaxItemPerBatch() { return 100; }\t// Not alterable\n\n\n    // Setters\n\tstatic void SetBackoffBaseTime(unsigned int val) { _backoffBaseTime = val; }\n\tstatic void SetBackoffLimit(unsigned int val) { _backoffLimit = val; }\n\n\tstatic void SetSDKRetryPolicyInterval(int val) { _sdkRetryPolicyInterval = val; }\n\tstatic void SetSDKRetryPolicyLimit(int val) { _sdkRetryPolicyLimit = val; }\n\tstatic void SetInitialOpTimeout(int val) { _initialOpTimeout = val; }\n\tstatic void SetDefaultOpTimeout(int val) { _defaultOpTimeout = val; }\n\nprivate:\n\tXTableConstants();\n\tXTableConstants(const XTableConstants&) = delete;\n\n\tstatic unsigned int _backoffBaseTime;\n\tstatic unsigned int _backoffLimit;\n\n\tstatic int _sdkRetryPolicyInterval;\n\tstatic int _sdkRetryPolicyLimit;\n\tstatic int _initialOpTimeout;\n\tstatic int _defaultOpTimeout;\n};\n\n#endif // _XTABLECONST_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/XTableHelper.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XTableHelper.hh\"\n#include \"XTableConst.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"Utility.hh\"\n\nusing namespace azure::storage;\nusing std::string;\n\nXTableHelper*\nXTableHelper::GetInstance()\n{\n    static XTableHelper* s_instance = new XTableHelper();\n    return s_instance;\n}\n\nXTableHelper::XTableHelper()\n{\n}\n\n// Delete all the cloud_table objects stored in the cache map.\nXTableHelper::~XTableHelper()\n{\n    Trace trace(Trace::XTable, \"XTableHelper destructor\");\n\n    try\n    {\n        cloudTableMap.clear();\n    }\n    catch(const std::exception& e)\n    {\n        LogError(\"Error: ~XTableHelper(): unexpected std::exception: \" + string(e.what()));\n    }\n    catch(...)\n    {\n    }\n}\n\n/*\n  Each new table object is saved in the hash table. Because the hash table will\n  be shared by multiple threads, using mutex lock for table operations.\n  \n  The tablename is the actual table name, not a URI. Ie the connStr specifies a SAS, then Azure requires\n  the tablename to match the tn= component in the SAS; otherwise, either fetching the table reference will\n  fail (here) or using it will fail (probably by the caller of this function).\n */\nstd::shared_ptr<azure::storage::cloud_table>\nXTableHelper::CreateTable(const string& tablename, const string& connStr)\n{    \n    Trace trace(Trace::XTable, \"XTableHelper::CreateTable\");\n    std::shared_ptr<azure::storage::cloud_table> tableObj;\n\n    try {\n        trace.NOTE(\"tablename='\" + tablename + \"'; connection string='\" + connStr + \"'.\");\n        if (MdsdUtil::NotValidName(tablename)) {\n            LogError(\"Error: invalid table name: '\" + tablename + \"'; connection string='\" + connStr + \"'.\");\n            return nullptr;\n        }\n        if (MdsdUtil::NotValidName(connStr)) {\n            LogError(\"Error: invalid connection string: '\" + connStr + \"'. tablename='\" + tablename + \"'\");\n            return nullptr;\n        }\n\n        const auto key = connStr + tablename;\n        std::lock_guard<std::mutex> lock(tablemutex);\n\n        auto ctIter = cloudTableMap.find(key);\n        if (ctIter != cloudTableMap.end()) {\n            trace.NOTE(\"Found table object in cache. tablename='\" + tablename + \"'\");\n            tableObj = ctIter->second;\n        }\n        else {\n            trace.NOTE(\"Create new cloud_table for '\" + tablename + \"' with connection string='\" + connStr + \"'.\");\n            tableObj = std::make_shared<cloud_table>(\n                cloud_storage_account::parse(connStr)\n                .create_cloud_table_client()\n                .get_table_reference(tablename)\n                );\n\n\n            cloudTableMap[key] = tableObj;\n        }\n    }\n    catch(const std::exception& e)\n    {\n        LogError(\"Error: XTableHelper::CreateTable(\" + tablename + \"): unexpected std::exception: \" + string(e.what()) );\n    }\n    catch(...)\n    {\n        LogError(\"Error: XTableHelper::CreateTable(\" + tablename + \"): unexpected exception\");\n    }\n\n    return tableObj;\n}\n\n\n/*\n  Handle storage exception. Return true if the execution can be retried. Return false if no value to retry.\n \n  By default, not report error when an exception occurs. \n  \n  The default behavior is to retry, except the following cases, where the HTTP status code is:\n  - BadRequest 400: bad API request. report error. 
(ex: bad credential)\n  - NotFound 404 : table not found. report error.\n  - Forbidden 403 : permission denied. report error.\n  - Conflict 409 : data already uploaded or duplicates found. \n    If this is the first time, report error. if not first time, shouldn't report error.\n */\nbool XTableHelper::HandleStorageException(const string & tablename, const storage_exception& e,\n\t\t\t\t\t  size_t * pnerrs, bool isFirstTime, bool * isNoSuchTable)\n{\n\tTrace trace(Trace::XTable, \"HandleStorageException\");\n\n\tbool retryableErr = true;    \n\tbool suppressErrorMsg = false;\n\n\ttrace.NOTE(std::string(\"Storage exception: \") + e.what());\n\n\tauto msg = std::string(e.what()) + \"\\n\";\n\t\n\trequest_result result = e.result();\n\tstorage_extended_error err = result.extended_error();\n\tif (!err.message().empty())\n\t{\n\t\tmsg += err.message();\n\t\ttrace.NOTE(\"Extended info: \" + err.message());\n\t}\n                \n\t// the retryable API is not accurate (ex for a client timeout, which retry may work, but retryable\n\t// is still false. so not use it as of 10/17/14.)\n\t// bool retryable1 = e.retryable();\n\t// msg += ustring(\"exception is retryable? = \") + std::to_string(retryable1);\n\n\tweb::http::status_code httpcode = result.http_status_code();\n    \ttrace.NOTE(\"HTTP status \" + std::to_string(httpcode));\n\tmsg += \"\\nStatusCode=\" + std::to_string(httpcode);\n\n\tbool isErr = false;\n\n\t{\n\t    using web::http::status_codes;\n\t    if (httpcode == status_codes::NotFound && isNoSuchTable) {\n\t    \t*isNoSuchTable = true;\n\t\t// By handing us a valid isNoSuchTable ptr, caller has indicated\n\t\t// a desire to handle the No Such Table error directly.\n\t\tsuppressErrorMsg = true;\n\t    }\n\t    if (httpcode == status_codes::NotFound || httpcode == status_codes::BadRequest || httpcode == status_codes::Forbidden) {\n\t\t    isErr = true;\n\t    } else if (httpcode == status_codes::Conflict) {\n\t\t    retryableErr = false;\n\t\t    if (isFirstTime) {\n\t\t\t    isErr = true;\n\t\t    }\n\t    }\n\t}\n\n\tif (isErr) { \n\t\tif (!suppressErrorMsg) {\n\t\t\tLogError(\"Azure Storage Exception for table \\\"\" + tablename + \"\\\": \" + msg);\n\t\t}\n\t\tretryableErr = false;\n\t\tif (pnerrs) (*pnerrs)++;\n\t}\n\tif (!retryableErr) {\n\t\ttrace.NOTE(\"Status code \" + std::to_string(httpcode) + \" is not retryable. Abort further retry.\");\n\t}\n\n\treturn retryableErr;\n}\n\nvoid XTableHelper::CreateRequestOperation(table_request_options& requestOpt) const\n{      \n    exponential_retry_policy retry_policy(\n          std::chrono::seconds(XTableConstants::SDKRetryPolicyInterval()), XTableConstants::SDKRetryPolicyLimit());\n    requestOpt.set_retry_policy(retry_policy);\n\n    requestOpt.set_server_timeout(std::chrono::seconds(XTableConstants::DefaultOpTimeout()));\n    requestOpt.set_maximum_execution_time(std::chrono::seconds(XTableConstants::InitialOpTimeout()));\n\n    requestOpt.set_payload_format(table_payload_format::json_no_metadata);\n}\n\nvoid XTableHelper::CreateOperationContext(operation_context& c) const\n{    \n    std::string id = utility::uuid_to_string(utility::new_uuid());\n    c.set_client_request_id(id);\n}\n\n/*\n Error level logging. This is to isolate XTableHelper logging.\n */\nvoid XTableHelper::LogError(const std::string & msg) const\n{    \n    auto msg2 = MdsdUtil::GetTid() + \": \" + msg;\n    Logger::LogError(msg2);\n}\n\n// vim: se sw=4 :\t// Would prefer 8, but... c'est la vie\n"
  },
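The retry policy that HandleStorageException() in XTableHelper.cc above implements can be summarized as: 400/403/404 are terminal and reported, 409 stops retries and is reported only on the first attempt, and anything else is considered retryable. A minimal sketch of just that classification:

```cpp
#include <iostream>

// Mirrors the status-code policy documented above HandleStorageException().
bool IsRetryable(int httpStatus, bool isFirstTime, bool& reportError) {
    reportError = false;
    switch (httpStatus) {
    case 400: case 403: case 404:
        reportError = true;          // bad request / forbidden / no such table
        return false;
    case 409:
        reportError = isFirstTime;   // duplicate upload: only the first attempt is an error
        return false;
    default:
        return true;                 // e.g., throttling or timeouts: worth retrying
    }
}

int main() {
    bool err;
    std::cout << IsRetryable(500, true, err) << "\n";                 // 1 (retry)
    std::cout << IsRetryable(409, false, err) << " " << err << "\n";  // 0 0
}
```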
  {
    "path": "Diagnostic/mdsd/mdsd/XTableHelper.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _XTABLEHELPER_HH_\n#define _XTABLEHELPER_HH_\n\n#include <unordered_map>\n#include <string>\n#include \"was/storage_account.h\"\n#include \"was/table.h\"\n#include <mutex>\n#include <memory>\n\n/*\n  This class manages storage table operations. Because same event can be uploaded\n  to multiple tables, instead of creating new table, uploading, then deleting it, each new\n  table object will be saved in cache. All tables are freed at final destruction time.\n */\n\nclass XTableHelper\n{\npublic:    \n    static XTableHelper* GetInstance();\n\n    // disable copy and move contructors\n    XTableHelper(XTableHelper&& h) = delete;\n    XTableHelper& operator=(XTableHelper&& h) = delete;\n\n    XTableHelper(const XTableHelper&) = delete;\n    XTableHelper& operator=(const XTableHelper &) = delete;\n\n    // create a new cloud table using connection string (ex: AccountName/Key, or SAS Key)\n    // The table will be stored in a cache for future fast reference.\n    // tablename: the actual tablename, not a URI.\n    std::shared_ptr<azure::storage::cloud_table> CreateTable(const std::string& tablename, const std::string& connStr);\n\n    // Handle storage exception. Return true if the execution can be retried. Return false \n    // if no value to retry. Return the number of errors found by pnerrs. If isFirstTry is\n    // true, it means this is the first time to run the upload operation on this dataset.\n    // Only updates *pnerrs and *isNoSuchTable if those pointers are not nullptr.\n    bool HandleStorageException(const std::string& tablename, const azure::storage::storage_exception& e,\n    \t\t\t\tsize_t * pnerrs, bool isFirstTry, bool * isNoSuchTable);\n\n    // Create a new request operation object.\n    void CreateRequestOperation(azure::storage::table_request_options& options) const;\n\n    // Create a new operation context object.\n    void CreateOperationContext(azure::storage::operation_context & context) const;\n\nprivate:\n    XTableHelper();\n    ~XTableHelper();\n\n    // Log error message. This function is to make isolated test easiler.\n    void LogError(const std::string& msg) const;\n\n    // This will store all the created cloud_table objects. Key=tableUri;\n    std::unordered_map<std::string, std::shared_ptr<azure::storage::cloud_table>> cloudTableMap;\n    std::mutex tablemutex;\n};\n\n\n#endif // _XTABLEHELPER_HH_\n"
  },
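CreateTable() caches each cloud_table under the composite key connStr + tablename, guarded by a mutex. A minimal sketch of the same memoization pattern with a stand-in table type (FakeTable is hypothetical; the real code stores azure::storage::cloud_table):

```cpp
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

struct FakeTable { std::string name; };   // stand-in for azure::storage::cloud_table

class TableCache {
public:
    std::shared_ptr<FakeTable> Get(const std::string& tablename, const std::string& connStr) {
        const auto key = connStr + tablename;   // same composite key as the real code
        std::lock_guard<std::mutex> lock(_mutex);
        auto it = _map.find(key);
        if (it != _map.end()) return it->second;      // cache hit
        auto obj = std::make_shared<FakeTable>(FakeTable{tablename});
        _map[key] = obj;                              // construct only on a miss
        return obj;
    }
private:
    std::unordered_map<std::string, std::shared_ptr<FakeTable>> _map;
    std::mutex _mutex;
};

int main() {
    TableCache cache;
    auto a = cache.Get("Events", "conn1");
    auto b = cache.Get("Events", "conn1");
    std::cout << (a == b) << "\n";   // 1: second lookup hits the cache
}
```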
  {
    "path": "Diagnostic/mdsd/mdsd/XTableRequest.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XTableRequest.hh\"\n#include \"XTableConst.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"XTableHelper.hh\"\n#include \"MdsdMetrics.hh\"\n#include <algorithm>\n#include <sstream>\n\nXTableRequest::XTableRequest(const std::string& connStr, const std::string& tablename)\n\t: _tablename(tablename), _rowCount(0)\n{\n\tTrace trace(Trace::XTable, \"XTR Constructor\");\n\n\tauto helper = XTableHelper::GetInstance();\n\n\t_table = helper->CreateTable(tablename, connStr);\n\tif (!_table) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"CreateTable(\" << tablename << \", '\" << connStr << \"') returned nullptr\";\n\t\ttrace.NOTE(msg.str());\n\t\tthrow std::runtime_error(msg.str());\n\t}\n\n\thelper->CreateRequestOperation(_requestOptions);\n\thelper->CreateOperationContext(_context);\n\n\t_useUpsert = (tablename == \"SchemasTable\"); // Ugh - such a hack\n}\n\nbool\nXTableRequest::AddRow(const azure::storage::table_entity & row)\n{\n\tTrace trace(Trace::XTable, \"XTR::AddRow\");\n\tif (_rowCount == XTableConstants::MaxItemPerBatch()) {\n\t\ttrace.NOTE(\"Batch is already full; ignoring row\");\n\t\treturn false;\n\t}\n\n\tif (_useUpsert) {\n\t\t_batchOperation.insert_or_replace_entity(row);\n\t} else {\n\t\t_batchOperation.insert_entity(row);\n\t}\n\t_rowCount++;\n\n\treturn true;\n}\n\n/*static*/ void\nXTableRequest::Send(std::unique_ptr<XTableRequest> req)\n{\n\tTrace trace(Trace::XTable, \"XTR::Send\");\n\treq->_rowCount = req->_batchOperation.operations().size();\n\n\tMdsdMetrics::Count(\"XTable_send\");\n\tMdsdMetrics::Count(\"XTable_rowsSent\", req->_rowCount);\n\tif (req->_rowCount == 0) {\n\t\ttrace.NOTE(\"Shortcut completion: zero row count\");\n\t\treturn;\n\t}\n\n\t// Need to convert the unique_ptr to shared_ptr for lambda capture inside\n\tXTableRequest::DoWork(std::shared_ptr<XTableRequest>(req.release()), boost::system::error_code());\n}\n\n/*static*/ void\nXTableRequest::DoWork(std::shared_ptr<XTableRequest> req, const boost::system::error_code &error)\n{\n\tTrace trace(Trace::XTable, \"XTR::DoWork\");\n\n\tif (error) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"DoWork() observed error \" << error << \" from previous task\";\n\t\ttrace.NOTE(msg.str());\n\t\tLogger::LogError(msg.str());\n\t\treturn;\n\t}\n\n\treq->_table->execute_batch_async(req->_batchOperation, req->_requestOptions, req->_context)\n\t.then([req](pplx::task<std::vector<azure::storage::table_result> > t) { DoContinuation(req, t); })\n\t.then([=](pplx::task<void> previous_task) {\n\t\ttry {\n\t\t\tprevious_task.wait();\n\t\t}\n\t\tcatch (std::exception & e) {\n\t\t\tMdsdMetrics::Count(\"XTable_failedGeneralException\");\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Writing to table '\" << req->_tablename << \"' \"\n\t\t\t\t<< \"caught exception: \" << e.what();\n\t\t\tLogger::LogError(\"XTR::DoWork(): \" + msg.str());\n\t\t}\n\t\tcatch(...) 
{\n\t\t\tMdsdMetrics::Count(\"XTable_failedUnknownException\");\n\t\t\tLogger::LogError(\"XTR::DoWork() caught unknown exception.\");\n\t\t}\n\t});\n}\n\n/*static*/ void\nXTableRequest::DoContinuation(std::shared_ptr<XTableRequest> req, pplx::task<std::vector<azure::storage::table_result> > t)\n{\n\tTrace trace(Trace::XTable, \"XTR::DoContinuation\");\n\tsize_t errcount = 0;\n\ttry\n\t{\n\t\tt.wait();\n\t\tfor (const auto &result : t.get() ) {\n\t\t\tif (result.http_status_code() != web::http::status_codes::NoContent) {\n\t\t\t\tstd::ostringstream msg;\n\t\t\t\tmsg << \"Unexpected HTTP status \" << result.http_status_code() << \" when writing to \" << req->_tablename;\n\t\t\t\ttrace.NOTE(msg.str());\n\t\t\t\tLogger::LogError(msg.str());\n\t\t\t\terrcount++;\n\t\t\t}\n\t\t}\n\t\tif (errcount) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << \"Total of \" << errcount << ((errcount==1)?\" error\":\" errors\") << \" while writing to \" << req->_tablename;\n\t\t\tLogger::LogError(msg.str());\n\t\t\tMdsdMetrics::Count(\"XTable_completeWithErrors\");\n\t\t\ttrace.NOTE(\"Completed but some rows not successful\");\n\t\t}\n\t\telse {\n\t\t\tMdsdMetrics::Count(\"XTable_complete\");\n\t\t\ttrace.NOTE(\"Complete\");\n\t\t}\n\t\tMdsdMetrics::Count(\"XTable_rowsSuccess\", req->_rowCount - std::min(errcount, req->_rowCount));\n\t}\n\tcatch (azure::storage::storage_exception & e) {\n\t\ttrace.NOTE(\"Caught storage exception for table \" + req->_tablename);\n\t\tbool isNoSuchTable = false;\n\t\tXTableHelper::GetInstance()->HandleStorageException(req->_tablename, e, &errcount,\n\t\t\t\t\t\t\t\ttrue, &isNoSuchTable);\n\t\tif (isNoSuchTable) {\n\t\t\t// Table doesn't exist. Let's see if we can create it.\n\t\t\ttrace.NOTE(\"Trying to create table \" + req->_tablename);\n\t\t\tMdsdMetrics::Count(\"XTable_tableCreate\");\n\t\t\treq->_table->create_if_not_exists_async(req->_requestOptions, req->_context)\n\t\t\t  .then([req](pplx::task<bool> t) {\n\t\t\t\tTrace trace(Trace::XTable, \"XTR Create Table lambda\");\n\t\t\t\ttry\n\t\t\t\t{\n\t\t\t\t\tt.wait();\n\t\t\t\t\t(void) t.get();\t\t// Don't care if it was already created\n\t\t\t\t\t// If we get here, the table exists; let's retry the initial operation\n\t\t\t\t\tMdsdMetrics::Count(\"XTable_retries\");\n\t\t\t\t\tXTableRequest::DoWork(req, boost::system::error_code());\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tcatch (azure::storage::storage_exception & e) {\n\t\t\t\t\t// Just emit the necessary error messages\n\t\t\t\t\t(void)XTableHelper::GetInstance()\n\t\t\t\t\t\t->HandleStorageException(req->_tablename, e, nullptr, true, nullptr);\n\t\t\t\t}\n\t\t\t\tcatch (std::exception& e) {\n\t\t\t\t\tstd::string msg = \"While trying to create table \" + req->_tablename\n\t\t\t\t\t\t\t+ \": caught exception: \" + e.what();\n\t\t\t\t\ttrace.NOTE(msg);\n\t\t\t\t\tLogger::LogError(msg);\n\t\t\t\t}\n\t\t\t\tcatch (...) {\n\t\t\t\t\tstd::string msg = \"While trying to create table \" + req->_tablename\n\t\t\t\t\t\t\t+ \": caught unknown exception.\";\n\t\t\t\t\ttrace.NOTE(msg);\n\t\t\t\t\tLogger::LogError(msg);\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn;\n\t\t}\n\t}\n\tcatch (std::exception & e) {\n\t\tMdsdMetrics::Count(\"XTable_failedGeneralException\");\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Caught exception: \" << e.what();\n\t\ttrace.NOTE(msg.str());\n\t\tLogger::LogError(\"XTR::DoContinuation(): \" + msg.str());\n\t}\n\tcatch (...) 
{\n\t\tMdsdMetrics::Count(\"XTable_failedUnknownException\");\n\t\ttrace.NOTE(\"Caught unknown exception.\");\n\t\tLogger::LogError(\"XTR::DoContinuation() caught unknown exception.\");\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
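The recovery path in DoContinuation() above is: if the batch fails with "no such table", create the table and replay the original batch once. A minimal synchronous sketch of that control flow (ExecuteBatch and CreateTableIfNotExists are hypothetical stand-ins for the async storage calls):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct NoSuchTable : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// Hypothetical stand-ins for execute_batch_async / create_if_not_exists_async.
void ExecuteBatch(bool tableExists) {
    if (!tableExists) throw NoSuchTable("404");
}
void CreateTableIfNotExists(bool& tableExists) { tableExists = true; }

int main() {
    bool tableExists = false;
    try {
        ExecuteBatch(tableExists);
    } catch (const NoSuchTable&) {
        std::cout << "table missing; creating and retrying\n";
        CreateTableIfNotExists(tableExists);
        ExecuteBatch(tableExists);   // single retry of the initial operation
    }
    std::cout << "batch written\n";
}
```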
  {
    "path": "Diagnostic/mdsd/mdsd/XTableRequest.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _XTABLEREQUEST_HH_\n#define _XTABLEREQUEST_HH_\n#include <string>\n#include <vector>\n#include <cstdlib>\n#include <memory>\n#include <was/storage_account.h>\n#include <was/table.h>\n#include <boost/system/error_code.hpp>\n\nclass XTableRequest\n{\npublic:\n\tXTableRequest(const std::string& connStr, const std::string& tablename);\n\n\tbool AddRow(const azure::storage::table_entity &row);\n\tstatic void Send(std::unique_ptr<XTableRequest> req);\n\tsize_t Size() { return _rowCount; }\n\nprivate:\n\tstd::shared_ptr<azure::storage::cloud_table> _table;\n\tstd::string _tablename;\n\tazure::storage::table_batch_operation _batchOperation;\n\tazure::storage::table_request_options _requestOptions;\n\tazure::storage::operation_context _context;\n\n\tsize_t _rowCount;\n\tbool _useUpsert;\n\n\tstatic void DoWork(std::shared_ptr<XTableRequest> req, const boost::system::error_code&);\n\tstatic void DoContinuation(std::shared_ptr<XTableRequest> req, pplx::task<std::vector<azure::storage::table_result> > t);\n};\n\n#endif // _XTABLEREQUEST_HH_\n\n// vim: se sw=8 :\n"
  },
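Send() in XTableRequest.cc converts its unique_ptr argument into a shared_ptr (std::shared_ptr<XTableRequest>(req.release())) because the continuation lambdas copy their captures, and every copy must share ownership of the request until the last async stage completes. A small sketch of the same pattern:

```cpp
#include <functional>
#include <iostream>
#include <memory>

struct Request { int rows = 3; };

std::function<void()> MakeContinuation(std::unique_ptr<Request> req) {
    // unique_ptr is move-only, so transfer it into a copyable shared_ptr first.
    std::shared_ptr<Request> shared(req.release());
    return [shared] { std::cout << "uploaded " << shared->rows << " rows\n"; };
}

int main() {
    auto cont = MakeContinuation(std::unique_ptr<Request>(new Request));
    auto copy = cont;   // both copies share the request; it dies with the last one
    copy();
}
```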
  {
    "path": "Diagnostic/mdsd/mdsd/XTableSink.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"XTableSink.hh\"\n\n#include <iterator>\n#include <sstream>\n\n#include \"CanonicalEntity.hh\"\n#include \"Engine.hh\"\n#include \"MdsdConfig.hh\"\n#include \"Credentials.hh\"\n#include \"Utility.hh\"\n#include \"RowIndex.hh\"\n#include \"Trace.hh\"\n#include \"MdsdMetrics.hh\"\n#include \"XTableRequest.hh\"\n#include \"StoreType.hh\"\n\n#include \"stdafx.h\"\n#include \"was/table.h\"\n#include \"was/common.h\"\n\nusing std::string;\nusing azure::storage::entity_property;\n\nXTableSink::XTableSink(MdsdConfig* config, const MdsEntityName &target, const Credentials* c)\n  : IMdsSink(StoreType::Type::XTable), _config(config), _target(target), _creds(c)\n{\n\tTrace trace(Trace::XTable, \"XTS::Constructor\");\n\n\tif (!target.IsSchemasTable()) {\n\t\t// Build the identity columns metadata only once\n\t\t// Similarly, compute the partition data only once.\n\t\t// SchemasTable has no identity columns (in this sense) and does partitioning differently\n\t\tconfig->GetIdentityColumnValues(std::back_inserter(_identityColumns));\n\t\tstd::vector<string> identValues;\n\t\tidentValues.reserve(_identityColumns.size());\n\t\tfor (const ident_col_t& idpair : _identityColumns) {\n\t\t\tidentValues.push_back(idpair.second);\n\t\t}\n\n\t\t_identColumnString = MdsdUtil::Join(identValues, \"___\");\n\t\tunsigned long long N = MdsdUtil::EasyHash(_identColumnString) % (unsigned long long)(_config->PartitionCount());\n\t\t_N = MdsdUtil::ZeroFill(N, 19);\n\t}\n\t_estimatedBytes = 0;\n}\n\nvoid\nXTableSink::ComputeConnString()\n{\n\tTrace trace(Trace::XTable, \"XTS::ComputeConnString\");\n\n\tif (_creds->ConnectionString(_target, Credentials::ServiceType::XTable, _fullTableName, _connString, _rebuildTime) ) {\n\t\tif (trace.IsActive()) {\n\t\t\tstd::ostringstream msg;\n\t\t\tmsg << _fullTableName << \"=[\" << _connString << \"] expires \" << _rebuildTime;\n\t\t\ttrace.NOTE(msg.str());\n\t\t}\n\t} else {\n\t\tLogger::LogError(\"Couldn't construct connection string for table \" + _target.Name());\n\t}\n}\n\nXTableSink::~XTableSink()\n{\n\tTrace trace(Trace::XTable, \"XTS::Destructor\");\n}\n\n// Convert the CanonicalEntity to a table_entity and add it to our internal request. Flush\n// the request if it fills up.\n//\n// Note that AddRow() doesn't keep the CanonicalEntity; we copy anything we need from it.\nvoid\nXTableSink::AddRow(const CanonicalEntity &row, const MdsTime& qibase)\n{\n\tTrace trace(Trace::XTable, \"XTS::AddRow\");\n\n\t// If this row is for a different partition, flush what we have and track the new partition\n\tif (row.PartitionKey() != _pkey) {\n\t\tFlush();\n\t\t_pkey = row.PartitionKey();\n\t}\n\n\t// If we have no in-progress request, either because we just flushed or because we're just\n\t// starting up, make one.\n\tif (! 
_request) {\n\t\ttry {\n\t\t\tComputeConnString();\n\t\t\t_request.reset(new XTableRequest(_connString, _fullTableName));\n\t\t}\n\t\tcatch (std::exception &ex) {\n\t\t\tstd::ostringstream msg;\n\n\t\t\tmsg << \"Exception (\" << ex.what() << \") caught while creating new XTableRequest; dropping row\";\n\t\t\ttrace.NOTE(msg.str());\n\t\t\tLogger::LogError(msg.str());\n\t\t\tMdsdMetrics::Count(\"Dropped_Entities\");\n\t\t\treturn;\n\t\t}\n\t}\n\n\tazure::storage::table_entity e { _pkey, row.RowKey() };\n\tazure::storage::table_entity::properties_type& properties = e.properties();\n\tsize_t byteCount = 2 * (_pkey.length() + row.RowKey().length()) + 4;\n\tbool oversize = false;\n\n\tfor (const auto & col : row) {\n\t\t// col is pair<string name, MdsValue* val>\n\t\tauto namesize = 2 * col.first.length();\n\t\tbyteCount += namesize;\t\t// Account for the column name, which is stored in the entity in XStore\n\t\tswitch((col.second)->type) {\n\t\t\tcase MdsValue::MdsType::mt_bool:\n\t\t\t\tproperties[col.first] = entity_property((col.second)->bval);\n\t\t\t\tbyteCount += 1;\n\t\t\t\tbreak;\n\t\t\tcase MdsValue::MdsType::mt_wstr:\n\t\t\t\t{\n\t\t\t\t\tproperties[col.first] = entity_property(*((col.second)->strval));\n\t\t\t\t\tauto colsize = 2 * ((col.second)->strval->length()) + 2;\n\t\t\t\t\tbyteCount += colsize;\n\t\t\t\t\tif (colsize + namesize > 65536) {\t// XStore max attribute size is 64Ki\n\t\t\t\t\t\tstd::ostringstream msg;\n\t\t\t\t\t\tmsg << \"Column \" << col.first << \" oversize: colsize \" << colsize\n\t\t\t\t\t\t\t<< \" namesize \" << namesize;\n\t\t\t\t\t\ttrace.NOTE(msg.str());\n\t\t\t\t\t\toversize = true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase MdsValue::MdsType::mt_float64:\n\t\t\t\tproperties[col.first] = entity_property((col.second)->dval);\n\t\t\t\tbyteCount += 8;\n\t\t\t\tbreak;\n\t\t\tcase MdsValue::MdsType::mt_int32:\n\t\t\t\tproperties[col.first] = entity_property((int32_t)(col.second)->lval);\n\t\t\t\tbyteCount += 4;\n\t\t\t\tbreak;\n\t\t\tcase MdsValue::MdsType::mt_int64:\n\t\t\t\tproperties[col.first] = entity_property((int64_t)(col.second)->llval);\n\t\t\t\tbyteCount += 8;\n\t\t\t\tbreak;\n\t\t\tcase MdsValue::MdsType::mt_utc:\n\t\t\t\tproperties[col.first] = entity_property((col.second)->datetimeval);\n\t\t\t\tbyteCount += 8;\n\t\t\t\tbreak;\n\t\t}\n\t}\n\n\tif (oversize || (byteCount > 1024*1024)) {\t// XStore max table size is 1024Ki\n\t\ttrace.NOTE(\"Entity or column too large - dropped\");\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Dropping oversize entity: \" << row;\n\t\tLogger::LogWarn(msg.str());\n\t\tMdsdMetrics::Count(\"Dropped_Entities\");\n\t\tMdsdMetrics::Count(\"Overlarge_Entities\");\n\t\treturn;\n\t}\n\n\tif ((_estimatedBytes + byteCount) > 4000000) {\n\t\ttrace.NOTE(\"Batch would be too big; flushing before adding this entity\");\n\t\tFlush();\n\t\ttry {\n\t\t\tComputeConnString();\n\t\t\t_request.reset(new XTableRequest(_connString, _fullTableName));\n\t\t}\n\t\tcatch (std::exception & ex) {\n\t\t\tstd::ostringstream msg;\n\n\t\t\tmsg << \"Exception (\" << ex.what() << \") caught while creating new XTableRequest; dropping row\";\n\t\t\ttrace.NOTE(msg.str());\n\t\t\tLogger::LogError(msg.str());\n\t\t\tMdsdMetrics::Count(\"Dropped_Entities\");\n\t\t\treturn;\n\t\t}\n\t}\n\t_request->AddRow(e);\n\t_estimatedBytes += byteCount;\n\n\tif (trace.IsActive()) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"We have \" << _request->Size() << \" rows\";\n\t\ttrace.NOTE(msg.str());\n\t}\n\tif (_request->Size() == 100) {\n\t\tFlush();\n\t}\n}\n\n// 
Flush any data we're holding. We might never have allocated a request, or it might\n// be empty, or we might have data.\n// Post-condition: _request is nullptr. Next call to AddRow() will create a new request on demand.\nvoid\nXTableSink::Flush()\n{\n\tTrace trace(Trace::XTable, \"XTS::Flush\");\n\n\tif (!_request) {\n\t\t// First time through. Just make the post-condition true\n\t\ttrace.NOTE(\"Null _request; no action.\");\n\t} else {\n\t\tif (_request->Size() > 0) {\n\t\t\t// Detach the request and send it. Send() is fire-and-forget; the request object\n\t\t\t// is responsible for deleting itself after that point.\n\t\t\ttrace.NOTE(\"Writing to \" + _fullTableName + \" with connection string \" + _connString);\n\t\t\tXTableRequest::Send(std::move(_request));\n\t\t} else {\n\t\t\t// Since we create these on demand, this really shouldn't happen.\n\t\t\ttrace.NOTE(\"Empty _request; no action (deleting).\");\n\t\t}\n\t\t_request.reset();\n\t\t_estimatedBytes = 0;\n\t}\n}\n\n// vim: se sw=8 :\n"
  },
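AddRow() in XTableSink.cc above estimates entity size with the constants sketched below: roughly 2 bytes per UTF-16 character, a 64KiB cap per property (name plus value), and a 1MiB cap per entity. A simplified sketch covering string-valued columns only:

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

constexpr size_t kMaxPropertyBytes = 65536;       // XStore per-attribute limit
constexpr size_t kMaxEntityBytes   = 1024 * 1024; // XStore per-entity limit

// Returns false if an entity built from these name/value string columns must be dropped.
bool FitsInEntity(const std::vector<std::pair<std::string, std::string>>& cols,
                  size_t keyBytes) {
    size_t total = keyBytes;                       // 2*(pkey+rowkey length)+4 in AddRow()
    for (const auto& col : cols) {
        size_t nameBytes = 2 * col.first.length();
        size_t valBytes  = 2 * col.second.length() + 2;
        if (nameBytes + valBytes > kMaxPropertyBytes) return false;  // oversize column
        total += nameBytes + valBytes;
    }
    return total <= kMaxEntityBytes;
}

int main() {
    std::vector<std::pair<std::string, std::string>> cols = {{"Level", "Info"}};
    std::cout << FitsInEntity(cols, 2 * (8 + 19) + 4) << "\n";  // 1: fits easily
}
```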
  {
    "path": "Diagnostic/mdsd/mdsd/XTableSink.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _XTABLESINK_HH_\n#define _XTABLESINK_HH_\n\n#include \"IMdsSink.hh\"\n#include <vector>\n#include <string>\n#include <memory>\n#include \"stdafx.h\"\n#include \"IdentityColumns.hh\"\n#include \"MdsTime.hh\"\n#include \"MdsEntityName.hh\"\n\nclass CanonicalEntity;\nclass Credentials;\nclass MdsdConfig;\nclass XTableRequest;\n\nclass XTableSink : public IMdsSink\n{\npublic:\n\tvirtual bool IsXTable() const { return true; }\n\n\tXTableSink(MdsdConfig* config, const MdsEntityName &target, const Credentials* c);\n\n\tvirtual ~XTableSink();\n\n\tvirtual void AddRow(const CanonicalEntity&, const MdsTime&);\n\n\tvirtual void Flush();\nprivate:\n\tXTableSink();\n\tvoid ComputeConnString();\n\n\tMdsdConfig* _config;\n\tMdsEntityName _target;\n\tconst Credentials* _creds;\n\n\tident_vect_t _identityColumns;\n\tstd::string _identColumnString;\n\n\tMdsTime _QIBase;\n\tstd::string _pkey;\n\tstd::string _TIMESTAMP;\n\tstd::string _N;\n\n\tstd::string _connString;\n\tstd::string _fullTableName;\n\tMdsTime _rebuildTime;\n\n\tstd::unique_ptr<XTableRequest> _request;\n\tunsigned long _estimatedBytes;\n};\n\n#endif // _XTABLESINK_HH_\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/cJSON.c",
    "content": "/*\n  Copyright (c) 2009 Dave Gamble\n\n  Permission is hereby granted, free of charge, to any person obtaining a copy\n  of this software and associated documentation files (the \"Software\"), to deal\n  in the Software without restriction, including without limitation the rights\n  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n  copies of the Software, and to permit persons to whom the Software is\n  furnished to do so, subject to the following conditions:\n\n  The above copyright notice and this permission notice shall be included in\n  all copies or substantial portions of the Software.\n\n  THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n  THE SOFTWARE.\n*/\n\n/* cJSON */\n/* JSON parser in C. */\n//#define _GNU_SOURCE\n#include <string.h>\n#include <stdio.h>\n#include <math.h>\n#include <stdlib.h>\n#include <float.h>\n#include <limits.h>\n#include <ctype.h>\n#include \"cJSON.h\"\n\nstatic const char *ep;\n\nconst char *cJSON_GetErrorPtr(void) {return ep;}\n\nstatic int cJSON_strcasecmp(const char *s1,const char *s2)\n{\n\tif (!s1) return (s1==s2)?0:1;if (!s2) return 1;\n\tfor(; tolower(*s1) == tolower(*s2); ++s1, ++s2)\tif(*s1 == 0)\treturn 0;\n\treturn tolower(*(const unsigned char *)s1) - tolower(*(const unsigned char *)s2);\n}\n\nstatic void *(*cJSON_malloc)(size_t sz) = malloc;\nstatic void (*cJSON_free)(void *ptr) = free;\n\nstatic char* cJSON_strdup(const char* str)\n{\n      size_t len;\n      char* copy;\n\n      len = strlen(str) + 1;\n      if (!(copy = (char*)cJSON_malloc(len))) return 0;\n      memcpy(copy,str,len);\n      return copy;\n}\n\nvoid cJSON_InitHooks(cJSON_Hooks* hooks)\n{\n    if (!hooks) { /* Reset hooks */\n        cJSON_malloc = malloc;\n        cJSON_free = free;\n        return;\n    }\n\n\tcJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc;\n\tcJSON_free\t = (hooks->free_fn)?hooks->free_fn:free;\n}\n\n/* Internal constructor. */\nstatic cJSON *cJSON_New_Item(void)\n{\n\tcJSON* node = (cJSON*)cJSON_malloc(sizeof(cJSON));\n\tif (node) memset(node,0,sizeof(cJSON));\n\treturn node;\n}\n\n/* Delete a cJSON structure. */\nvoid cJSON_Delete(cJSON *c)\n{\n\tcJSON *next;\n\twhile (c)\n\t{\n\t\tnext=c->next;\n\t\tif (!(c->type&cJSON_IsReference) && c->child) { cJSON_Delete(c->child); c->child = 0; }\n\t\tif (!(c->type&cJSON_IsReference) && c->valuestring) { cJSON_free(c->valuestring); c->valuestring = 0; }\n\t\tif (c->string) { cJSON_free(c->string); c->string = 0; }\n\t\tcJSON_free(c);\n\t\tc=next;\n\t}\n}\n\n/* Parse the input text to generate a number, and populate the result into item. */\nstatic const char *parse_number(cJSON *item,const char *num)\n{\n\tdouble n=0,sign=1,scale=0;int subscale=0,signsubscale=1;\n\n\tif (*num=='-') sign=-1,num++;\t/* Has sign? */\n\tif (*num=='0') num++;\t\t\t/* is zero */\n\tif (*num>='1' && *num<='9')\tdo\tn=(n*10.0)+(*num++ -'0');\twhile (*num>='0' && *num<='9');\t/* Number? */\n\tif (*num=='.' && num[1]>='0' && num[1]<='9') {num++;\t\tdo\tn=(n*10.0)+(*num++ -'0'),scale--; while (*num>='0' && *num<='9');}\t/* Fractional part? 
*/\n\tif (*num=='e' || *num=='E')\t\t/* Exponent? */\n\t{\tnum++;if (*num=='+') num++;\telse if (*num=='-') signsubscale=-1,num++;\t\t/* With sign? */\n\t\twhile (*num>='0' && *num<='9') subscale=(subscale*10)+(*num++ - '0');\t/* Number? */\n\t}\n\n\tn=sign*n*pow(10.0,(scale+subscale*signsubscale));\t/* number = +/- number.fraction * 10^+/- exponent */\n\t\n\titem->valuedouble=n;\n\titem->valueint=(long long)n;\n\titem->type=cJSON_Number;\n\treturn num;\n}\n\n/* Render the number nicely from the given item into a string. */\nstatic char *print_number(cJSON *item)\n{\n\tchar *str;\n\tdouble d=item->valuedouble;\n\tif (fabs(((double)item->valueint)-d)<=DBL_EPSILON && d<=LLONG_MAX && d>=LLONG_MIN)\n\t{\n\t\tstr=(char*)cJSON_malloc(21);\t/* 2^64+1 can be represented in 21 chars. */\n\t\tif (str) sprintf(str,\"%lld\",item->valueint);\n\t}\n\telse\n\t{\n\t\tconst size_t buffSize = 64;\n\t\tstr=(char*)cJSON_malloc(buffSize);\t/* This is a nice tradeoff. */\n\t\tif (str)\n\t\t{\n\t\t\tif (fabs(floor(d)-d)<=DBL_EPSILON && fabs(d)<1.0e60)snprintf(str, buffSize, \"%.0f\",d);\n\t\t\telse if (fabs(d)<1.0e-6 || fabs(d)>1.0e9)\t\t\tsnprintf(str, buffSize, \"%e\",d);\n\t\t\telse\t\t\t\t\t\t\t\t\t\t\t\tsnprintf(str, buffSize, \"%f\",d);\n\t\t}\n\t}\n\treturn str;\n}\n\nstatic unsigned parse_hex4(const char *str)\n{\n\tunsigned h=0;\n\tif (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;\n\th=h<<4;str++;\n\tif (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;\n\th=h<<4;str++;\n\tif (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;\n\th=h<<4;str++;\n\tif (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;\n\treturn h;\n}\n\n/* Parse the input text into an unescaped cstring, and populate item. */\nstatic const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };\nstatic const char *parse_string(cJSON *item,const char *str)\n{\n\tconst char *ptr=str+1;char *ptr2;char *out;int len=0;unsigned uc,uc2;\n\tif (*str!='\\\"') {ep=str;return 0;}\t/* not a string! */\n\t\n\twhile (*ptr!='\\\"' && *ptr && ++len) if (*ptr++ == '\\\\') ptr++;\t/* Skip escaped quotes. */\n\t\n\tout=(char*)cJSON_malloc(len+1);\t/* This is how long we need for the string, roughly. */\n\tif (!out) return 0;\n\t\n\tptr=str+1;ptr2=out;\n\twhile (*ptr!='\\\"' && *ptr)\n\t{\n\t\tif (*ptr!='\\\\') *ptr2++=*ptr++;\n\t\telse\n\t\t{\n\t\t\tptr++;\n\t\t\tswitch (*ptr)\n\t\t\t{\n\t\t\t\tcase 'b': *ptr2++='\\b';\tbreak;\n\t\t\t\tcase 'f': *ptr2++='\\f';\tbreak;\n\t\t\t\tcase 'n': *ptr2++='\\n';\tbreak;\n\t\t\t\tcase 'r': *ptr2++='\\r';\tbreak;\n\t\t\t\tcase 't': *ptr2++='\\t';\tbreak;\n\t\t\t\tcase 'u':\t /* transcode utf16 to utf8. */\n\t\t\t\t\tuc=parse_hex4(ptr+1);ptr+=4;\t/* get the unicode char. 
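BMP code points arrive as one \\uXXXX escape; 0xD800-0xDBFF is the high half of a UTF-16 surrogate pair and is combined with its low half just below before UTF-8 encoding. 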
*/\n\n\t\t\t\t\tif ((uc>=0xDC00 && uc<=0xDFFF) || uc==0)\tbreak;\t/* check for invalid.\t*/\n\n\t\t\t\t\tif (uc>=0xD800 && uc<=0xDBFF)\t/* UTF16 surrogate pairs.\t*/\n\t\t\t\t\t{\n\t\t\t\t\t\tif (ptr[1]!='\\\\' || ptr[2]!='u')\tbreak;\t/* missing second-half of surrogate.\t*/\n\t\t\t\t\t\tuc2=parse_hex4(ptr+3);ptr+=6;\n\t\t\t\t\t\tif (uc2<0xDC00 || uc2>0xDFFF)\t\tbreak;\t/* invalid second-half of surrogate.\t*/\n\t\t\t\t\t\tuc=0x10000 + (((uc&0x3FF)<<10) | (uc2&0x3FF));\n\t\t\t\t\t}\n\n\t\t\t\t\tlen=4;if (uc<0x80) len=1;else if (uc<0x800) len=2;else if (uc<0x10000) len=3; ptr2+=len;\n\t\t\t\t\t\n\t\t\t\t\tswitch (len) {\n\t\t\t\t\t\tcase 4: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;\n\t\t\t\t\t\tcase 3: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;\n\t\t\t\t\t\tcase 2: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;\n\t\t\t\t\t\tcase 1: *--ptr2 =(uc | firstByteMark[len]);\n\t\t\t\t\t}\n\t\t\t\t\tptr2+=len;\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:  *ptr2++=*ptr; break;\n\t\t\t}\n\t\t\tptr++;\n\t\t}\n\t}\n\t*ptr2=0;\n\tif (*ptr=='\\\"') ptr++;\n\titem->valuestring=out;\n\titem->type=cJSON_String;\n\treturn ptr;\n}\n\n/* Render the cstring provided to an escaped version that can be printed. */\nstatic char *print_string_ptr(const char *str)\n{\n\tconst char *ptr;char *ptr2,*out;int len=0;unsigned char token;\n\t\n\tif (!str) return cJSON_strdup(\"\");\n\tptr=str;while ((token=*ptr) && ++len) {if (strchr(\"\\\"\\\\\\b\\f\\n\\r\\t\",token)) len++; else if (token<32) len+=5;ptr++;}\n\t\n\tout=(char*)cJSON_malloc(len+3);\n\tif (!out) return 0;\n\n\tptr2=out;ptr=str;\n\t*ptr2++='\\\"';\n\twhile (*ptr)\n\t{\n\t\tif ((unsigned char)*ptr>31 && *ptr!='\\\"' && *ptr!='\\\\') *ptr2++=*ptr++;\n\t\telse\n\t\t{\n\t\t\t*ptr2++='\\\\';\n\t\t\tswitch (token=*ptr++)\n\t\t\t{\n\t\t\t\tcase '\\\\':\t*ptr2++='\\\\';\tbreak;\n\t\t\t\tcase '\\\"':\t*ptr2++='\\\"';\tbreak;\n\t\t\t\tcase '\\b':\t*ptr2++='b';\tbreak;\n\t\t\t\tcase '\\f':\t*ptr2++='f';\tbreak;\n\t\t\t\tcase '\\n':\t*ptr2++='n';\tbreak;\n\t\t\t\tcase '\\r':\t*ptr2++='r';\tbreak;\n\t\t\t\tcase '\\t':\t*ptr2++='t';\tbreak;\n\t\t\t\tdefault: sprintf(ptr2,\"u%04x\",token);ptr2+=5;\tbreak;\t/* escape and print */\n\t\t\t}\n\t\t}\n\t}\n\t*ptr2++='\\\"';*ptr2++=0;\n\treturn out;\n}\n/* Invote print_string_ptr (which is useful) on an item. */\nstatic char *print_string(cJSON *item)\t{return print_string_ptr(item->valuestring);}\n\n/* Predeclare these prototypes. */\nstatic const char *parse_value(cJSON *item,const char *value);\nstatic char *print_value(cJSON *item,int depth,int fmt);\nstatic const char *parse_array(cJSON *item,const char *value);\nstatic char *print_array(cJSON *item,int depth,int fmt);\nstatic const char *parse_object(cJSON *item,const char *value);\nstatic char *print_object(cJSON *item,int depth,int fmt);\n\n/* Utility to jump whitespace and cr/lf */\nstatic const char *skip(const char *in) {while (in && *in && (unsigned char)*in<=32) in++; return in;}\n\n/* Parse an object - create a new root, and populate. */\ncJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated)\n{\n\tconst char *end=0;\n\tcJSON *c=cJSON_New_Item();\n\tep=0;\n\tif (!c) return 0;       /* memory fail */\n\n\tend=parse_value(c,skip(value));\n\tif (!end)\t{cJSON_Delete(c);return 0;}\t/* parse failure. ep is set. 
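Callers can retrieve the failure position via cJSON_GetErrorPtr(). 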
*/\n\n\t/* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */\n\tif (require_null_terminated) {end=skip(end);if (*end) {cJSON_Delete(c);ep=end;return 0;}}\n\tif (return_parse_end) *return_parse_end=end;\n\treturn c;\n}\n/* Default options for cJSON_Parse */\ncJSON *cJSON_Parse(const char *value) {return cJSON_ParseWithOpts(value,0,0);}\n\n/* Render a cJSON item/entity/structure to text. */\nchar *cJSON_Print(cJSON *item)\t\t\t\t{return print_value(item,0,1);}\nchar *cJSON_PrintUnformatted(cJSON *item)\t{return print_value(item,0,0);}\n\n/* Parser core - when encountering text, process appropriately. */\nstatic const char *parse_value(cJSON *item,const char *value)\n{\n\tif (!value)\t\t\t\t\t\treturn 0;\t/* Fail on null. */\n\tif (!strncmp(value,\"null\",4))\t{ item->type=cJSON_NULL;  return value+4; }\n\tif (!strncmp(value,\"false\",5))\t{ item->type=cJSON_False; return value+5; }\n\tif (!strncmp(value,\"true\",4))\t{ item->type=cJSON_True; item->valueint=1;\treturn value+4; }\n\tif (*value=='\\\"')\t\t\t\t{ return parse_string(item,value); }\n\tif (*value=='-' || (*value>='0' && *value<='9'))\t{ return parse_number(item,value); }\n\tif (*value=='[')\t\t\t\t{ return parse_array(item,value); }\n\tif (*value=='{')\t\t\t\t{ return parse_object(item,value); }\n\n\tep=value;return 0;\t/* failure. */\n}\n\n/* Render a value to text. */\nstatic char *print_value(cJSON *item,int depth,int fmt)\n{\n\tchar *out=0;\n\tif (!item) return 0;\n\tswitch ((item->type)&255)\n\t{\n\t\tcase cJSON_NULL:\tout=cJSON_strdup(\"null\");\tbreak;\n\t\tcase cJSON_False:\tout=cJSON_strdup(\"false\");break;\n\t\tcase cJSON_True:\tout=cJSON_strdup(\"true\"); break;\n\t\tcase cJSON_Number:\tout=print_number(item);break;\n\t\tcase cJSON_String:\tout=print_string(item);break;\n\t\tcase cJSON_Array:\tout=print_array(item,depth,fmt);break;\n\t\tcase cJSON_Object:\tout=print_object(item,depth,fmt);break;\n\t}\n\treturn out;\n}\n\n/* Build an array from input text. */\nstatic const char *parse_array(cJSON *item,const char *value)\n{\n\tcJSON *child;\n\tif (*value!='[')\t{ep=value;return 0;}\t/* not an array! */\n\n\titem->type=cJSON_Array;\n\tvalue=skip(value+1);\n\tif (*value==']') return value+1;\t/* empty array. */\n\n\titem->child=child=cJSON_New_Item();\n\tif (!item->child) return 0;\t\t /* memory fail */\n\tvalue=skip(parse_value(child,skip(value)));\t/* skip any spacing, get the value. */\n\tif (!value) return 0;\n\n\twhile (*value==',')\n\t{\n\t\tcJSON *new_item;\n\t\tif (!(new_item=cJSON_New_Item())) return 0; \t/* memory fail */\n\t\tchild->next=new_item;new_item->prev=child;child=new_item;\n\t\tvalue=skip(parse_value(child,skip(value+1)));\n\t\tif (!value) return 0;\t/* memory fail */\n\t}\n\n\tif (*value==']') return value+1;\t/* end of array */\n\tep=value;return 0;\t/* malformed. */\n}\n\n/* Render an array to text */\nstatic char *print_array(cJSON *item,int depth,int fmt)\n{\n\tchar **entries;\n\tchar *out=0,*ptr,*ret;int len=5;\n\tcJSON *child=item->child;\n\tint numentries=0,i=0,fail=0;\n\t\n\t/* How many entries in the array? 
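(first of two passes: count the children, then render each one into a scratch array before joining) 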
*/\n\twhile (child) numentries++,child=child->next;\n\t/* Explicitly handle numentries==0 */\n\tif (!numentries)\n\t{\n\t\tout=(char*)cJSON_malloc(3);\n\t\tif (out) strcpy(out,\"[]\");\n\t\treturn out;\n\t}\n\t/* Allocate an array to hold the values for each */\n\tentries=(char**)cJSON_malloc(numentries*sizeof(char*));\n\tif (!entries) return 0;\n\tmemset(entries,0,numentries*sizeof(char*));\n\t/* Retrieve all the results: */\n\tchild=item->child;\n\twhile (child && !fail)\n\t{\n\t\tret=print_value(child,depth+1,fmt);\n\t\tentries[i++]=ret;\n\t\tif (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1;\n\t\tchild=child->next;\n\t}\n\t\n\t/* If we didn't fail, try to malloc the output string */\n\tif (!fail) out=(char*)cJSON_malloc(len);\n\t/* If that fails, we fail. */\n\tif (!out) fail=1;\n\n\t/* Handle failure. */\n\tif (fail)\n\t{\n\t\tfor (i=0;i<numentries;i++) if (entries[i]) cJSON_free(entries[i]);\n\t\tcJSON_free(entries);\n\t\treturn 0;\n\t}\n\t\n\t/* Compose the output array. */\n\t*out='[';\n\tptr=out+1;*ptr=0;\n\tfor (i=0;i<numentries;i++)\n\t{\n\t\tstrcpy(ptr,entries[i]);ptr+=strlen(entries[i]);\n\t\tif (i!=numentries-1) {*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;}\n\t\tcJSON_free(entries[i]);\n\t}\n\tcJSON_free(entries);\n\t*ptr++=']';*ptr++=0;\n\treturn out;\t\n}\n\n/* Build an object from the text. */\nstatic const char *parse_object(cJSON *item,const char *value)\n{\n\tcJSON *child;\n\tif (*value!='{')\t{ep=value;return 0;}\t/* not an object! */\n\t\n\titem->type=cJSON_Object;\n\tvalue=skip(value+1);\n\tif (*value=='}') return value+1;\t/* empty array. */\n\t\n\titem->child=child=cJSON_New_Item();\n\tif (!item->child) return 0;\n\tvalue=skip(parse_string(child,skip(value)));\n\tif (!value) return 0;\n\tchild->string=child->valuestring;child->valuestring=0;\n\tif (*value!=':') {ep=value;return 0;}\t/* fail! */\n\tvalue=skip(parse_value(child,skip(value+1)));\t/* skip any spacing, get the value. */\n\tif (!value) return 0;\n\t\n\twhile (*value==',')\n\t{\n\t\tcJSON *new_item;\n\t\tif (!(new_item=cJSON_New_Item()))\treturn 0; /* memory fail */\n\t\tchild->next=new_item;new_item->prev=child;child=new_item;\n\t\tvalue=skip(parse_string(child,skip(value+1)));\n\t\tif (!value) return 0;\n\t\tchild->string=child->valuestring;child->valuestring=0;\n\t\tif (*value!=':') {ep=value;return 0;}\t/* fail! */\n\t\tvalue=skip(parse_value(child,skip(value+1)));\t/* skip any spacing, get the value. */\n\t\tif (!value) return 0;\n\t}\n\t\n\tif (*value=='}') return value+1;\t/* end of array */\n\tep=value;return 0;\t/* malformed. */\n}\n\n/* Render an object to text. */\nstatic char *print_object(cJSON *item,int depth,int fmt)\n{\n\tchar **entries=0,**names=0;\n\tchar *out=0,*ptr,*ret,*str;int len=7,i=0,j;\n\tcJSON *child=item->child;\n\tint numentries=0,fail=0;\n\t/* Count the number of entries. 
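(same two-pass scheme as print_array, with a name string rendered alongside each value) 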
*/\n\twhile (child) numentries++,child=child->next;\n\t/* Explicitly handle empty object case */\n\tif (!numentries)\n\t{\n\t\tout=(char*)cJSON_malloc(fmt?depth+4:3);\n\t\tif (!out)\treturn 0;\n\t\tptr=out;*ptr++='{';\n\t\tif (fmt) {*ptr++='\\n';for (i=0;i<depth-1;i++) *ptr++='\\t';}\n\t\t*ptr++='}';*ptr++=0;\n\t\treturn out;\n\t}\n\t/* Allocate space for the names and the objects */\n\tentries=(char**)cJSON_malloc(numentries*sizeof(char*));\n\tif (!entries) return 0;\n\tnames=(char**)cJSON_malloc(numentries*sizeof(char*));\n\tif (!names) {cJSON_free(entries);return 0;}\n\tmemset(entries,0,sizeof(char*)*numentries);\n\tmemset(names,0,sizeof(char*)*numentries);\n\n\t/* Collect all the results into our arrays: */\n\tchild=item->child;depth++;if (fmt) len+=depth;\n\twhile (child)\n\t{\n\t\tnames[i]=str=print_string_ptr(child->string);\n\t\tentries[i++]=ret=print_value(child,depth,fmt);\n\t\tif (str && ret) len+=strlen(ret)+strlen(str)+2+(fmt?2+depth:0); else fail=1;\n\t\tchild=child->next;\n\t}\n\t\n\t/* Try to allocate the output string */\n\tif (!fail) out=(char*)cJSON_malloc(len);\n\tif (!out) fail=1;\n\n\t/* Handle failure */\n\tif (fail)\n\t{\n\t\tfor (i=0;i<numentries;i++) {if (names[i]) cJSON_free(names[i]);if (entries[i]) cJSON_free(entries[i]);}\n\t\tcJSON_free(names);cJSON_free(entries);\n\t\treturn 0;\n\t}\n\t\n\t/* Compose the output: */\n\t*out='{';ptr=out+1;if (fmt)*ptr++='\\n';*ptr=0;\n\tfor (i=0;i<numentries;i++)\n\t{\n\t\tif (fmt) for (j=0;j<depth;j++) *ptr++='\\t';\n\t\tstrcpy(ptr,names[i]);ptr+=strlen(names[i]);\n\t\t*ptr++=':';if (fmt) *ptr++='\\t';\n\t\tstrcpy(ptr,entries[i]);ptr+=strlen(entries[i]);\n\t\tif (i!=numentries-1) *ptr++=',';\n\t\tif (fmt) *ptr++='\\n';*ptr=0;\n\t\tcJSON_free(names[i]);cJSON_free(entries[i]);\n\t}\n\t\n\tcJSON_free(names);cJSON_free(entries);\n\tif (fmt) for (i=0;i<depth-1;i++) *ptr++='\\t';\n\t*ptr++='}';*ptr++=0;\n\treturn out;\t\n}\n\n/* Get Array size/item / object item. */\nint    cJSON_GetArraySize(cJSON *array)\t\t\t\t\t\t\t{cJSON *c=array->child;int i=0;while(c)i++,c=c->next;return i;}\ncJSON *cJSON_GetArrayItem(cJSON *array,int item)\t\t\t\t{cJSON *c=array->child;  while (c && item>0) item--,c=c->next; return c;}\ncJSON *cJSON_GetObjectItem(cJSON *object,const char *string)\t{cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;}\n\n/* Utility for array list handling. */\nstatic void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;}\n/* Utility for handling references. */\nstatic cJSON *create_reference(cJSON *item) {cJSON *ref=cJSON_New_Item();if (!ref) return 0;memcpy(ref,item,sizeof(cJSON));ref->string=0;ref->type|=cJSON_IsReference;ref->next=ref->prev=0;return ref;}\n\n/* Add item to array/object. 
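The container takes ownership of the added item - e.g. cJSON_AddItemToObject(root, \"count\", cJSON_CreateNumber(3)); use the ...Reference variants below to share an item without transferring ownership. 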
*/\nvoid   cJSON_AddItemToArray(cJSON *array, cJSON *item)\t\t\t\t\t\t{cJSON *c=array->child;if (!item) return; if (!c) {array->child=item;} else {while (c && c->next) c=c->next; suffix_object(c,item);}}\nvoid   cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item)\t{if (!item) return; if (item->string) cJSON_free(item->string);item->string=cJSON_strdup(string);cJSON_AddItemToArray(object,item);}\nvoid\tcJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)\t\t\t\t\t\t{cJSON_AddItemToArray(array,create_reference(item));}\nvoid\tcJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item)\t{cJSON_AddItemToObject(object,string,create_reference(item));}\n\ncJSON *cJSON_DetachItemFromArray(cJSON *array,int which)\t\t\t{cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return 0;\n\tif (c->prev) c->prev->next=c->next;if (c->next) c->next->prev=c->prev;if (c==array->child) array->child=c->next;c->prev=c->next=0;return c;}\nvoid   cJSON_DeleteItemFromArray(cJSON *array,int which)\t\t\t{cJSON_Delete(cJSON_DetachItemFromArray(array,which));}\ncJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string) {int i=0;cJSON *c=object->child;while (c && cJSON_strcasecmp(c->string,string)) i++,c=c->next;if (c) return cJSON_DetachItemFromArray(object,i);return 0;}\nvoid   cJSON_DeleteItemFromObject(cJSON *object,const char *string) {cJSON_Delete(cJSON_DetachItemFromObject(object,string));}\n\n/* Replace array/object items with new ones. */\nvoid   cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem)\t\t{cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return;\n\tnewitem->next=c->next;newitem->prev=c->prev;if (newitem->next) newitem->next->prev=newitem;\n\tif (c==array->child) array->child=newitem; else newitem->prev->next=newitem;c->next=c->prev=0;cJSON_Delete(c);}\nvoid   cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem){int i=0;cJSON *c=object->child;while(c && cJSON_strcasecmp(c->string,string))i++,c=c->next;if(c){newitem->string=cJSON_strdup(string);cJSON_ReplaceItemInArray(object,i,newitem);}}\n\n/* Create basic types: */\ncJSON *cJSON_CreateNull(void)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_NULL;return item;}\ncJSON *cJSON_CreateTrue(void)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_True;return item;}\ncJSON *cJSON_CreateFalse(void)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_False;return item;}\ncJSON *cJSON_CreateBool(int b)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=(b?cJSON_True:cJSON_False);return item;}\ncJSON *cJSON_CreateNumber(double num)\t\t\t{cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_Number;item->valuedouble=num;item->valueint=(long long)num;}return item;}\ncJSON *cJSON_CreateString(const char *string)\t{cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_String;item->valuestring=cJSON_strdup(string);}return item;}\ncJSON *cJSON_CreateArray(void)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Array;return item;}\ncJSON *cJSON_CreateObject(void)\t\t\t\t\t{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;}\n\n/* Create Arrays: */\ncJSON *cJSON_CreateIntArray(const int *numbers,int count)\t\t{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}\ncJSON *cJSON_CreateFloatArray(const float *numbers,int count)\t{int i;cJSON 
*n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}\ncJSON *cJSON_CreateDoubleArray(const double *numbers,int count)\t{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}\ncJSON *cJSON_CreateStringArray(const char **strings,int count)\t{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateString(strings[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}\n\n/* Duplication */\ncJSON *cJSON_Duplicate(cJSON *item,int recurse)\n{\n\tcJSON *newitem,*cptr,*nptr=0,*newchild;\n\t/* Bail on bad ptr */\n\tif (!item) return 0;\n\t/* Create new item */\n\tnewitem=cJSON_New_Item();\n\tif (!newitem) return 0;\n\t/* Copy over all vars */\n\tnewitem->type=item->type&(~cJSON_IsReference),newitem->valueint=item->valueint,newitem->valuedouble=item->valuedouble;\n\tif (item->valuestring)\t{newitem->valuestring=cJSON_strdup(item->valuestring);\tif (!newitem->valuestring)\t{cJSON_Delete(newitem);return 0;}}\n\tif (item->string)\t\t{newitem->string=cJSON_strdup(item->string);\t\t\tif (!newitem->string)\t\t{cJSON_Delete(newitem);return 0;}}\n\t/* If non-recursive, then we're done! */\n\tif (!recurse) return newitem;\n\t/* Walk the ->next chain for the child. */\n\tcptr=item->child;\n\twhile (cptr)\n\t{\n\t\tnewchild=cJSON_Duplicate(cptr,1);\t\t/* Duplicate (with recurse) each item in the ->next chain */\n\t\tif (!newchild) {cJSON_Delete(newitem);return 0;}\n\t\tif (nptr)\t{nptr->next=newchild,newchild->prev=nptr;nptr=newchild;}\t/* If newitem->child already set, then crosswire ->prev and ->next and move on */\n\t\telse\t\t{newitem->child=newchild;nptr=newchild;}\t\t\t\t\t/* Set newitem->child and move to it */\n\t\tcptr=cptr->next;\n\t}\n\treturn newitem;\n}\n\nvoid cJSON_Minify(char *json)\n{\n\tchar *into=json;\n\twhile (*json)\n\t{\n\t\tif (*json==' ') json++;\n\t\telse if (*json=='\\t') json++;\t// Whitespace characters.\n\t\telse if (*json=='\\r') json++;\n\t\telse if (*json=='\\n') json++;\n\t\telse if (*json=='/' && json[1]=='/')  while (*json && *json!='\\n') json++;\t// double-slash comments, to end of line.\n\t\telse if (*json=='/' && json[1]=='*') {while (*json && !(*json=='*' && json[1]=='/')) json++;json+=2;}\t// multiline comments.\n\t\telse if (*json=='\\\"'){*into++=*json++;while (*json && *json!='\\\"'){if (*json=='\\\\') *into++=*json++;*into++=*json++;}*into++=*json++;} // string literals, which are \\\" sensitive.\n\t\telse *into++=*json++;\t\t\t// All other characters.\n\t}\n\t*into=0;\t// and null-terminate.\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/cJSON.h",
    "content": "/*\n  Copyright (c) 2009 Dave Gamble\n \n  Permission is hereby granted, free of charge, to any person obtaining a copy\n  of this software and associated documentation files (the \"Software\"), to deal\n  in the Software without restriction, including without limitation the rights\n  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n  copies of the Software, and to permit persons to whom the Software is\n  furnished to do so, subject to the following conditions:\n \n  The above copyright notice and this permission notice shall be included in\n  all copies or substantial portions of the Software.\n \n  THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n  THE SOFTWARE.\n*/\n\n#ifndef cJSON__h\n#define cJSON__h\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/* cJSON Types: */\n#define cJSON_False 0\n#define cJSON_True 1\n#define cJSON_NULL 2\n#define cJSON_Number 3\n#define cJSON_String 4\n#define cJSON_Array 5\n#define cJSON_Object 6\n\t\n#define cJSON_IsReference 256\n\n/* The cJSON structure: */\ntypedef struct cJSON {\n\tstruct cJSON *next,*prev;\t/* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */\n\tstruct cJSON *child;\t\t/* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */\n\n\tint type;\t\t\t\t\t/* The type of the item, as above. */\n\n\tchar *valuestring;\t\t\t/* The item's string, if type==cJSON_String */\n\tlong long valueint;\t\t\t\t/* The item's number, if type==cJSON_Number */\n\tdouble valuedouble;\t\t\t/* The item's number, if type==cJSON_Number */\n\n\tchar *string;\t\t\t\t/* The item's name string, if this item is the child of, or is in the list of subitems of an object. */\n} cJSON;\n\ntypedef struct cJSON_Hooks {\n      void *(*malloc_fn)(size_t sz);\n      void (*free_fn)(void *ptr);\n} cJSON_Hooks;\n\n/* Supply malloc, realloc and free functions to cJSON */\nextern void cJSON_InitHooks(cJSON_Hooks* hooks);\n\n\n/* Supply a block of JSON, and this returns a cJSON object you can interrogate. Call cJSON_Delete when finished. */\nextern cJSON *cJSON_Parse(const char *value);\n/* Render a cJSON entity to text for transfer/storage. Free the char* when finished. */\nextern char  *cJSON_Print(cJSON *item);\n/* Render a cJSON entity to text for transfer/storage without any formatting. Free the char* when finished. */\nextern char  *cJSON_PrintUnformatted(cJSON *item);\n/* Delete a cJSON entity and all subentities. */\nextern void   cJSON_Delete(cJSON *c);\n\n/* Returns the number of items in an array (or object). */\nextern int\t  cJSON_GetArraySize(cJSON *array);\n/* Retrieve item number \"item\" from array \"array\". Returns NULL if unsuccessful. */\nextern cJSON *cJSON_GetArrayItem(cJSON *array,int item);\n/* Get item \"string\" from object. Case insensitive. */\nextern cJSON *cJSON_GetObjectItem(cJSON *object,const char *string);\n\n/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. 
Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */\nextern const char *cJSON_GetErrorPtr(void);\n\t\n/* These calls create a cJSON item of the appropriate type. */\nextern cJSON *cJSON_CreateNull(void);\nextern cJSON *cJSON_CreateTrue(void);\nextern cJSON *cJSON_CreateFalse(void);\nextern cJSON *cJSON_CreateBool(int b);\nextern cJSON *cJSON_CreateNumber(double num);\nextern cJSON *cJSON_CreateString(const char *string);\nextern cJSON *cJSON_CreateArray(void);\nextern cJSON *cJSON_CreateObject(void);\n\n/* These utilities create an Array of count items. */\nextern cJSON *cJSON_CreateIntArray(const int *numbers,int count);\nextern cJSON *cJSON_CreateFloatArray(const float *numbers,int count);\nextern cJSON *cJSON_CreateDoubleArray(const double *numbers,int count);\nextern cJSON *cJSON_CreateStringArray(const char **strings,int count);\n\n/* Append item to the specified array/object. */\nextern void cJSON_AddItemToArray(cJSON *array, cJSON *item);\nextern void\tcJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item);\n/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */\nextern void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);\nextern void\tcJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item);\n\n/* Remove/Detach items from Arrays/Objects. */\nextern cJSON *cJSON_DetachItemFromArray(cJSON *array,int which);\nextern void   cJSON_DeleteItemFromArray(cJSON *array,int which);\nextern cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string);\nextern void   cJSON_DeleteItemFromObject(cJSON *object,const char *string);\n\t\n/* Update array items. */\nextern void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem);\nextern void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);\n\n/* Duplicate a cJSON item */\nextern cJSON *cJSON_Duplicate(cJSON *item,int recurse);\n/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will\nneed to be released. With recurse!=0, it will duplicate any children connected to the item.\nThe item->next and ->prev pointers are always zero on return from Duplicate. */\n\n/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */\nextern cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated);\n\nextern void cJSON_Minify(char *json);\n\n/* Macros for creating things quickly. */\n#define cJSON_AddNullToObject(object,name)\t\tcJSON_AddItemToObject(object, name, cJSON_CreateNull())\n#define cJSON_AddTrueToObject(object,name)\t\tcJSON_AddItemToObject(object, name, cJSON_CreateTrue())\n#define cJSON_AddFalseToObject(object,name)\t\tcJSON_AddItemToObject(object, name, cJSON_CreateFalse())\n#define cJSON_AddBoolToObject(object,name,b)\tcJSON_AddItemToObject(object, name, cJSON_CreateBool(b))\n#define cJSON_AddNumberToObject(object,name,n)\tcJSON_AddItemToObject(object, name, cJSON_CreateNumber(n))\n#define cJSON_AddStringToObject(object,name,s)\tcJSON_AddItemToObject(object, name, cJSON_CreateString(s))\n\n/* When assigning an integer value, it needs to be propagated to valuedouble too. */\n#define cJSON_SetIntValue(object,val)\t\t\t((object)?(object)->valueint=(object)->valuedouble=(val):(val))\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/cryptutil.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"cryptutil.hh\"\n\n#include <exception>\n#include <fstream>\n#include <iostream>\n#include <memory>\n#include <sstream>\n#include <system_error>\n\nextern \"C\" {\n#include <openssl/cms.h>\n#include <openssl/err.h>\n#include <openssl/pkcs7.h>\n#include <openssl/pkcs12.h>\n#include <openssl/ssl.h>\n#include <openssl/x509.h>\n#include <sys/stat.h>\n}\n\nusing namespace std;\n\nnamespace cryptutil\n{\n    using uniqueEvpKey = std::unique_ptr<EVP_PKEY, void(*)(EVP_PKEY*)>;\n    using uniqueCms = std::unique_ptr<CMS_ContentInfo, void (*)(CMS_ContentInfo*)>;\n    using uniqueP12 = std::unique_ptr<PKCS12, void (*)(PKCS12*)>;\n    \n    // True if file exists, false if not\n    bool FileExists(const string& filename)\n    {\n        struct stat buffer;   \n        return ((stat(filename.c_str(), &buffer)==0) && S_ISREG(buffer.st_mode));\n    }\n    \n    // Convert a hex string into a vector of bytes\n    bool DecodeString(const string& encoded, vector<BYTE>& byteBuf)\n    {\n        if (encoded.length() < 2)\n        {\n            return false;\n        }\n        auto bufLen = encoded.length() / 2;\n        byteBuf = vector<BYTE>(bufLen);\n        size_t idx = 0;\n        for (size_t i = 0; i < bufLen; i++)\n        {\n            BYTE data1 = (BYTE)(encoded[idx] - '0');\n            if (data1 > 9)\n            {\n                data1 = (BYTE)((encoded[idx] - 'A') + 10);\n            }\n            BYTE data2 = (BYTE)(encoded[idx+1] - '0');\n            if (data2 > 9)\n            {\n                data2 = (BYTE)((encoded[idx+1] - 'A') + 10);\n            }\n            byteBuf[i] = (data1 << 4) | data2;\n            idx += 2;\n        }\n        return true;\n    }\n\n    // Read a string from the data in a BIO object\n    string GetStringFromBio(BIO *mem)\n    {\n        if (mem == nullptr)\n        {\n            throw invalid_argument(\"A nullptr was passed in place of a BIO argument\");\n        }\n        const int bufSize = 10;\n        char buf[bufSize] = \"\";\n        stringstream ss;\n        while (BIO_gets(mem, buf, bufSize) > 0)\n        {\n            ss << buf;\n        }\n        return ss.str();\n    }\n    \n    // Open a PKCS12 (.pfx) file, and return a suitable object or throw an exception\n    uniqueP12 GetPkcs12FromFile(const string& privKeyPath)\n    {\n        FILE *p12_file = fopen(privKeyPath.c_str(), \"rb\");\n        if (p12_file == nullptr)\n        {\n            throw system_error(errno, system_category(), string(\"Unable to read PKCS12 file \" + privKeyPath));\n        }\n        PKCS12 *p12 = nullptr;\n        d2i_PKCS12_fp(p12_file, &p12);\n        fclose(p12_file);\n        if (p12 == nullptr)\n        {\n            throw cryptutilException(\"PKCS12 structure could not be parsed from \" + privKeyPath);\n        }\n        uniqueP12 retP12(p12, PKCS12_free);\n        return retP12;\n    }\n    \n    // Return the EVP_PKEY contained in the specified pkcs12 file, or throw an exception\n    uniqueEvpKey GetPrivateKeyFromPkcs12(const string& privKeyPath, const string& keyPass)\n    {\n        EVP_PKEY *pkey = nullptr;\n        X509 *cert = nullptr;\n        \n        uniqueP12 p12 = GetPkcs12FromFile(privKeyPath);\n        \n        if (!PKCS12_parse(p12.get(), keyPass.c_str(), &pkey, &cert, (STACK_OF(X509)**)nullptr))\n        {\n            throw cryptutilException(\"Could not parse private key from PKCS12 file \" + privKeyPath);\n     
   }\n        uniqueEvpKey retKey(pkey, EVP_PKEY_free);\n        // clear certs\n        X509_free(cert);\n        return retKey;\n    }\n    \n    // Return the EVP_PKEY contained in the specified PEM file, or throw an exception on failure.\n    uniqueEvpKey GetPrivateKeyFromPem(const string& privKeyPath)\n    {\n        BIO *keyBio = BIO_new_file(privKeyPath.c_str(), \"r\");\n        if (keyBio == nullptr)\n        {\n            throw cryptutilException(\"Unable to read PEM file \" + privKeyPath);\n        }\n        EVP_PKEY *pkey = PEM_read_bio_PrivateKey(keyBio, NULL, 0, NULL);\n        BIO_free(keyBio);\n        if (pkey == nullptr)\n        {\n            throw cryptutilException(\"Unable to parse private key from PEM file \" + privKeyPath);\n        }\n        uniqueEvpKey retKey(pkey, EVP_PKEY_free);\n        return retKey;\n    }\n    \n    // Try to parse the specified file first as PEM, then as PKCS12 (PFX); return the private key or throw if both fail\n    uniqueEvpKey GetPrivateKeyFromUnknownFileType(const string& privKeyPath, const string& keyPass)\n    {\n        try\n        {\n            return GetPrivateKeyFromPem(privKeyPath);\n        }\n        catch (exception& ex)\n        {\n            // File isn't a PEM, but it might be a PFX. We don't care unless BOTH fail.\n        }\n        // This function can throw cryptutilException and system_error.\n        // No need to catch/rethrow the exception - just let it go unhindered\n        // The last call in this function should always allow any exceptions\n        // to pass through to the caller.\n        return GetPrivateKeyFromPkcs12(privKeyPath, keyPass);\n    }\n    \n    // Parse the hex-encoded string as Cryptographic Message Syntax (CMS), or throw an exception\n    uniqueCms GetCMSFromEncodedString(const string& encoded)\n    {\n        // Decode text from hex chars to binary\n        vector<BYTE> byteBuf;\n        if(!DecodeString(encoded, byteBuf))\n        {\n            throw cryptutilException(\"Unable to decode provided string to CMS\");\n        }\n        BIO *mem = BIO_new_mem_buf(byteBuf.data(), byteBuf.size());\n        \n        // Read encrypted text\n        CMS_ContentInfo *cms = d2i_CMS_bio(mem, NULL);\n        BIO_free(mem);\n        if (cms == nullptr)\n        {\n            throw cryptutilException(\"Unable to parse CMS from decoded string\");\n        }\n        uniqueCms retCms(cms, CMS_ContentInfo_free);\n        return retCms;\n    }\n\n    // Given a private key and CMS object, return the decrypted string\n    // or throw an exception\n    string DecryptCMSWithPrivateKey(uniqueEvpKey& pkey, uniqueCms& cms)\n    {\n        if (pkey.get() == nullptr)\n        {\n            throw invalid_argument(\"The provided private key must not be a nullptr\");\n        }\n        if (cms.get() == nullptr)\n        {\n            throw invalid_argument(\"The provided CMS must not be a nullptr\");\n        }\n        // Decrypt file contents\n        BIO *out = BIO_new(BIO_s_mem());\n        int res = CMS_decrypt(cms.get(), pkey.get(), NULL, NULL, out, 0);\n        if (!res)\n        {\n            BIO_free(out);\n            unsigned long error = ERR_get_error();\n            const char* errstr = ERR_reason_error_string(error);\n            if (errstr) {\n                throw cryptutilException(\"Error decrypting cipher text [\" + string(errstr) + \"]\");\n            }\n            else {\n                throw cryptutilException(\"Error decrypting cipher text\");\n            }\n        }\n        string plaintext = GetStringFromBio(out);\n        
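// The decrypted bytes now live in plaintext, so the memory BIO can be freed.\n        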
BIO_free(out);\n        return plaintext;\n    }\n\n    // Given an encrypted STRING (CMS encoded as hex chars), a private key file, and an optional password,\n    // decode and decrypt the CMS and return the decrypted string, or throw a cryptutilException if it fails\n    string DecodeAndDecryptString(const string& privKeyPath, const string& encoded, const string& keyPass)\n    {\n        if (privKeyPath.empty())\n        {\n            throw invalid_argument(\"The private key path must not be an empty string\");\n        }\n        if (encoded.empty())\n        {\n            throw invalid_argument(\"The encoded ciphertext must not be an empty string\");\n        }\n        if (!FileExists(privKeyPath))\n        {\n            throw runtime_error(\"Private key file was not found at path: \" + privKeyPath);\n        }\n        OpenSSL_add_all_algorithms();\n        ERR_load_crypto_strings();\n\n        // Read Private Key\n        uniqueEvpKey pkey = GetPrivateKeyFromUnknownFileType(privKeyPath, keyPass);\n        uniqueCms cms = GetCMSFromEncodedString(encoded);\n        return DecryptCMSWithPrivateKey(pkey, cms);\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/cryptutil.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _CRYPTUTIL_H_\n#define _CRYPTUTIL_H_\n#include <vector>\n#include <string>\n#include <utility>\n\n\ntypedef unsigned char BYTE;\nnamespace cryptutil\n{\n    bool DecodeString(const std::string& encodedString, std::vector<BYTE>& results);\n    std::string DecodeAndDecryptString(const std::string& privKeyPath, const std::string& encodedString,\n                                       const std::string& keyPassword = \"\");\n\n    // Custom exception class\n    class cryptutilException : public std::exception\n    {\n        std::string exMessage;\n    public:\n        cryptutilException(const std::string& errDetail)\n            : exMessage(errDetail)\n        {}\n        virtual const char* what() const throw()\n        {\n            return exMessage.c_str();\n        }\n    };\n}\n#endif\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/fdelt_chk.c",
    "content": "/*\n   Copyright (c) Microsoft Corporation. All rights reserved.\n   Licensed under the MIT license.\n*/\n\n#include <sys/select.h>\n\n# define strong_alias(name, aliasname) _strong_alias(name, aliasname)\n# define _strong_alias(name, aliasname) \\\n  extern __typeof (name) aliasname __attribute__ ((alias (#name)));\n\n/*\n * 'unsigned' dropped from the original source, to match\n * the prototype defined in select2.h.\n */\nlong int\n__fdelt_chk (long int d)\n{\n  if (d >= FD_SETSIZE)\n    __chk_fail ();\n\n  return d / __NFDBITS;\n}\nstrong_alias (__fdelt_chk, __fdelt_warn)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/mdsautokey.h",
    "content": "// --------------------------------------------------------------------------------------------------------------------\n// <copyright file=\"mdsautokey.h\" company=\"Microsoft\">\n//  Copyright (c) Microsoft Corporation.  All rights reserved.\n// </copyright>\n// --------------------------------------------------------------------------------------------------------------------\n\n// The autokey feature is never used by the Linux Diagnostic Extension; this stub disables the feature.\n\n#ifndef _AUTOKEY_H_\n#define _AUTOKEY_H_\n#include <string>\n#include <map>\n\nnamespace mdsautokey {\n    enum autokeyResultStatus {\n        autokeySuccess,\n        autokeyPartialSuccess,\n        autokeyFailure\n    };\n\n    class autokeyResult\n    {\n    public:\n        autokeyResultStatus status;\n        autokeyResult(autokeyResultStatus stat) : status(stat) {}\n        autokeyResult() : status(autokeyResultStatus::autokeySuccess) {}\n    };\n\n    autokeyResult GetLatestMdsKeys(const std::string& autokeyCfg, const std::string& nmspace,\n        int eventVersion, std::map<std::pair<std::string, std::string>, std::string>& keys)\n        {\n            return autokeyResult(autokeyResultStatus::autokeyFailure);\n        }\n}\n#endif\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsd/mdsd.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Logger.hh\"\n#include \"ProtocolListenerMgr.hh\"\n#include \"MdsdConfig.hh\"\n#include \"LocalSink.hh\"\n#include \"Engine.hh\"\n#include \"Version.hh\"\n#include \"Trace.hh\"\n#include \"DaemonConf.hh\"\n#include \"ExtensionMgmt.hh\"\n#include \"Utility.hh\"\n#include \"HttpProxySetup.hh\"\n#include \"EventHubUploaderMgr.hh\"\n#include \"XJsonBlobBlockCountsMgr.hh\"\n\n#include <cstdlib>\n#include <cerrno>\n#include <system_error>\n#include <cstdio>\n#include <cstring>\n#include <string>\n#include <fstream>\n#include <iostream>\n#include <sstream>\n#include <stdexcept>\n#include <memory>\n\nextern \"C\" {\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <pthread.h>\n#include <sys/time.h>\n#include <sys/resource.h>\n}\n\nusing std::string;\nusing std::to_string;\nusing std::cerr;\nusing std::endl;\n\nvoid usage();\nextern \"C\" { void SetSignalCatchers(int); }\nvoid TerminateHandler();\n\n// This is a file-scope string\nstatic std::string config_file_path;\nstatic std::string autokey_config_path;\n\nint\nmain(int argc, char **argv)\n{\n    int mdsd_port = 29130;    // Default port number, grabbed out of the air\n\n    Engine* engine = Engine::GetEngine();\n    bool mdsdConfigValidationOnly = false;\n    bool runAsDaemon = false; // If true, run at Daemon mode instead of application mode.\n    bool coreDumpAtFatal = false; // If true, create core dump when received fatal signals.\n    std::string proxy_setting_string; // E.g., \"[http:]//[username:password@]www.xyz.com:8080/\"\n    bool disableLogging = false; // Useful for development testing\n    bool retryRandomPort = false;\n\n    Logger::Init();\n\n    std::string mdsd_config_dir;\n    std::string mdsd_run_dir;\n    std::string mdsd_log_dir;\n\n    try\n    {\n        mdsd_config_dir = MdsdUtil::GetEnvDirVar(\"MDSD_CONFIG_DIR\", \"/etc/mdsd.d\");\n        mdsd_run_dir = MdsdUtil::GetEnvDirVar(\"MDSD_RUN_DIR\", \"/var/run/mdsd\");\n        mdsd_log_dir = MdsdUtil::GetEnvDirVar(\"MDSD_LOG_DIR\", \"/var/log\");\n    } catch (std::runtime_error& ex) {\n        Logger::LogError(ex.what());\n        exit(1);\n    }\n\n    config_file_path = mdsd_config_dir + \"/mdsd.xml\";\n    autokey_config_path = mdsd_config_dir + \"/mdsautokey.cfg\";\n    const std::string config_cache_dir = mdsd_config_dir + \"/config-cache\";\n\n    std::string mdsd_prefix = mdsd_run_dir + \"/\";\n    std::string mdsd_role = \"default\"; // altered by '-r'\n    std::string mdsd_role_prefix = mdsd_prefix + mdsd_role; // replaced with '-r' value if it starts with '/'\n    std::string ehSaveDir = mdsd_run_dir + \"/eh\"; // Full path to save failed Event Hub events.\n\n    // default mdsd log file paths, they can be overwritten by input args.\n    std::string mdsdInfoFile = mdsd_log_dir + \"/mdsd.info\";\n    std::string mdsdWarnFile = mdsd_log_dir + \"/mdsd.warn\";\n    std::string mdsdErrFile = mdsd_log_dir + \"/mdsd.err\";\n\n    LocalSink::Initialize();\n\n    {\n        int opt;\n        while ((opt = getopt(argc, argv, \"bc:CDde:jo:P:p:Rr:S:T:vVw:\")) != -1) {\n            switch (opt) {\n            case 'b':\n                engine->BlackholeEvents();\n                break;\n            case 'c':\n                config_file_path = optarg;\n                break;\n            case 'C':\n                coreDumpAtFatal = true;\n                break;\n            case 'D':\n            
    disableLogging = true;\n                break;\n            case 'd':\n                runAsDaemon = true;\n                break;\n            case 'e':\n                mdsdErrFile = optarg;\n                break;\n            case 'j':\n                Trace::AddInterests(Trace::EventIngest);\n                break;\n            case 'o':\n                mdsdInfoFile = optarg;\n                break;\n            case 'P':\n                proxy_setting_string = optarg;\n                try {\n                    MdsdUtil::CheckProxySettingString(proxy_setting_string);\n                } catch (const MdsdUtil::HttpProxySetupException& e) {\n                    cerr << \"Invalid proxy specification for -P option: \"\n                         << e.what() << endl;\n                    usage();\n                }\n                break;\n            case 'p':\n                mdsd_port = atoi(optarg);\n                if (mdsd_port < 0) { // We now allow '-p 0' (binding to a random port)\n                    usage();\n                }\n                break;\n            case 'R':\n                retryRandomPort = true;\n                break;\n            case 'r':\n                if (*optarg == '/') {\n                    // Special case to allow overriding of the default mdsd_prefix (e.g. /var/run/mdsd).\n                    // This may be needed in cases where mdsd will not be able to create or write to /var/run/mdsd.\n                    // This is useful during dev testing and might also be needed for LAD.\n                    mdsd_role_prefix = optarg;\n                } else {\n                    mdsd_role_prefix = mdsd_prefix + std::string(optarg);\n                }\n                break;\n            case 'S':\n                ehSaveDir = optarg;\n                if (ehSaveDir.empty()) {\n                    cerr << \"'-S' requires a valid pathname.\" << endl;\n                    usage();\n                }\n                break;\n            case 'T':\n                try {\n                    unsigned long val = std::stol(string(optarg), 0, 0);\n                    Trace::AddInterests(static_cast<Trace::Flags>(val));\n                } catch (std::exception & ex) {\n                    usage();\n                }\n                break;\n            case 'v':\n                mdsdConfigValidationOnly = true;\n                break;\n            case 'V':\n                cerr << Version::Version << endl;\n                exit(0);\n            case 'w':\n                mdsdWarnFile = optarg;\n                break;\n            default: /* '?' */\n                usage();\n            }\n        }\n    }\n\n    // For config xml validation only, log to console.\n    if (!mdsdConfigValidationOnly) {\n        // Only try to create the mdsd_run_dir dir if it wasn't overridden via '-r' option.\n        if (mdsd_role_prefix.substr(0, mdsd_run_dir.length()) == mdsd_run_dir) {\n            try {\n                MdsdUtil::CreateDirIfNotExists(mdsd_run_dir, 01755);\n            }\n            catch (std::exception &e) {\n                Logger::LogError(\"Fatal error: unexpected exception at creating dir '\" + mdsd_run_dir + \"'. 
\" +\n                                 \"Reason: \" + e.what());\n                exit(1);\n            }\n        }\n\n        try {\n            MdsdUtil::CreateDirIfNotExists(ehSaveDir, 01755);\n        }\n        catch(std::exception & e) {\n            Logger::LogError(\"Fatal error: unexpected exception at creating dir '\" + ehSaveDir + \"'. \" +\n                             \"Reason: \" + e.what());\n            exit(1);\n        }\n\n        if (!disableLogging) {\n            Logger::SetInfoLog(mdsdInfoFile.c_str());\n            Logger::SetWarnLog(mdsdWarnFile.c_str());\n            Logger::SetErrorLog(mdsdErrFile.c_str());\n        }\n\n        if (0 == geteuid() && runAsDaemon) {\n            // Change ownership of logs if we're running as root\n            DaemonConf::Chown(mdsdInfoFile);\n            DaemonConf::Chown(mdsdWarnFile);\n            DaemonConf::Chown(mdsdErrFile);\n\n            if (mdsd_role_prefix.substr(0, mdsd_run_dir.length()) == mdsd_run_dir)\n            {\n                DaemonConf::Chown(mdsd_run_dir);\n            }\n            DaemonConf::Chown(ehSaveDir);\n        }\n    }\n\n    try {\n        XJsonBlobBlockCountsMgr::GetInstance().SetPersistDir(mdsd_role_prefix + \"_jsonblob_blkcts\", mdsdConfigValidationOnly);\n    } catch (std::exception& e) {\n        Logger::LogError(std::string(\"Unexpected exception from setting JsonBlobBlockCountsMgr persist dir. Reason: \").append(e.what()));\n        exit(1);\n    }\n\n    if (runAsDaemon) {\n        DaemonConf::RunAsDaemon(mdsd_role_prefix + \".pid\");\n    }\n\n    SetSignalCatchers(coreDumpAtFatal);\n    std::set_terminate(TerminateHandler);\n\n    if (mdsdConfigValidationOnly) {\n        std::unique_ptr<MdsdConfig> newconfig(new MdsdConfig(config_file_path, autokey_config_path));\n        int status = 0;\n        if (newconfig->GotMessages(MdsdConfig::anySeverity)) {\n            cerr << \"Parse reported these messages:\" << endl;\n            newconfig->MessagesToStream(cerr, MdsdConfig::anySeverity);\n            status = 1;\n        } else {\n            cerr << \"Parse succeeded with no messages.\" << endl;\n        }\n        newconfig.reset();\n        exit(status);\n    }\n\n    if (!mdsd::EventHubUploaderMgr::GetInstance().SetTopLevelPersistDir(ehSaveDir)) {\n        exit(1);\n    }\n\n    ProtocolListenerMgr::Init(mdsd_role_prefix, mdsd_port, retryRandomPort);\n\n    MdsdConfig* newconfig = new MdsdConfig(config_file_path, autokey_config_path);\n    auto valid = newconfig->ValidateConfig(true);\n    if (!valid || !newconfig->IsUseful()) {\n        Logger::LogError(\"Error: Config invalid or not useful (if there's no config parse error). 
Abort mdsd.\");\n        delete newconfig;\n        exit(1);\n    }\n    Engine::SetConfiguration(newconfig);\n\n    try {\n        MdsdUtil::SetStorageHttpProxy(proxy_setting_string, { \"MDSD_http_proxy\", \"https_proxy\", \"http_proxy\" });\n    }\n    catch(const std::exception & ex) {\n        Logger::LogError(ex.what());\n        exit(1);\n    }\n\n    ExtensionMgmt::StartExtensionsAsync(Engine::GetEngine()->GetConfig());\n\n    // Start the listeners\n    auto plmgmt = ProtocolListenerMgr::GetProtocolListenerMgr();\n    try\n    {\n        if (!plmgmt->Start()) {\n            Logger::LogError(\"One or more listeners failed to start.\");\n            exit(1);\n        }\n    }\n    catch(std::exception& ex) {\n        Logger::LogError(\"Error: unexpected exception while starting listeners: \" + std::string(ex.what()));\n        exit(1);\n    }\n    catch(...) {\n        Logger::LogError(\"Error: unknown exception while starting listeners.\");\n        exit(1);\n    }\n\n    // Wait to be stopped\n    plmgmt->Wait();\n\n    return 0;\n}\n\nvoid\nusage()\n{\n    cerr << \"Usage:\" << endl\n    << \"mdsd [-Abdjv] [-c path] [-e path] [-o path] [-p port] [-P proxy_setting] [-r path] [-S path] [-T flags] [-w path]\" << endl << endl\n    << \"-A  Don't enable config auto management.\" << endl\n    << \"-b  Don't forward events to MDS (blackhole them instead)\" << endl\n    << \"-c  Specifies the path to the configuration XML file\" << endl\n    << \"-C  Don't suppress core dump when dying due to fatal signals\" << endl\n    << \"-D  Disable logging to files. All log output will instead go to STDERR (fd 2).\" << endl\n    << \"-d  Run mdsd as a daemon\" << endl\n    << \"-e  Specifies the path to which mdsd error logs are dumped\" << endl\n    << \"-j  Dump all JSON events to stdout as they're received\" << endl\n    << \"-o  Specifies the path to which mdsd informative logs are dumped\" << endl\n    << \"-p  Specifies the port on which the daemon listens for stream connections (0 can be passed\" << endl\n    << \"    as port, in which case a randomly available port will be picked). The port will only be\" << endl\n    << \"    bound to 127.0.0.1 (loopback). If the specified non-zero port is in use,\" << endl\n    << \"    and '-R' is specified, then mdsd will try to bind to a randomly available port instead.\" << endl\n    << \"    Either way, the bound port number will be written to a file whose path is derived\" << endl\n    << \"    from -r info or default (/var/run/mdsd/default.pidport).\" << endl\n    << \"-P  Specifies an HTTP proxy. If not set, use environment variable in order of MDSD_http_proxy,\" << endl\n    << \"    https_proxy, http_proxy, with first one tried first. If -P is set, override environment variables.\" << endl\n    << \"-R  Try binding to a random port if binding to the default/specified port fails.\" << endl\n    << \"-r  Specifies the role name or file prefix that mdsd will use to construct the paths to the\" << endl\n    << \"    pidport and unix domain socket files. If the argument starts with '/' then the value is\" << endl\n    << \"    used as the file prefix, otherwise it is used as the role name and the file prefix is \" << endl\n    << \"    '/var/run/mdsd/' + role name (e.g. if role name is 'test' then the prefix is '/var/run/mdsd/test').\" << endl\n    << \"-S  Specifies directory to save Event Hub events. syslog user needs to have rwx\" << endl\n    << \"    access to it. 
If the directory does not exist, mdsd will try to create it.\" << endl\n    << \"-T  Enable tracing for modules selected by flags\" << endl\n    << \"-v  Validate configuration file and exit\" << endl\n    << \"-V  Print version and exit\" << endl\n    << \"-w  Specifies the path to which mdsd warning logs are dumped\" << endl;\n    exit(1);\n}\n\nextern \"C\" void\nLoadNewConfiguration()\n{\n    Trace trace(Trace::ConfigLoad, \"LoadNewConfiguration\");\n\n    Logger::LogInfo(\"Reloading configuration (SIGHUP caught)\");\n\n    MdsdConfig *newconfig = new MdsdConfig(config_file_path, autokey_config_path);\n    bool valid = newconfig->ValidateConfig(true);\n    if (!valid || !newconfig->IsUseful()) {\n        delete newconfig;\n    }\n    else {\n        Engine::SetConfiguration(newconfig);\n        ExtensionMgmt::StartExtensionsAsync(newconfig);\n    }\n}\n\nextern \"C\" void\nSetCoreDumpLimit()\n{\n    Logger::LogInfo(\"Set resource limits for core dump.\");\n\n    struct rlimit core_limit;\n    if (getrlimit(RLIMIT_CORE, &core_limit) < 0) {\n        std::string errstr = MdsdUtil::GetErrnoStr(errno);\n        Logger::LogError(\"Error: getrlimit failed. Reason: \" + errstr);\n        return;\n    }\n\n    if (RLIM_INFINITY != core_limit.rlim_cur) {\n        core_limit.rlim_cur = RLIM_INFINITY;\n        core_limit.rlim_max = core_limit.rlim_cur;\n\n        if (setrlimit(RLIMIT_CORE, &core_limit) < 0) {\n            std::string errstr = MdsdUtil::GetErrnoStr(errno);\n            Logger::LogError(\"Error: setrlimit failed. Reason: \" + errstr);\n        }\n    }\n}\n\n// vim: set tabstop=4 softtabstop=4 shiftwidth=4 expandtab :\n"
  },
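Worth noting from the Main.cc excerpt above: LoadNewConfiguration validates the freshly parsed MdsdConfig and swaps it in via Engine::SetConfiguration only when it is usable, so a broken edit to the configuration XML cannot take down a running daemon. Per its log message, the handler runs when mdsd catches SIGHUP (the signal registration itself happens earlier in Main.cc, outside this excerpt), so a reload can be triggered with an ordinary `kill -HUP <mdsd-pid>`.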
  {
    "path": "Diagnostic/mdsd/mdsd/wrap_memcpy.c",
    "content": "/*\n   Copyright (c) Microsoft Corporation. All rights reserved.\n   Licensed under the MIT license.\n*/\n\n#include <string.h>\n\n/* some systems do not have newest memcpy@@GLIBC_2.14 - stay with old good one */\nasm (\".symver memcpy, memcpy@GLIBC_2.2.5\");\n\nvoid *__wrap_memcpy(void *dest, const void *src, size_t n)\n{\n    return memcpy(dest, src, n);\n}\n"
  },
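The `__wrap_` prefix above only takes effect when the final link is performed with GNU ld's `--wrap=memcpy` option, which reroutes every `memcpy` call site to `__wrap_memcpy`; the `.symver` directive then pins that single definition to the old `memcpy@GLIBC_2.2.5` symbol (the x86-64 glibc baseline) so the binary also runs on pre-2.14 glibc. A minimal sketch of how a consumer would be built; the file name and exact flags are illustrative, not taken from this repo's build scripts:

```cpp
// demo.cpp -- hypothetical consumer of wrap_memcpy.c.
// Build (illustrative):
//   gcc -c wrap_memcpy.c
//   g++ demo.cpp wrap_memcpy.o -Wl,--wrap=memcpy -fno-builtin-memcpy
// With --wrap, the linker resolves memcpy call sites to __wrap_memcpy;
// -fno-builtin-memcpy keeps the compiler from inlining memcpy as a builtin
// and bypassing the wrapper.
#include <cstdio>
#include <cstring>

int main()
{
    char src[] = "old glibc memcpy";
    char dst[sizeof(src)];
    std::memcpy(dst, src, sizeof(src));  // routed to __wrap_memcpy at link time
    std::printf("%s\n", dst);
    return 0;
}
```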
  {
    "path": "Diagnostic/mdsd/mdsd.8",
    "content": ".\\\"Created with GNOME Manpages Editor Wizard\n.\\\"http://sourceforge.net/projects/gmanedit2\n.TH mdsd 8 \"August 28, 2017\" \"\" \"Azure MDS Daemon\"\n\n.SH NAME\nmdsd \\- azure MDS daemon\n\n.SH SYNOPSIS\n.B mdsd\n.RI  \"[-AbCDdjvV] [-c path] [-e path] [-o path] [-p port] [-P proxy_setting] [-r role_name/path] [-T flags] [-w path]\"\n.br\n\n.SH DESCRIPTION\n.PP\n\\fBmdsd\\fP is the mandated logging infrastructure for Azure services. It delivers event\ndata (logs, collected metrics, etc) to Azure storage for consumption by various\ndownstream users.\n\n.SH OPTIONS\n.TP\n.BI \"\\-b\"\nDon't forward events to storage (blackhole them instead)\n.TP\n.BI \"\\-c \" \"config file\"\nSpecifies the path to the configuration XML file (default /etc/mdsd.d/mdsd.xml)\n.TP\n.BI \"\\-C\"\nDon't suppress core dump when dying due to fatal signals\n.TP\n.BI \"\\-D\"\nDisable logging to files. All log output will instead go to STDERR (fd 2).\n.TP\n.BI \"\\-d\"\nRun mdsd as a daemon\n.TP\n.BI \"\\-e \" \"log path\"\nSpecifies the path to which mdsd error logs are dumped\n.TP\n.BI \"\\-j\"\nDump all JSON events to stdout as they're received\n.TP\n.BI \"\\-o \" \"log path\"  \nSpecifies the path to which received object strings are dumped\n.TP\n.BI \"\\-P \" \"proxy_setting\"\nSpecifies the http proxy which the daemon should use for all outbound http/https connections.\nAn example proxy_setting is something like \"http://username:password@proxy_host_name:proxy_port_number\".\nThe same can be specified using one of the \"MDSD_http_proxy\" or \"https_proxy\" or \"http_proxy\"\nenvironment variables (searched in that order and the first hit is used), and this option (using -P)\nwill override the environment variable (when -P is specified). DO NOT a password on the command line.\nIf a password needs to be given, specify as one of the environment variables mentioned earlier.\n.TP\n.BI \"\\-p \" \"port\"\nSpecifies the port on which the daemon listens for stream connections (0 can be passed\nas port, in which case a randomly available port will be picked). The port will only be\nbound to 127.0.0.1 (loopback).\nIf the specified non-zero port is in use, and '-R' is specified, then mdsd will try to bind\nto a randomly available port instead. Either way, the bound port number will be written to a\nfile whose path is derived from -r info or default (/var/run/mdsd/default.pidport).\n.TP\n.BI \"\\-R \"\nTry binding to a random port if binding to the default/specified port fails.\n.TP\n.BI \"\\-r \" \"role_name/path\"\nSpecifies the role name or file prefix that mdsd will use to construct the paths to the\npidport and unix domain socket files. If the argument starts with '/', the value is\nused as the file prefix; otherwise, the value is used as the role name and the file\nprefix is '/var/run/mdsd/' + role name. For example, if role name is 'test', then the\nprefix is '/var/run/mdsd/test'. The pidport file path is 'prefix' + '.pidport'. The\nunix domain socket files paths are 'prefix' + '_' + 'protocol' + '.socket', where the\nprotocol is 'bond', 'djson', and 'json'.\nThe default paths are:\n/var/run/mdsd/default.pidport\n/var/run/mdsd/default_bond.socket\n/var/run/mdsd/default_djson.socket\n/var/run/mdsd/default_json.socket\n.TP\n.BI \"\\-S \" \"directory\"\nSpecifies directory to save Event Hub events. syslog user needs to have rwx\naccess to it. 
If the directory does not exist, mdsd will try to create it.\n.TP\n.BI \"\\-T\" \nEnable tracing for modules selected by flags.\n.TP\n.BI \"\\-v\"\nValidate configuration file and exit\n.TP\n.BI \"\\-V\"\nPrint version and exit\n.TP\n.BI \"\\-w \" \"log path\"\nSpecifies the path to which mdsd warning logs are dumped\n\n.SH ENVIRONMENT\n.TP\n.BI \"MDSD_CONFIG_DIR\"\nIf set, overrides the default value of \"/etc/mdsd.d\".\n.TP\n.BI \"MDSD_RUN_DIR\"\nIf set, overrides the default value of \"/var/run/mdsd\"\n.TP\n.BI \"MDSD_LOG_DIR\"\nIf set, overrides the default value of \"/var/log\"\n\n.SH \"SEE ALSO\"\n.BR logger (1),\n.BR syslog (2),\n.BR syslog (3)\n"
  },
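Reading the man page's options together, an illustrative invocation (not taken from this repo) is `mdsd -D -c /etc/mdsd.d/mdsd.xml -r test -p 0`: log output goes to stderr, the file prefix becomes /var/run/mdsd/test, and the daemon binds a random loopback port whose number is written to /var/run/mdsd/test.pidport.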
  {
    "path": "Diagnostic/mdsd/mdsdcfg/CMakeLists.txt",
    "content": "include_directories(\n    ${CMAKE_SOURCE_DIR}/mdsd\n    ${CMAKE_SOURCE_DIR}/mdsdlog\n)\n\nset(SOURCES\n    EventPubCfg.cc\n    MdsdEventCfg.cc\n)\n\n# static lib only\nadd_library(${MDSDCFG_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS\n        ${MDSDCFG_LIB_NAME}\n        ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdcfg/EventPubCfg.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"EventPubCfg.hh\"\n#include \"MdsdEventCfg.hh\"\n#include \"Trace.hh\"\n\nusing namespace mdsd;\n\nEventPubCfg::EventPubCfg(\n    const std::shared_ptr<MdsdEventCfg>& mdsdEventCfg\n    ) :\n    m_mdsdEventCfg(mdsdEventCfg),\n    m_dataChecked(false)\n{\n    if (!mdsdEventCfg) {\n        throw std::invalid_argument(\"EventPubCfg ctor: invalid NULL pointer for mdsdEventCfg param.\");\n    }\n}\n\nvoid\nEventPubCfg::AddServiceBusAccount(\n    const std::string & moniker,\n    std::string connStr\n    )\n{\n    if (moniker.empty()) {\n        throw std::invalid_argument(\"AddServiceBusAccount(): moniker param cannot be empty.\");\n    }\n    if (connStr.empty()) {\n        throw std::invalid_argument(\"AddServiceBusAccount(): connStr param cannot be empty.\");\n    }\n    // throw if key already exists\n    if (m_sbAccountMap.find(moniker) != m_sbAccountMap.end()) {\n        throw std::runtime_error(\"AddServiceBusAccount(): key \" + moniker + \" already exists.\");\n    }\n    m_sbAccountMap[moniker] = std::move(connStr);\n    m_dataChecked = false;\n}\n\nvoid\nEventPubCfg::AddAnnotationKey(\n    const std::string & publisherName,\n    std::string saskey\n    )\n{\n    if (publisherName.empty()) {\n        throw std::invalid_argument(\"AddAnnotationKey(): publisherName param cannot be empty.\");\n    }\n    if (saskey.empty()) {\n        throw std::invalid_argument(\"AddAnnotationKey(): saskey param cannot be empty.\");\n    }\n\n    // throw if key already exists\n    if (m_annotationKeyMap.find(publisherName) != m_annotationKeyMap.end()) {\n        throw std::runtime_error(\"AddAnnotationKey(): key \" + publisherName + \" already exists.\");\n    }\n    m_annotationKeyMap[publisherName] = std::move(saskey);\n    m_dataChecked = false;\n}\n\nstd::unordered_set<std::string>\nEventPubCfg::CheckForInconsistencies(\n    bool hasAutoKey\n    )\n{\n    Trace trace(Trace::ConfigLoad, \"EventPubCfg::CheckForInconsistencies\");\n    if (m_dataChecked) {\n        TRACEINFO(trace, \"EventPubCfg was already checked for inconsistencies. 
Do nothing.\");\n        return std::unordered_set<std::string>();\n    }\n\n    // clear any previous data\n    m_nameMonikers.clear();\n    m_embeddedSasMap.clear();\n\n    std::unordered_set<std::string> invalidItems;\n\n    for (const auto & publisherName : m_mdsdEventCfg->GetEventPublishers()) {\n        try {\n            ValidateSasKey(publisherName, hasAutoKey);\n        }\n        catch(const std::exception & ex) {\n            invalidItems.insert(publisherName);\n        }\n    }\n\n    m_dataChecked = true;\n    DumpEmbeddedSasInfo();\n    return invalidItems;\n}\n\nvoid\nEventPubCfg::ValidateSasKey(\n    const std::string & publisherName,\n    bool hasAutoKey\n    )\n{\n    if (publisherName.empty()) {\n        throw std::invalid_argument(\"ValidateSasKey(): publisherName param cannot be empty.\");\n    }\n\n    auto monikers = m_mdsdEventCfg->GetEventPubMonikers(publisherName);\n    if (monikers.empty()) {\n        throw std::runtime_error(\"ValidateSasKey(): no moniker is found for publisher \" + publisherName);\n    }\n\n    m_nameMonikers[publisherName] = monikers;\n\n    if (!hasAutoKey) {\n        ValidateEmbeddedKey(publisherName, monikers);\n    }\n}\n\nvoid\nEventPubCfg::ValidateEmbeddedKey(\n    const std::string & publisherName,\n    const std::unordered_set<std::string>& monikers\n    )\n{\n    // The SAS Key should be defined in either\n    // <EventStreamingAnnotations> or <ServiceBusAccountInfos>\n    auto annotationItem = m_annotationKeyMap.find(publisherName);\n    if (annotationItem != m_annotationKeyMap.end()) {\n        // search annotation key first\n        auto & saskey = annotationItem->second;\n        for (const auto & moniker: monikers) {\n            m_embeddedSasMap[publisherName][moniker] = saskey;\n        }\n    }\n    else {\n        // search service bus account info\n        for (const auto & moniker: monikers) {\n            auto sbitem = m_sbAccountMap.find(moniker);\n            if (sbitem != m_sbAccountMap.end()) {\n                m_embeddedSasMap[publisherName][moniker] = sbitem->second;\n            }\n            else {\n                throw std::invalid_argument(\"ValidateEmbeddedKey(): failed to find EH SAS key for \" + publisherName);\n            }\n        }\n    }\n}\n\nvoid\nEventPubCfg::DumpEmbeddedSasInfo()\n{\n    Trace trace(Trace::ConfigLoad, \"EventPubCfg::DumpEmbeddedSasInfo\");\n\n    if (!trace.IsActive()) {\n        return;\n    }\n    if (m_embeddedSasMap.empty()) {\n        TRACEINFO(trace, \"EventPublisher map is empty\");\n    }\n    else {\n        for (const auto & iter : m_embeddedSasMap) {\n            auto & publisherName = iter.first;\n            auto & itemsmap = iter.second;\n            if (itemsmap.empty()) {\n                TRACEINFO(trace, \"EventPublisher='\" << publisherName << \"'; Moniker/SAS: N/A.\");\n            }\n            else {\n                for (const auto& item : itemsmap) {\n                    auto & moniker = item.first;\n                    auto & saskey = item.second;\n                    TRACEINFO(trace, \"EventPublisher='\" << publisherName << \"'; Moniker='\"\n                        << moniker << \"'; SAS: \" << saskey.substr(0, saskey.size()/2));\n                }\n            }\n        }\n    }\n}\n\nstd::unordered_map<std::string, std::unordered_map<std::string, std::string>>\nEventPubCfg::GetEmbeddedSasData() const\n{\n    if (!m_dataChecked) {\n        throw std::runtime_error(\"Check EventPubCfg for inconsistencies before GetEmbeddedSasData().\");\n    }\n    return 
m_embeddedSasMap;\n}\n\nstd::unordered_map<std::string, std::unordered_set<std::string>>\nEventPubCfg::GetNameMonikers() const\n{\n    if (!m_dataChecked) {\n        throw std::runtime_error(\"Check EventPubCfg for inconsistencies before GetNameMonikers().\");\n    }\n    return m_nameMonikers;\n}"
  },
  {
    "path": "Diagnostic/mdsd/mdsdcfg/EventPubCfg.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTPUBCFG__HH__\n#define __EVENTPUBCFG__HH__\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n\nnamespace mdsd\n{\n\nclass MdsdEventCfg;\n\n/// This class handles event publishing configurations and error detection.\n///\n/// The design is based on the fact that\n/// - Some errors could only be detected after all configuration data had been gathered.\n/// - One piece of information (whether mdsd xml uses AutoKey or not), not managed by\n///   this class and MdsdEventCfg, was needed to that final error detection.\n///\n/// Usage pattern:\n/// - Add raw configuration data (like service bus accounts, annotation keys).\n///   Read general event data from MdsdEventCfg.\n/// - Extract event publisher SAS keys, monikers using CheckForInconsistencies().\n///   Handle any inconsistencies.\n///   If new service bus account, annotation key data are added after\n///   CheckForInconsistencies(), CheckForInconsistencies() needs to be called again.\n/// - Use SAS keys, moniker info for event publishing (GetEmbeddedSasData(),\n///   GetNameMonikers(), etc.\n///\n/// NOTE: this class is not designed for thread-safe.\n///\nclass EventPubCfg\n{\npublic:\n    EventPubCfg(const std::shared_ptr<MdsdEventCfg>& mdsdEventCfg);\n\n    ~EventPubCfg() = default;\n\n    /// <summary>\n    /// Save event publisher credential info defined in <ServiceBusAccountInfos>.\n    /// If the moniker already exists, throw exception.\n    /// </summary>\n    void AddServiceBusAccount(const std::string & moniker, std::string connStr);\n\n    /// <summary>\n    /// Save each Event Publisher's SAS key defined in <EventStreamingAnnotations>\n    /// If the publisherName already exists, throw exception.\n    /// </summary>\n    /// <param name=\"publisherName\"> event publisher name. It is source name for non-OMI query,\n    /// or eventName for OMIQuery</param>\n    /// <param name=\"saskey\">SAS Key for event publishing</param>\n    void AddAnnotationKey(const std::string & publisherName, std::string saskey);\n\n    /// <summary>\n    /// Using SBAccounts, AnnotationKeys and data from mdsdEventCfg,\n    /// extract all publisher names, their monikers and sas keys.\n    /// Return all the invalid publisher names if any.\n    /// NOTE: this API applies to either AutoKey or embedded keys.\n    /// </summary>\n    /// <param name=\"hasAutoKey\">If true, validate autokey related info; If false, validate\n    /// embedded keys info. 
</param>\n    std::unordered_set<std::string> CheckForInconsistencies(bool hasAutoKey);\n\n    /// <summary>\n    /// Return a map containing moniker, saskey info for each publisher name.\n    /// The saskeys are from embedded keys only.\n    /// map key: publisher name\n    /// map value: a map of <moniker, saskey>\n    ///\n    /// Throw exception if required CheckForInconsistencies() is not called.\n    /// </summary>\n    std::unordered_map<std::string, std::unordered_map<std::string, std::string>> GetEmbeddedSasData() const;\n\n    /// <summary>\n    /// Get all the publisher names and their monikers.\n    /// Each publisher has one or more monikers.\n    /// NOTE: this function works for both embedded keys and AutoKeys.\n    /// Return a map with key=publishername; value: monikers\n    ///\n    /// Throw exception if required CheckForInconsistencies() is not called.\n    /// </summary>\n    std::unordered_map<std::string, std::unordered_set<std::string>> GetNameMonikers() const;\n\nprivate:\n    /// <summary>\n    /// Get the SAS key for given event publisher, and store the result to _ehPubMap.\n    /// Throw exception if no SAS key or no moniker is found for the event publisher.\n    /// </summary>\n    void ValidateSasKey(const std::string & publisherName, bool hasAutoKey);\n\n    /// <summary>\n    /// Validate embedded keys.\n    /// Throw exception if no key is found for given publisher name.\n    /// </summary>\n    void ValidateEmbeddedKey(const std::string & publisherName, const std::unordered_set<std::string>& monikers);\n\n    /// <summary>Dump all embedded sas configuration data for tracing purpose.</summary>\n    void DumpEmbeddedSasInfo();\n\n\nprivate:\n    std::shared_ptr<MdsdEventCfg> m_mdsdEventCfg;\n\n    /// Whether data are checked or not.\n    /// CheckForInconsistencies() must be called before any lookup methods are called.\n    bool m_dataChecked;\n\n    /// To store Event Publisher connection string defined in <ServiceBusAccountInfos>\n    /// in mdsd xml.\n    /// map key: moniker; value: event publisher connection string.\n    std::unordered_map<std::string, std::string> m_sbAccountMap;\n\n    /// To store Event Publisher SAS key defined in <EventStreamingAnnotations> in mdsd xml.\n    /// NOTE: for each event publisher defined in EventStreamingAnnotations, the SAS key\n    /// must be defined:\n    /// - For non-Geneva, either ServiceBusAccountInfos or EventStreamingAnnotations.\n    /// - For Geneva, AutoKey only.\n    ///\n    /// map key: publisher name; value: event publisher SAS key.\n    /// publisher name: source name for non-OMIQuery, or eventName for OMIQuery.\n    std::unordered_map<std::string, std::string> m_annotationKeyMap;\n\n    /// This stores moniker, saskey for each publisher name.\n    /// These information are calculated based on raw xml embedded configurations.\n    /// map key=publisher name; map value=a map of <moniker, saskey>\n    std::unordered_map<std::string, std::unordered_map<std::string, std::string>> m_embeddedSasMap;\n\n    /// This stores monikers for each publisher name.\n    /// Each publisher has one or more monikers.\n    /// These information are calculated based on raw xml configurations.\n    /// map key = publisher name; map value=monikers\n    std::unordered_map<std::string, std::unordered_set<std::string>> m_nameMonikers;\n};\n\n} // namespace\n\n#endif // __EVENTPUBCFG__HH__\n"
  },
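The usage pattern documented at the top of EventPubCfg.hh can be made concrete with a short sketch. The monikers, connection string, and SAS key below are hypothetical placeholders, and the MdsdEventCfg is assumed to have been populated while parsing mdsd.xml:

```cpp
// Minimal sketch of the EventPubCfg call sequence documented in EventPubCfg.hh.
#include <memory>
#include "EventPubCfg.hh"
#include "MdsdEventCfg.hh"

void ConfigureEventPublishing(const std::shared_ptr<mdsd::MdsdEventCfg>& eventCfg)
{
    mdsd::EventPubCfg pubCfg(eventCfg);

    // 1. Add raw configuration data gathered from mdsd.xml.
    pubCfg.AddServiceBusAccount("moniker1", "Endpoint=sb://example/;SharedAccessKey=...");
    pubCfg.AddAnnotationKey("MyPublisher", "SharedAccessSignature sr=...");

    // 2. Validate; required before any lookup method may be used.
    auto invalid = pubCfg.CheckForInconsistencies(/*hasAutoKey=*/false);
    // ... handle the invalid publisher names, if any ...

    // 3. Only now may the aggregated results be read.
    auto sasData  = pubCfg.GetEmbeddedSasData();  // publisher -> {moniker -> saskey}
    auto monikers = pubCfg.GetNameMonikers();     // publisher -> monikers
    (void)invalid; (void)sasData; (void)monikers;
}
```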
  {
    "path": "Diagnostic/mdsd/mdsdcfg/EventSinkCfgInfo.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTSINKCFGINFO__HH__\n#define __EVENTSINKCFGINFO__HH__\n\n#include <string>\n#include \"StoreType.hh\"\n#include \"EventType.hh\"\n\nnamespace mdsd\n{\n\n// This class is about mdsd event sink/destination configuration info.\n// It records what's defined in mdsd xml.\nstruct EventSinkCfgInfo {\n    std::string m_eventName;\n    std::string m_moniker;\n    StoreType::Type m_storeType = StoreType::None;\n    std::string m_sourceName;\n    EventType m_eventType;\n\n    EventSinkCfgInfo(const std::string & eventName,\n        const std::string & moniker,\n        StoreType::Type storeType,\n        const std::string & sourceName,\n        EventType eventType\n        ) :\n        m_eventName(eventName),\n        m_moniker(moniker),\n        m_storeType(storeType),\n        m_sourceName(sourceName),\n        m_eventType(eventType)\n        {}\n\n    /// Return true if this is a valid entry. Return false otherwise.\n    /// NOTE: sourceName can be empty (e.g. OMIQuery).\n    bool IsValid() const\n    {\n        if (m_moniker.empty() ||\n            StoreType::None == m_storeType ||\n            (EventType::None == m_eventType && !m_eventName.empty()) ||\n            (EventType::None != m_eventType && m_eventName.empty())\n            ) {\n            return false;\n        }\n        return true;\n    }\n\n    bool operator==(const EventSinkCfgInfo& other) const\n    {\n        return ((m_eventName == other.m_eventName) &&\n                (m_moniker == other.m_moniker) &&\n                (m_storeType == other.m_storeType) &&\n                (m_sourceName == other.m_sourceName) &&\n                (m_eventType == other.m_eventType)\n                );\n    }\n\n    bool operator!=(const EventSinkCfgInfo& other) const\n    {\n        return  !(*this == other);\n    }\n\n    // Return the name of the local sink that holds the CanonicalEntities\n    // that are supposed to be pushed to EventHub.\n    // For OMIQuery and DerivedEvent events, this is their event name.\n    // For other events, this is their source name.\n    std::string GetLocalSinkName() const\n    {\n        if (EventType::OMIQuery != m_eventType &&\n            EventType::DerivedEvent != m_eventType) {\n            return m_sourceName;\n        }\n        else {\n            return m_eventName;\n        }\n    }\n};\n\n} // namespace\n\n#endif // __EVENTSINKCFGINFO__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdcfg/EventType.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __EVENTTYPE__HH__\n#define __EVENTTYPE__HH__\n\nnamespace mdsd\n{\n\n// This defines event type specified in mdsd configuration file.\nenum class EventType {\n    None,\n    OMIQuery,       // event defined by <OMIQuery> \n    RouteEvent,     // event defined by <RouteEvent>\n    DerivedEvent,   // event defined by <DerivedEvent>\n    EtwEvent        // event defined by <EtwProvider>\n};\n\n} // namespace\n\n#endif // __EVENTTYPE__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdcfg/MdsdEventCfg.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <stdexcept>\n\n#include \"MdsdEventCfg.hh\"\n#include \"Trace.hh\"\n\nusing namespace mdsd;\n\nvoid\nMdsdEventCfg::AddEventSinkCfgInfoItem(\n    const EventSinkCfgInfo & item\n    )\n{\n    if (!item.IsValid()) {\n        throw std::invalid_argument(\"MdsdEventCfg::AddEventSinkCfgInfoItem(): item param must be valid.\");\n    }\n    m_eventSinkCfgInfoList.push_back(item);\n    m_dataUpdated = true;\n}\n\nvoid\nMdsdEventCfg::SetEventAnnotationTypes(\n    std::unordered_map<std::string, EventAnnotationType::Type>&& eventtypes\n    )\n{\n    m_eventAnnotationTypes = std::move(eventtypes);\n\n    for (const auto & item : m_eventAnnotationTypes) {\n        if (item.second & EventAnnotationType::EventPublisher) {\n            m_eventPublishers.insert(item.first);\n        }\n    }\n    m_dataUpdated = true;\n}\n\nvoid\nMdsdEventCfg::UpdateMoniker(\n    const std::string & eventName,\n    const std::string & oldMoniker,\n    const std::string & newMoniker\n    )\n{\n    Trace trace(Trace::ConfigUse, \"MdsdEventCfg::UpdateMoniker\");\n\n    if (eventName.empty()) {\n        throw std::invalid_argument(\"MdsdEventCfg::UpdateMoniker(): eventName param cannot be empty.\");\n    }\n\n    if (oldMoniker.empty()) {\n        throw std::invalid_argument(\"MdsdEventCfg::UpdateMoniker(): oldMoniker param cannot be empty.\");\n    }\n\n    if (newMoniker.empty()) {\n        throw std::invalid_argument(\"MdsdEventCfg::UpdateMoniker(): newMoniker param cannot be empty.\");\n    }\n\n    for (auto & item : m_eventSinkCfgInfoList) {\n        if (eventName == item.m_eventName && oldMoniker == item.m_moniker) {\n            item.m_moniker = newMoniker;\n            m_dataUpdated = true;\n        }\n    }\n}\n\nstd::unordered_set<std::string>\nMdsdEventCfg::GetInvalidAnnotations()\n{\n    ExtractEventCfg();\n\n    std::unordered_set<std::string> result;\n\n    for (const auto & item : m_eventAnnotationTypes) {\n        auto & name =  item.first;\n        auto & anntype = item.second;\n        if (EventAnnotationType::EventPublisher == anntype) {\n            if (!m_ehpubMonikers.count(name)) {\n                result.insert(name);\n            }\n        }\n        else {\n            if (!m_eventNames.count(name)) {\n                result.insert(name);\n            }\n        }\n    }\n    return result;\n}\n\nvoid\nMdsdEventCfg::ExtractEventCfg()\n{\n    if (!m_dataUpdated) {\n        return;\n    }\n\n    Trace trace(Trace::ConfigUse, \"MdsdEventCfg::ExtractEventCfg\");\n\n    // Clean any previous data if any\n    m_eventNames.clear();\n    m_ehpubMonikers.clear();\n    m_ehMonikers.clear();\n\n    auto publishers = GetEventPublishers();\n\n    for (const auto & item : m_eventSinkCfgInfoList) {\n        auto & eventname = item.m_eventName;\n        auto & moniker = item.m_moniker;\n        auto & storetype = item.m_storeType;\n\n        m_eventNames.insert(eventname);\n\n        auto localSinkName = item.GetLocalSinkName();\n        m_ehpubMonikers[localSinkName].insert(moniker);\n\n        if (storetype == StoreType::Bond) {\n            m_ehMonikers.insert(moniker);\n        }\n        else if (storetype == StoreType::Local) {\n            if (publishers.count(localSinkName)) {\n               m_ehMonikers.insert(moniker);\n            }\n        }\n    }\n    m_dataUpdated = false;\n}\n\nstd::unordered_map<std::string, 
std::unordered_set<std::string>>\nMdsdEventCfg::GetCentralBondEvents() const\n{\n    std::unordered_map<std::string, std::unordered_set<std::string>> cbEvents;\n\n    for (const auto & item : m_eventSinkCfgInfoList) {\n        if (StoreType::Bond == item.m_storeType) {\n            cbEvents[item.m_eventName].insert(item.m_moniker);\n        }\n    }\n    return cbEvents;\n}\n\nstd::unordered_set<std::string>\nMdsdEventCfg::GetEventPubMonikers(\n    const std::string & publisherName\n    )\n{\n    if (publisherName.empty()) {\n        throw std::invalid_argument(\"GetEventPubMonikers(): publisherName param cannot be empty.\");\n    }\n\n    ExtractEventCfg();\n\n    auto item = m_ehpubMonikers.find(publisherName);\n    if (item != m_ehpubMonikers.end()) {\n        return item->second;\n    }\n\n    return std::unordered_set<std::string>();\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdcfg/MdsdEventCfg.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __MDSDEVENTCFG__HH__\n#define __MDSDEVENTCFG__HH__\n\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"EventSinkCfgInfo.hh\"\n#include \"CfgEventAnnotationType.hh\"\n\n\nnamespace mdsd\n{\n\nenum class EventType;\n\n/// This class handles general mdsd event configurations.\n/// Usage pattern:\n/// - Collect raw event info data: AddEventSinkCfgInfoItem(), SetEventAnnotationTypes(),\n///   UpdateMoniker(), etc.\n/// - Use aggregated results: GetCentralBondEvents(), IsEventHubEnabled(), etc.\n///   The event configuration data are lazily extracted and aggregated at \"Get\" time.\n///\n/// NOTE: This class is not designed for thread-safe.\n///\nclass MdsdEventCfg {\npublic:\n    MdsdEventCfg() = default;\n    ~MdsdEventCfg() = default;\n\n    /// <summary>\n    /// Add an eventSinkCfgInfo object to internal data structure if it is valid.\n    /// Throw exception if eventSinkCfgInfo is invalid.\n    /// </summary>\n    void AddEventSinkCfgInfoItem(const EventSinkCfgInfo & item);\n\n    /// <summary>\n    /// Set event annotation types object.\n    /// </summary>\n    void SetEventAnnotationTypes(std::unordered_map<std::string, EventAnnotationType::Type>&& eventtypes);\n\n    /// <summary>\n    /// For all m_eventSinkCfgInfoList entries where eventName='eventName'\n    /// and moniker='oldMoniker', update moniker to 'newMoniker'.\n    /// Throw exception if any input parameter string is empty.\n    /// </summary>\n    void UpdateMoniker(const std::string & eventName, const std::string & oldMoniker,\n        const std::string & newMoniker);\n\n    /// <summary>\n    /// Get a map of <eventname, monikers> for all CentralBond store type events.\n    /// </summary>\n    std::unordered_map<std::string, std::unordered_set<std::string>> GetCentralBondEvents() const;\n\n    /// <summary>\n    /// Return the names of all event publishers in mdsd xml <EventStreamingAnnotations>.\n    /// This includes anything that could be invalid if any.\n    /// </summary>\n    std::unordered_set<std::string> GetEventPublishers() const\n    {\n        return m_eventPublishers;\n    }\n\n    /// <summary>\n    /// Return all the monikers used by given publisherName, which can be either a source name,\n    /// or an EventName (e.g. 
OMIQuery or DerivedEvent).\n    /// Return empty set if publisherName is not found\n    /// </summary>\n    std::unordered_set<std::string> GetEventPubMonikers(const std::string & publisherName);\n\n    /// <summary>\n    /// Get invalid names in mdsd xml <EventStreamingAnnotations>\n    /// </summary>\n    std::unordered_set<std::string> GetInvalidAnnotations();\n\n    /// <summary>\n    /// Returns boolean specifying whether provided moniker (input parameter)\n    /// has a companion Event Hub.\n    /// </summary>\n    bool IsEventHubEnabled(const std::string & moniker)\n    {\n        ExtractEventCfg();\n        return m_ehMonikers.count(moniker);\n    }\n\n    size_t GetNumEventSinkCfgInfoItems() const\n    {\n        return m_eventSinkCfgInfoList.size();\n    }\n\nprivate:\n    /// <summary>\n    /// Extract event configuration data and store them to internal data structures.\n    /// - a set to store all the event names.\n    /// - publisher name -> monikers map for all events.\n    /// - All monikers that are used by EventHub notice or Event publishing.\n    /// </summary>\n    void ExtractEventCfg();\n\nprivate:\n    /// Whether any config data are updated\n    bool m_dataUpdated = false;\n\n    /// Store information about all the events in mdsd xml file.\n    std::vector<EventSinkCfgInfo> m_eventSinkCfgInfoList;\n\n    /// Store all the eventNames\n    std::unordered_set<std::string> m_eventNames;\n\n    /// This map tracks all the EventHub publication monikers to which each new\n    /// CanonicalEvent, when added to the LocalSink, should be published.\n    ///\n    /// map key: LocalSink name\n    /// map value: all the monikers used by the LocalSink\n    std::unordered_map<std::string, std::unordered_set<std::string>> m_ehpubMonikers;\n\n    /// key: item name; value: EventAnnotationType\n    std::unordered_map<std::string, EventAnnotationType::Type> m_eventAnnotationTypes;\n\n    /// Store all the event publisher names.\n    std::unordered_set<std::string> m_eventPublishers;\n\n    /// Store the moniker names when EventHub is enabled on the moniker:\n    /// A companion Event Hub exists if\n    /// - a moniker has an event of store type 'CentralBond'\n    /// - a moniker has an event of store type 'Local', which is also listed\n    ///   under EventStreamingAnnotation as an EventPublisher.\n    std::unordered_set<std::string> m_ehMonikers;\n};\n\n} // namespace\n\n#endif // __MDSDEVENTCFG__HH__\n"
  },
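Similarly, a minimal sketch of the collect-then-query pattern documented in MdsdEventCfg.hh; the event name, source, and monikers are hypothetical placeholders:

```cpp
#include "MdsdEventCfg.hh"

void CollectEventCfg(mdsd::MdsdEventCfg& cfg)
{
    // Collect raw event info while parsing mdsd.xml; invalid items throw.
    cfg.AddEventSinkCfgInfoItem(mdsd::EventSinkCfgInfo(
        "MyEvent", "moniker1", mdsd::StoreType::Bond, "mySource",
        mdsd::EventType::RouteEvent));

    // A moniker can later be rewritten in place for the same event name.
    cfg.UpdateMoniker("MyEvent", "moniker1", "moniker2");

    // Aggregated results are lazily extracted at "Get" time.
    auto cbEvents = cfg.GetCentralBondEvents();      // eventName -> monikers
    bool ehOn     = cfg.IsEventHubEnabled("moniker2");
    (void)cbEvents; (void)ehOn;
}
```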
  {
    "path": "Diagnostic/mdsd/mdsdinput/CMakeLists.txt",
    "content": "set(SOURCES\n    mdsd_input_types.cpp\n    mdsd_input_apply.cpp\n    MdsdInputSchemaCache.cpp\n    MdsdInputMessageBuilder.cpp\n    MdsdInputMessageIO.cpp\n)\n\nadd_library(${INPUT_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS ${INPUT_LIB_NAME}\n    ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputMessageBuilder.cpp",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsdInputMessageBuilder.h\"\n#include \"bond/core/bond.h\"\n#include <boost/make_shared.hpp>\n#include <algorithm>\n\nnamespace mdsdinput\n{\n\n    void MessageBuilder::MessageBegin()\n    {\n        _output.reset(new bond::OutputBuffer(_buffer, BUFFER_SIZE));\n        _writer.reset(new bond::SimpleBinaryWriter<bond::OutputBuffer>(*(_output.get())));\n        _schema = std::make_shared<SchemaDef>();\n    }\n\n    std::shared_ptr<Message> MessageBuilder::MessageEnd(const std::string& source)\n    {\n        auto id = (_schema_cache->AddSchema(_schema)).first;\n        auto msg = std::make_shared<Message>();\n\n        msg->schemaId = id;\n        msg->source = source;\n\n        auto out = _output->GetBuffer();\n        auto buf = boost::allocate_shared_noinit<char[]>(std::allocator<char>(), out.length());\n        std::copy(out.begin(), out.end(), buf.get());\n        msg->data.assign(buf, out.length());\n\n        return msg;\n    }\n\n    void MessageBuilder::AddBool(const std::string& name, bool value)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_BOOL;\n        _schema->fields.push_back(fd);\n        _writer->Write(value);\n    }\n\n    void MessageBuilder::AddInt32(const std::string& name, int32_t value)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_INT32;\n        _schema->fields.push_back(fd);\n        _writer->Write(value);\n    }\n\n    void MessageBuilder::AddInt64(const std::string& name, int64_t value)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_INT64;\n        _schema->fields.push_back(fd);\n        _writer->Write(value);\n    }\n\n    void MessageBuilder::AddDouble(const std::string& name, double value)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_DOUBLE;\n        _schema->fields.push_back(fd);\n        _writer->Write(value);\n    }\n\n    void MessageBuilder::AddTime(const std::string& name, const Time& value, bool isTimestampField)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_TIME;\n        if (isTimestampField)\n        {\n            _schema->timestampFieldIdx.set(static_cast<uint32_t>(_schema->fields.size()));\n        }\n        _schema->fields.push_back(fd);\n        _writer->Write(value.sec);\n        _writer->Write(value.nsec);\n    }\n\n    void MessageBuilder::AddString(const std::string& name, const std::string& value)\n    {\n        FieldDef fd;\n        fd.name = name;\n        fd.fieldType = FT_STRING;\n        _schema->fields.push_back(fd);\n        _writer->Write(value);\n    }\n\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputMessageBuilder.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#include \"mdsd_input_reflection.h\"\n#include <string>\n#include \"MdsdInputSchemaCache.h\"\n\nnamespace mdsdinput\n{\n\n    class MessageBuilder\n    {\n    public:\n        static const size_t BUFFER_SIZE = 32 * 1024;\n\n        MessageBuilder()\n            : _schema_cache(std::make_shared<SchemaCache>())\n            , _buffer(boost::make_shared_noinit<char[]>(BUFFER_SIZE))\n        {}\n\n        MessageBuilder(std::shared_ptr<SchemaCache>& schemaCache)\n            : _schema_cache(schemaCache)\n            , _buffer(boost::make_shared_noinit<char[]>(BUFFER_SIZE))\n        {}\n\n        MessageBuilder(const MessageBuilder&) = delete;\n        MessageBuilder(MessageBuilder&&) = default;\n        MessageBuilder& operator=(const MessageBuilder&) = delete;\n        MessageBuilder& operator=(MessageBuilder&&) = default;\n\n        std::shared_ptr<SchemaCache> GetSchemaCache() { return _schema_cache; }\n\n        // Start a new message. All previous data is discarded.\n        void MessageBegin();\n\n        // Return a constructed message.\n        std::shared_ptr<Message> MessageEnd(const std::string& source);\n\n        void AddBool(const std::string& name, bool value);\n        void AddInt32(const std::string& name, int32_t value);\n        void AddInt64(const std::string& name, int64_t value);\n        void AddDouble(const std::string& name, double value);\n        void AddTime(const std::string& name, const Time& value, bool isTimestampField);\n        void AddString(const std::string& name, const std::string& value);\n\n    protected:\n        std::shared_ptr<SchemaCache> _schema_cache;\n        std::shared_ptr<SchemaDef> _schema;\n        boost::shared_ptr<char[]> _buffer;\n        std::unique_ptr<bond::OutputBuffer> _output;\n        std::unique_ptr<bond::SimpleBinaryWriter<bond::OutputBuffer> > _writer;\n    };\n\n}\n"
  },
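A minimal sketch of the MessageBegin / Add* / MessageEnd sequence declared above; the field names, values, and source string are hypothetical. Each Add* call appends a FieldDef to the schema and serializes the value, so fields are decoded back in insertion order:

```cpp
#include <memory>
#include "MdsdInputMessageBuilder.h"

std::shared_ptr<mdsdinput::Message> BuildSample(mdsdinput::MessageBuilder& builder)
{
    builder.MessageBegin();               // discards any previous data
    builder.AddString("Host", "node-0");
    builder.AddInt32("Count", 42);

    mdsdinput::Time t;
    t.sec  = 1503878400;                  // seconds, UTC
    t.nsec = 0;                           // nanoseconds
    builder.AddTime("EventTime", t, /*isTimestampField=*/true);

    // The Message carries the source, a schemaId from the shared SchemaCache,
    // and the SimpleBinary-encoded field data.
    return builder.MessageEnd("mySource");
}
```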
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputMessageDecoder.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#include \"mdsd_input_reflection.h\"\n#include <string>\n#include \"MdsdInputSchemaCache.h\"\n#include \"bond/core/bond.h\"\n#include \"bond/stream/input_buffer.h\"\n#include \"bond/protocol/simple_binary.h\"\n#include <cstdio>\n#include <iostream>\n\nnamespace mdsdinput\n{\n\n    class MessageDecoder\n    {\n    public:\n        MessageDecoder()\n            : _schema_cache(std::make_shared<SchemaCache>())\n        {}\n\n        MessageDecoder(std::shared_ptr<SchemaCache>& schemaCache)\n            : _schema_cache(schemaCache)\n        {}\n\n        template<typename FieldReceiver>\n        ResponseCode Decode(const Message& msg, FieldReceiver& receiver)\n        {\n            std::shared_ptr<SchemaDef> schema;\n            if (!msg.schema.empty())\n            {\n                schema = std::make_shared<SchemaDef>(msg.schema.value());\n                if (!_schema_cache->AddSchemaWithId(schema, msg.schemaId))\n                {\n                    return ACK_DUPLICATE_SCHEMA_ID;\n                }\n            }\n            else\n            {\n                try\n                {\n                    schema = _schema_cache->GetSchema(msg.schemaId);\n                }\n                catch (std::out_of_range ex)\n                {\n                    return ACK_UNKNOWN_SCHEMA_ID;\n                }\n            }\n\n            bond::SimpleBinaryReader<bond::InputBuffer> reader(msg.data);\n\n            int32_t idx = 0;\n            for (auto it = schema->fields.begin(); it != schema->fields.end(); ++it, ++idx)\n            {\n                try\n                {\n                    switch (it->fieldType)\n                    {\n                    case FT_INVALID:\n                        return ACK_DECODE_ERROR;\n                    case FT_BOOL:\n                        {\n                            bool b;\n                            reader.Read(b);\n                            receiver.BoolField(it->name, b);\n                            break;\n                        }\n                    case FT_INT32:\n                        {\n                            int32_t i;\n                            reader.Read(i);\n                            receiver.Int32Field(it->name, i);\n                            break;\n                        }\n                    case FT_INT64:\n                        {\n                            int64_t i;\n                            reader.Read(i);\n                            receiver.Int64Field(it->name, i);\n                            break;\n                        }\n                    case FT_DOUBLE:\n                        {\n                            double d;\n                            reader.Read(d);\n                            receiver.DoubleField(it->name, d);\n                            break;\n                        }\n                    case FT_TIME:\n                        {\n                            Time t;\n                            reader.Read(t.sec);\n                            reader.Read(t.nsec);\n                            receiver.TimeField(it->name, t, (!schema->timestampFieldIdx.empty() && *(schema->timestampFieldIdx) == static_cast<uint32_t>(idx)));\n                            break;\n                        }\n                    case FT_STRING:\n                        {\n                            std::string str;\n                            
reader.Read(str);\n                            receiver.StringField(it->name, str);\n                            break;\n                        }\n                    }\n                }\n                catch (bond::StreamException& ex)\n                {\n                    return ACK_DECODE_ERROR;\n                }\n            }\n\n            return ACK_SUCCESS;\n        }\n\n        std::shared_ptr<SchemaDef> GetSchema(uint64_t id)\n        {\n            return _schema_cache->GetSchema(id);\n        }\n\n        std::string GetSchemaKey(uint64_t id)\n        {\n            return _schema_cache->GetSchemaKey(id);\n        }\n\n    protected:\n        std::shared_ptr<SchemaCache> _schema_cache;\n    };\n}"
  },
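Decode() is a template over a FieldReceiver whose required interface is implied by the calls above. A sketch of a receiver that just prints each decoded field; the struct name is ours, not part of the library:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include "MdsdInputMessageDecoder.h"

struct PrintingReceiver
{
    void BoolField(const std::string& n, bool v)     { std::cout << n << "=" << v << "\n"; }
    void Int32Field(const std::string& n, int32_t v) { std::cout << n << "=" << v << "\n"; }
    void Int64Field(const std::string& n, int64_t v) { std::cout << n << "=" << v << "\n"; }
    void DoubleField(const std::string& n, double v) { std::cout << n << "=" << v << "\n"; }
    void TimeField(const std::string& n, const mdsdinput::Time& t, bool isTimestamp)
    {
        std::cout << n << "=" << t.sec << "." << t.nsec
                  << (isTimestamp ? " (timestamp)" : "") << "\n";
    }
    void StringField(const std::string& n, const std::string& v)
    {
        std::cout << n << "=" << v << "\n";
    }
};

mdsdinput::ResponseCode DecodeAndPrint(mdsdinput::MessageDecoder& dec,
                                       const mdsdinput::Message& msg)
{
    PrintingReceiver rx;
    return dec.Decode(msg, rx);  // e.g. ACK_SUCCESS or ACK_UNKNOWN_SCHEMA_ID
}
```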
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputMessageIO.cpp",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsdInputMessageIO.h\"\n#include <boost/make_shared.hpp>\n\nextern \"C\" {\n#include <unistd.h>\n}\n\n#include <cassert>\n#include <cerrno>\n#include <system_error>\n\nnamespace mdsdinput\n{\n    void FDIO::Write(const bond::blob& blob)\n    {\n        Write(blob.data(), blob.size());\n    }\n\n    void FDIO::Read(bond::blob& blob, uint32_t size)\n    {\n        auto data = boost::allocate_shared_noinit<char[]>(std::allocator<char>(), size);\n        Read(data.get(), size);\n        blob.assign(data, size);\n    }\n\n    void FDIO::Read(void *buffer, uint32_t size)\n    {\n        assert(buffer != nullptr);\n        size_t nleft = size;\n        do\n        {\n            errno = 0;\n            ssize_t nr = read(_fd, reinterpret_cast<char*>(buffer) + (size - nleft), nleft);\n            if (nr < 0)\n            {\n                if (EINTR != errno)\n                {\n                    throw std::system_error(errno, std::system_category());\n                }\n            }\n            else\n            {\n                nleft -= nr;\n\n                if (nleft > 0 && nr == 0)\n                {\n                    throw eof_exception();\n                }\n            }\n        } while (nleft > 0);\n    }\n\n    void FDIO::Write(const void *buffer, uint32_t size)\n    {\n        assert(buffer != nullptr);\n        size_t nleft = size;\n        do\n        {\n            errno = 0;\n            ssize_t nw = write(_fd, reinterpret_cast<const char*>(buffer)+(size - nleft), nleft);\n            if (nw < 0)\n            {\n                if (EINTR != errno)\n                {\n                    throw std::system_error(errno, std::system_category());\n                }\n            }\n            else if (nw == 0)\n            {\n                throw std::runtime_error(\"write() returned 0\");\n            }\n            else\n            {\n                nleft -= nw;\n            }\n        } while (nleft > 0);\n    }\n\n    template void FDIO::Read(bool&);\n    template void FDIO::Read(int32_t&);\n    template void FDIO::Read(int64_t&);\n    template void FDIO::Read(double&);\n\n    template void FDIO::Write(bool);\n    template void FDIO::Write(int32_t);\n    template void FDIO::Write(int64_t);\n    template void FDIO::Write(double);\n\n    template class MessageIO<FDIO>;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputMessageIO.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#include \"mdsd_input_reflection.h\"\n#include \"bond/core/bond.h\"\n#include \"bond/stream/input_buffer.h\"\n#include \"bond/stream/output_buffer.h\"\n#include \"bond/protocol/simple_binary.h\"\n\nnamespace mdsdinput\n{\n    constexpr uint32_t MAX_MESSAGE_SIZE = 64 * 1024;\n\n    class eof_exception : public std::runtime_error {\n    public:\n        eof_exception()\n            : std::runtime_error(\"Connection Closed\")\n        {}\n    };\n\n    class msg_too_large_error : public std::runtime_error {\n    public:\n        explicit msg_too_large_error(const std::string& msg)\n            : std::runtime_error(msg)\n        {}\n    };\n\n    class FDIO\n    {\n    public:\n        explicit FDIO(int fd)\n            : _fd(fd)\n        {}\n\n        // Read overload(s) for arithmetic types\n        template <typename T>\n        typename boost::enable_if<boost::is_arithmetic<T> >::type\n        Read(T& value)\n        {\n            Read(reinterpret_cast<void*>(&value), sizeof(value));\n        }\n\n        // Write overload(s) for arithmetic types\n        template <typename T>\n        typename boost::enable_if<boost::is_arithmetic<T> >::type\n        Write(T value)\n        {\n            Write(reinterpret_cast<const void*>(&value), sizeof(value));\n        }\n\n        // Read into a memory blob\n        void Read(bond::blob& blob, uint32_t size);\n\n        // Write a memory blob\n        void Write(const bond::blob& blob);\n\n        // Read into a memory buffer\n        void Read(void *buffer, uint32_t size);\n\n        // Write a memory buffer\n        void Write(const void *buffer, uint32_t size);\n    protected:\n        int _fd;\n    };\n\n    extern template void FDIO::Read(bool&);\n    extern template void FDIO::Read(int32_t&);\n    extern template void FDIO::Read(int64_t&);\n    extern template void FDIO::Read(double&);\n\n    extern template void FDIO::Write(bool);\n    extern template void FDIO::Write(int32_t);\n    extern template void FDIO::Write(int64_t);\n    extern template void FDIO::Write(double);\n\n    template<typename IO>\n    class MessageIO\n    {\n    public:\n        MessageIO(IO& io)\n            : _io(io)\n        {}\n\n        void ReadMessage(Message& msg)\n        {\n            uint32_t size = 0;\n            _io.Read(size);\n            if (size > MAX_MESSAGE_SIZE)\n            {\n                throw msg_too_large_error(\"\");\n            }\n            bond::blob data;\n            _io.Read(data, size);\n            bond::SimpleBinaryReader<bond::InputBuffer> input(data);\n            bond::Deserialize(input, msg);\n        }\n\n        void WriteMessage(const Message& msg)\n        {\n            bond::OutputBuffer obuf;\n            bond::SimpleBinaryWriter<bond::OutputBuffer> output(obuf);\n            bond::Serialize(msg, output);\n            bond::blob data = obuf.GetBuffer();\n            uint32_t size = data.size();\n            _io.Write(size);\n            _io.Write(data);\n        }\n\n        void ReadAck(Ack& ack)\n        {\n            _io.Read(ack.msgId);\n            uint32_t code = 0;\n            _io.Read(code);\n            ack.code = static_cast<mdsdinput::ResponseCode>(code);\n        }\n\n        void WriteAck(const Ack& ack)\n        {\n            _io.Write(ack.msgId);\n            _io.Write(static_cast<uint32_t>(ack.code));\n        }\n\n    protected:\n        IO _io;\n    };\n\n    extern 
template class MessageIO<FDIO>;\n}\n"
  },
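The wire framing implemented by MessageIO is a raw uint32 length prefix (written by FDIO in host byte order) followed by the SimpleBinary-serialized body, capped at MAX_MESSAGE_SIZE. A sketch of one client-side round trip over an already connected file descriptor; how the fd is obtained is out of scope here:

```cpp
#include "MdsdInputMessageIO.h"

mdsdinput::Ack SendAndAwaitAck(int fd, const mdsdinput::Message& msg)
{
    mdsdinput::FDIO fdio(fd);
    mdsdinput::MessageIO<mdsdinput::FDIO> io(fdio);

    io.WriteMessage(msg);   // uint32 size prefix + SimpleBinary body;
                            // throws std::system_error on I/O failure

    mdsdinput::Ack ack;     // mdsd sends one Ack per Message,
    io.ReadAck(ack);        // matched to the request via msgId
    return ack;
}
```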
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputSchemaCache.cpp",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsdInputSchemaCache.h\"\n#include \"bond/core/apply.h\"\n#include \"bond/core/runtime_schema.h\"\n#include \"bond/core/schema.h\"\n\n#include \"stdio.h\"\n\nnamespace mdsdinput\n{\n\n    std::pair<uint64_t, bool> SchemaCache::AddSchema(const std::shared_ptr<SchemaDef>& schema)\n    {\n        auto key = schemaKey(schema);\n\n        std::lock_guard<std::mutex> lock(_lock);\n\n        auto sk = _schema_ids.find(key);\n        if (sk != _schema_ids.end())\n        {\n            return std::make_pair((*sk).second, false);\n        }\n\n        uint64_t id = _next_id++;\n        _schemas.insert(std::make_pair(id, schema));\n        _schema_ids.insert(std::make_pair(key, id));\n        _schema_keys.insert(std::make_pair(id, key));\n\n        return std::make_pair(id, true);\n    }\n\n    bool SchemaCache::AddSchemaWithId(const std::shared_ptr<SchemaDef>& schema, uint64_t id)\n    {\n        auto key = schemaKey(schema);\n\n        std::lock_guard<std::mutex> lock(_lock);\n\n        auto it = _schema_keys.find(id);\n\n        if (it != _schema_keys.end())\n        {\n            if (it->second == key)\n            {\n                return true;\n            } else {\n                return false;\n            }\n        }\n\n        _schemas.insert(std::make_pair(id, schema));\n        _schema_ids.insert(std::make_pair(key, id));\n        _schema_keys.insert(std::make_pair(id, key));\n\n        return true;\n    }\n\n    std::shared_ptr<SchemaDef> SchemaCache::GetSchema(uint64_t id)\n    {\n        std::lock_guard<std::mutex> lock(_lock);\n\n        return _schemas.at(id);\n    }\n\n    std::string SchemaCache::GetSchemaKey(uint64_t id)\n    {\n        std::lock_guard<std::mutex> lock(_lock);\n\n        return _schema_keys.at(id);\n    }\n\n    std::string SchemaCache::schemaKey(const std::shared_ptr<SchemaDef>& schema)\n    {\n        std::string key;\n\n        if (!schema->timestampFieldIdx.empty()) {\n            key.append(std::to_string(*(schema->timestampFieldIdx)));\n        }\n        for (const auto & it : schema->fields) {\n            key.append(ToString(it.fieldType));\n            key.append(it.name);\n        }\n\n        return key;\n    }\n\n    static boost::shared_ptr<bond::SchemaDef> makeBondSchema(const std::shared_ptr<SchemaDef>& s)\n    {\n        auto bs = boost::make_shared <bond::SchemaDef>();\n        bond::StructDef st;\n        bool _time_added = false;\n        uint16_t _time_id = 1; // Time is always the second struct and thus has ID 1.\n        uint16_t id = 0;\n        for (const auto & it : s->fields)\n        {\n            bond::FieldDef f;\n            f.id = id;\n            id++;\n            f.metadata.name = it.name;\n            switch (it.fieldType)\n            {\n            case FT_INVALID:\n                throw std::runtime_error(\"FT_INVALID encountered!\");\n            case FT_BOOL:\n                f.type.id = bond::BT_BOOL;\n                break;\n            case FT_INT32:\n                f.type.id = bond::BT_INT32;\n                break;\n            case FT_INT64:\n                f.type.id = bond::BT_INT64;\n                break;\n            case FT_DOUBLE:\n                f.type.id = bond::BT_DOUBLE;\n                break;\n            case FT_TIME:\n                f.type.id = bond::get_type_id<Time>::value;\n                f.type.struct_def = _time_id;\n                f.type.bonded_type = 
bond::is_bonded<Time>::value;\n                _time_added = true;\n                break;\n            case FT_STRING:\n                f.type.id = bond::BT_STRING;\n                break;\n            }\n            st.fields.push_back(f);\n        }\n        bs->structs.push_back(st);\n        if (_time_added)\n        {\n            bond::detail::SchemaCache<Time, void>::AppendStructDef(bs.get());\n        }\n\n        bs->root.id = bond::BT_STRUCT;\n        bs->root.bonded_type = false;\n        bs->root.struct_def = 0;\n\n        return bs;\n    }\n\n    boost::shared_ptr<bond::SchemaDef> SchemaCache::GetBondSchema(uint64_t id)\n    {\n        auto it = _bond_schemas.find(id);\n        if (it == _bond_schemas.end())\n        {\n            auto s = _schemas.at(id);\n            auto bs = makeBondSchema(s);\n            _bond_schemas.insert(std::make_pair(id, bs));\n            return bs;\n        }\n        else\n        {\n            return it->second;\n        }\n    }\n\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdinput/MdsdInputSchemaCache.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#include \"mdsd_input_reflection.h\"\n#include \"bond/core/bond_types.h\"\n#include <string>\n#include <unordered_map>\n#include <mutex>\n\nnamespace mdsdinput\n{\n\n    class SchemaCache\n    {\n    public:\n        SchemaCache() = default;\n        SchemaCache(const SchemaCache&) = delete;\n        SchemaCache(SchemaCache&&) = delete;\n        SchemaCache& operator=(const SchemaCache&) = delete;\n        SchemaCache& operator=(SchemaCache&&) = delete;\n\n        // Returns the schema id and a flag indicating if the schema was new.\n        std::pair<uint64_t, bool> AddSchema(const std::shared_ptr<SchemaDef>& schema);\n\n        // Add a schema using supplied id.\n        // Returns false if the id is already in use and the cached schema doesn't match the provided schema.\n        bool AddSchemaWithId(const std::shared_ptr<SchemaDef>& schema, uint64_t id);\n\n        // Return the schema. Throws an exception if not found.\n        std::shared_ptr<SchemaDef> GetSchema(uint64_t id);\n\n        // Return the bond schema. Throws an exception if not found.\n        boost::shared_ptr<bond::SchemaDef> GetBondSchema(uint64_t id);\n\n        // Return the schema key. Throws an exception if not found.\n        std::string GetSchemaKey(uint64_t id);\n    protected:\n        std::string schemaKey(const std::shared_ptr<SchemaDef>& schema);\n\n        std::mutex _lock;\n        uint64_t _next_id;\n        std::unordered_map<std::string, uint64_t> _schema_ids;\n        std::unordered_map<uint64_t, std::string> _schema_keys;\n        std::unordered_map<uint64_t, std::shared_ptr<SchemaDef>> _schemas;\n        std::unordered_map<uint64_t, boost::shared_ptr<bond::SchemaDef>> _bond_schemas;\n    };\n\n}\n"
  },
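SchemaCache deduplicates by a key built from the timestamp field index plus each field's type and name, so structurally identical schemas collapse to a single id and the schema body only needs to cross the wire once per session. A small sketch; the field contents are hypothetical:

```cpp
#include <memory>
#include "MdsdInputSchemaCache.h"

void SchemaCacheDemo()
{
    mdsdinput::SchemaCache cache;

    auto schema = std::make_shared<mdsdinput::SchemaDef>();
    mdsdinput::FieldDef fd;
    fd.name = "Count";
    fd.fieldType = mdsdinput::FT_INT32;
    schema->fields.push_back(fd);

    auto r1 = cache.AddSchema(schema);  // {id, true}:  first time seen
    auto r2 = cache.AddSchema(schema);  // {same id, false}: deduplicated
    (void)r1; (void)r2;
}
```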
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input.bond",
    "content": "namespace mdsdinput\n\nstruct Time\n{\n\t// Seconds\n    0: required uint64 sec;\n\n\t// Nanoseconds\n    1: uint32 nsec;\n}\n\n// These fields types map to the Mds Field types\nenum FieldType\n{\n    FT_INVALID = 0,\t// This signifies an invalid field type.\n    FT_BOOL = 1,\t// The field is encoded as BT_BOOL\n    FT_INT32 = 2,\t// The field is encoded as BT_INT32\n    FT_INT64 = 3,\t// The field is encoded as BT_INT64\n    FT_DOUBLE = 4,\t// The field is encoded as BT_DOUBLE\n    FT_TIME = 5,\t// The field is encoded as Time (all times are expected to be UTC)\n    FT_STRING = 6,\t// The field is encoded as BT_STRING\n}\n\n// Represents on field definition\nstruct FieldDef\n{\n\t// The field name\n    0: required string name;\n\n\t// The field type\n    1: required FieldType fieldType = FT_INVALID;\n}\n\n// Schema describing the data in a Message\nstruct SchemaDef\n{\n\t// The field definitions (in the order that they will appear in the Message data)\n    0: required vector<FieldDef> fields;\n\n    // The index of the field that is to be treated as the event timestamp value (PreciseTimeStamp)\n    // If the value is null, the index index is < 0, > fields size, or the refrenced field is not of type FT_TIME,\n    // then the event timestamp will be set at the time of event reception\n    1: nullable<uint32> timestampFieldIdx;\n}\n\n// A single mdsd message (to be recorded by mdsd as a single MDS row)\nstruct Message\n{\n\t// The source of the message (Used for routing messages in mdsd)\n    0: required string source;\n\n\t// The message id. Must be unique for all messages sent during a single session (e.g. life of TCP stream).\n    1: required uint64 msgId;\n\n\t// The id of the schema that describes the data in the message\n    3: required uint64 schemaId;\n\n\t// The schema to associate with schemaId. This should be null if the schema for schemaId was previously transmitted during the same session.\n    4: nullable<SchemaDef> schema;\n\n\t// The message data, encoded according to the schema identified by schemaId\n    5: required blob data;\n}\n\n// The response code describing the result of processing a Message\nenum ResponseCode\n{\n    ACK_SUCCESS = 0,\t\t\t // The message was decoded and stored in mdsd's memory\n    ACK_FAILED = 1,\t\t\t\t // The message could not be processed for an unknown reason\n    ACK_UNKNOWN_SCHEMA_ID = 2,\t // The message's schemaId is unknown (no schema was transmitted for that id)\n    ACK_DECODE_ERROR = 3,\t\t // The message data could not be decoded using the schema identified by message schemaId\n    ACK_INVALID_SOURCE = 4,\t\t // The message's source is invalid\n    ACK_DUPLICATE_SCHEMA_ID = 5, // The message's schema id was already used by a different schema\n}\n\n// An acknowledgement sent by mdsd. One Ack will be sent for each Message recieved.\nstruct Ack\n{\n\t// The Message.msgId\n    0: required uint64 msgId;\n\n\t// The result of processing the message\n    1: ResponseCode code = ACK_SUCCESS;\n}\n"
  },
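Tying the IDL together: within a session, the first Message using a given schemaId carries its SchemaDef inline (the decoder registers it via AddSchemaWithId), later messages with the same layout send only the id, and mdsd answers every Message with one Ack keyed by msgId. A sketch of filling in that first message; the generated header name mdsd_input_types.h is inferred from the mdsd_input_types.cpp entry in the CMake sources, and all values are hypothetical:

```cpp
#include "mdsd_input_types.h"  // assumed name of the bond-generated header

mdsdinput::Message FirstMessage(const mdsdinput::SchemaDef& schema,
                                uint64_t schemaId, const bond::blob& data)
{
    mdsdinput::Message msg;
    msg.source   = "mySource";  // routing key inside mdsd
    msg.msgId    = 1;           // must be unique within the session
    msg.schemaId = schemaId;
    msg.schema.set(schema);     // inline schema: first use of this schemaId;
                                // leave null on subsequent messages
    msg.data     = data;        // SimpleBinary-encoded fields
    return msg;
}
```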
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input_apply.cpp",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n\n//------------------------------------------------------------------------------\n// This code was generated by a tool.\n//\n//   Tool : Bond Compiler 0.3.0.5\n//   File : mdsd_input_apply.cpp\n//\n// Changes to this file may cause incorrect behavior and will be lost when\n// the code is regenerated.\n// <auto-generated />\n//------------------------------------------------------------------------------\n\n#include \"mdsd_input_apply.h\"\n#include \"mdsd_input_reflection.h\"\n\nnamespace mdsdinput\n{\n    \n    //\n    // Overloads of Apply function with common transforms for Time.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& 
transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, 
bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    //\n    // Overloads of Apply function with common transforms for FieldDef.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const 
bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool 
Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value)\n    {\n        return 
bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    //\n    // Overloads of Apply function with common transforms for SchemaDef.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const 
bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return 
bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    //\n    // Overloads of Apply function with common transforms for Message.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, 
value);\n    }\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> 
>& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               
const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    //\n    // Overloads of Apply function with common transforms for Ack.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const 
bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               
const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value)\n    {\n        return bond::Apply<>(transform, value);\n    }\n    \n} // namespace mdsdinput\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input_apply.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n\n//------------------------------------------------------------------------------\n// This code was generated by a tool.\n//\n//   Tool : Bond Compiler 0.3.0.5\n//   File : mdsd_input_apply.h\n//\n// Changes to this file may cause incorrect behavior and will be lost when\n// the code is regenerated.\n// <auto-generated />\n//------------------------------------------------------------------------------\n\n#pragma once\n\n#include \"mdsd_input_types.h\"\n#include <bond/core/bond.h>\n#include <bond/stream/output_buffer.h>\n\n\nnamespace mdsdinput\n{\n    \n    //\n    // Overloads of Apply function with common transforms for Time.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time>& value);\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Time& value);\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value);\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value);\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n  
             const Time& value);\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value);\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Time>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value);\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Time& value);\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const 
bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Time, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    //\n    // Overloads of Apply function with common transforms for FieldDef.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef>& value);\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const FieldDef& value);\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const 
bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<FieldDef>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const FieldDef& value);\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<FieldDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    //\n    // Overloads of Apply function with common transforms for SchemaDef.\n    // These overloads will be selected using argument dependent lookup\n  
  // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef>& value);\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const SchemaDef& value);\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const 
bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<SchemaDef>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const SchemaDef& value);\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<SchemaDef, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    //\n    // Overloads of Apply function with common transforms for Message.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message>& value);\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Message& value);\n    \n    bool 
Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n              
 const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Message>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Message& value);\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Message, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    //\n    // Overloads of Apply function with common transforms for Ack.\n    // These overloads will be selected using argument dependent lookup\n    // before bond::Apply function templates.\n    //\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack>& value);\n\n    bool Apply(const bond::InitSchemaDef& transform,\n               const Ack& value);\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n       
        const Ack& value);\n\n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value);\n\n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::CompactBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value);\n\n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value);\n\n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const 
bond::Marshaler<bond::FastBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n\n    bool Apply(const bond::To<Ack>& transform,\n               const bond::bonded<void, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value);\n\n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Serializer<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const Ack& value);\n\n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::CompactBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::FastBinaryReader<bond::InputBuffer>&>& value);\n    \n    bool Apply(const bond::Marshaler<bond::SimpleBinaryWriter<bond::OutputBuffer> >& transform,\n               const bond::bonded<Ack, bond::SimpleBinaryReader<bond::InputBuffer>&>& value);\n    \n} // namespace mdsdinput\n"
  },
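The generated `Apply` overloads above exist so that the generic `bond::Serialize`/`bond::Deserialize` entry points resolve, via argument-dependent lookup, to these pre-compiled instantiations instead of re-instantiating the transform templates in every client. A minimal round-trip sketch, assuming the standard Bond C++ API (the `Ack` field values are illustrative, not from this repo):

```cpp
#include "mdsd_input_reflection.h"
#include <bond/core/bond.h>
#include <bond/stream/output_buffer.h>

int main()
{
    mdsdinput::Ack ack;
    ack.msgId = 42;                     // illustrative values
    ack.code  = mdsdinput::ACK_SUCCESS;

    // Serialize with Compact Binary; ADL selects the generated Apply overload.
    bond::OutputBuffer output;
    bond::CompactBinaryWriter<bond::OutputBuffer> writer(output);
    bond::Serialize(ack, writer);

    // Read it back from the serialized payload.
    bond::blob data = output.GetBuffer();
    bond::CompactBinaryReader<bond::InputBuffer> reader(data);
    mdsdinput::Ack copy;
    bond::Deserialize(reader, copy);

    return copy == ack ? 0 : 1;         // generated operator== compares fields
}
```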
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input_reflection.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n\n//------------------------------------------------------------------------------\n// This code was generated by a tool.\n//\n//   Tool : Bond Compiler 0.3.0.5\n//   File : mdsd_input_reflection.h\n//\n// Changes to this file may cause incorrect behavior and will be lost when\n// the code is regenerated.\n// <auto-generated />\n//------------------------------------------------------------------------------\n\n#pragma once\n\n#include \"mdsd_input_types.h\"\n#include <bond/core/reflection.h>\n\nnamespace mdsdinput\n{\n    //\n    // Time\n    //\n    struct Time::Schema\n    {\n        typedef bond::no_base base;\n\n        static const bond::Metadata metadata;\n        \n        private: static const bond::Metadata s_sec_metadata;\n        private: static const bond::Metadata s_nsec_metadata;\n\n        public: struct var\n        {\n            // sec\n            typedef bond::reflection::FieldTemplate<\n                0,\n                bond::reflection::required_field_modifier,\n                Time,\n                uint64_t,\n                &Time::sec,\n                &s_sec_metadata\n            > sec;\n        \n            // nsec\n            typedef bond::reflection::FieldTemplate<\n                1,\n                bond::reflection::optional_field_modifier,\n                Time,\n                uint32_t,\n                &Time::nsec,\n                &s_nsec_metadata\n            > nsec;\n        };\n\n        private: typedef boost::mpl::list<> fields0;\n        private: typedef boost::mpl::push_front<fields0, var::nsec>::type fields1;\n        private: typedef boost::mpl::push_front<fields1, var::sec>::type fields2;\n\n        public: typedef fields2::type fields;\n        \n        \n        static bond::Metadata GetMetadata()\n        {\n            return bond::reflection::MetadataInit(\"Time\", \"mdsdinput.Time\",\n                bond::reflection::Attributes()\n            );\n        }\n    };\n    \n\n    //\n    // FieldDef\n    //\n    struct FieldDef::Schema\n    {\n        typedef bond::no_base base;\n\n        static const bond::Metadata metadata;\n        \n        private: static const bond::Metadata s_name_metadata;\n        private: static const bond::Metadata s_fieldType_metadata;\n\n        public: struct var\n        {\n            // name\n            typedef bond::reflection::FieldTemplate<\n                0,\n                bond::reflection::required_field_modifier,\n                FieldDef,\n                std::string,\n                &FieldDef::name,\n                &s_name_metadata\n            > name;\n        \n            // fieldType\n            typedef bond::reflection::FieldTemplate<\n                1,\n                bond::reflection::required_field_modifier,\n                FieldDef,\n                ::mdsdinput::FieldType,\n                &FieldDef::fieldType,\n                &s_fieldType_metadata\n            > fieldType;\n        };\n\n        private: typedef boost::mpl::list<> fields0;\n        private: typedef boost::mpl::push_front<fields0, var::fieldType>::type fields1;\n        private: typedef boost::mpl::push_front<fields1, var::name>::type fields2;\n\n        public: typedef fields2::type fields;\n        \n        \n        static bond::Metadata GetMetadata()\n        {\n            return bond::reflection::MetadataInit(\"FieldDef\", \"mdsdinput.FieldDef\",\n                
bond::reflection::Attributes()\n            );\n        }\n    };\n    \n\n    //\n    // SchemaDef\n    //\n    struct SchemaDef::Schema\n    {\n        typedef bond::no_base base;\n\n        static const bond::Metadata metadata;\n        \n        private: static const bond::Metadata s_fields_metadata;\n        private: static const bond::Metadata s_timestampFieldIdx_metadata;\n\n        public: struct var\n        {\n            // fields\n            typedef bond::reflection::FieldTemplate<\n                0,\n                bond::reflection::required_field_modifier,\n                SchemaDef,\n                std::vector< ::mdsdinput::FieldDef>,\n                &SchemaDef::fields,\n                &s_fields_metadata\n            > fields;\n        \n            // timestampFieldIdx\n            typedef bond::reflection::FieldTemplate<\n                1,\n                bond::reflection::optional_field_modifier,\n                SchemaDef,\n                bond::nullable<uint32_t>,\n                &SchemaDef::timestampFieldIdx,\n                &s_timestampFieldIdx_metadata\n            > timestampFieldIdx;\n        };\n\n        private: typedef boost::mpl::list<> fields0;\n        private: typedef boost::mpl::push_front<fields0, var::timestampFieldIdx>::type fields1;\n        private: typedef boost::mpl::push_front<fields1, var::fields>::type fields2;\n\n        public: typedef fields2::type fields;\n        \n        \n        static bond::Metadata GetMetadata()\n        {\n            return bond::reflection::MetadataInit(\"SchemaDef\", \"mdsdinput.SchemaDef\",\n                bond::reflection::Attributes()\n            );\n        }\n    };\n    \n\n    //\n    // Message\n    //\n    struct Message::Schema\n    {\n        typedef bond::no_base base;\n\n        static const bond::Metadata metadata;\n        \n        private: static const bond::Metadata s_source_metadata;\n        private: static const bond::Metadata s_msgId_metadata;\n        private: static const bond::Metadata s_schemaId_metadata;\n        private: static const bond::Metadata s_schema_metadata;\n        private: static const bond::Metadata s_data_metadata;\n\n        public: struct var\n        {\n            // source\n            typedef bond::reflection::FieldTemplate<\n                0,\n                bond::reflection::required_field_modifier,\n                Message,\n                std::string,\n                &Message::source,\n                &s_source_metadata\n            > source;\n        \n            // msgId\n            typedef bond::reflection::FieldTemplate<\n                1,\n                bond::reflection::required_field_modifier,\n                Message,\n                uint64_t,\n                &Message::msgId,\n                &s_msgId_metadata\n            > msgId;\n        \n            // schemaId\n            typedef bond::reflection::FieldTemplate<\n                3,\n                bond::reflection::required_field_modifier,\n                Message,\n                uint64_t,\n                &Message::schemaId,\n                &s_schemaId_metadata\n            > schemaId;\n        \n            // schema\n            typedef bond::reflection::FieldTemplate<\n                4,\n                bond::reflection::optional_field_modifier,\n                Message,\n                bond::nullable< ::mdsdinput::SchemaDef>,\n                &Message::schema,\n                &s_schema_metadata\n            > schema;\n        \n            // data\n            
typedef bond::reflection::FieldTemplate<\n                5,\n                bond::reflection::required_field_modifier,\n                Message,\n                bond::blob,\n                &Message::data,\n                &s_data_metadata\n            > data;\n        };\n\n        private: typedef boost::mpl::list<> fields0;\n        private: typedef boost::mpl::push_front<fields0, var::data>::type fields1;\n        private: typedef boost::mpl::push_front<fields1, var::schema>::type fields2;\n        private: typedef boost::mpl::push_front<fields2, var::schemaId>::type fields3;\n        private: typedef boost::mpl::push_front<fields3, var::msgId>::type fields4;\n        private: typedef boost::mpl::push_front<fields4, var::source>::type fields5;\n\n        public: typedef fields5::type fields;\n        \n        \n        static bond::Metadata GetMetadata()\n        {\n            return bond::reflection::MetadataInit(\"Message\", \"mdsdinput.Message\",\n                bond::reflection::Attributes()\n            );\n        }\n    };\n    \n\n    //\n    // Ack\n    //\n    struct Ack::Schema\n    {\n        typedef bond::no_base base;\n\n        static const bond::Metadata metadata;\n        \n        private: static const bond::Metadata s_msgId_metadata;\n        private: static const bond::Metadata s_code_metadata;\n\n        public: struct var\n        {\n            // msgId\n            typedef bond::reflection::FieldTemplate<\n                0,\n                bond::reflection::required_field_modifier,\n                Ack,\n                uint64_t,\n                &Ack::msgId,\n                &s_msgId_metadata\n            > msgId;\n        \n            // code\n            typedef bond::reflection::FieldTemplate<\n                1,\n                bond::reflection::optional_field_modifier,\n                Ack,\n                ::mdsdinput::ResponseCode,\n                &Ack::code,\n                &s_code_metadata\n            > code;\n        };\n\n        private: typedef boost::mpl::list<> fields0;\n        private: typedef boost::mpl::push_front<fields0, var::code>::type fields1;\n        private: typedef boost::mpl::push_front<fields1, var::msgId>::type fields2;\n\n        public: typedef fields2::type fields;\n        \n        \n        static bond::Metadata GetMetadata()\n        {\n            return bond::reflection::MetadataInit(\"Ack\", \"mdsdinput.Ack\",\n                bond::reflection::Attributes()\n            );\n        }\n    };\n    \n\n    \n} // namespace mdsdinput\n"
  },
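The nested `Schema` structs in the header above are what Bond's compile-time reflection walks; one common consumer is the runtime-schema helper. A small sketch, assuming the stock `bond::GetRuntimeSchema<T>()` helper from the Bond core library:

```cpp
#include "mdsd_input_reflection.h"
#include <bond/core/bond.h>

// Build a runtime description of mdsdinput::Message from Message::Schema.
// The result wraps a bond::SchemaDef (Bond's own schema struct, distinct from
// mdsdinput::SchemaDef) that can itself be serialized and shipped to a peer.
const bond::RuntimeSchema& rs = bond::GetRuntimeSchema<mdsdinput::Message>();
```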
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input_types.cpp",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n\n//------------------------------------------------------------------------------\n// This code was generated by a tool.\n//\n//   Tool : Bond Compiler 0.3.0.5\n//   File : mdsd_input_types.cpp\n//\n// Changes to this file may cause incorrect behavior and will be lost when\n// the code is regenerated.\n// <auto-generated />\n//------------------------------------------------------------------------------\n\n#include \"mdsd_input_reflection.h\"\n#include <bond/core/exception.h>\n\nnamespace mdsdinput\n{\n    \n    const bond::Metadata Time::Schema::metadata\n        = Time::Schema::GetMetadata();\n    \n    const bond::Metadata Time::Schema::s_sec_metadata\n        = bond::reflection::MetadataInit(\"sec\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata Time::Schema::s_nsec_metadata\n        = bond::reflection::MetadataInit(\"nsec\");\n\n    \n    namespace _bond_enumerators\n    {\n    namespace FieldType\n    {\n        const\n        std::map<std::string, enum FieldType> _name_to_value_FieldType =\n            boost::assign::map_list_of<std::string, enum FieldType>\n                (\"FT_INVALID\", FT_INVALID)\n                (\"FT_BOOL\", FT_BOOL)\n                (\"FT_INT32\", FT_INT32)\n                (\"FT_INT64\", FT_INT64)\n                (\"FT_DOUBLE\", FT_DOUBLE)\n                (\"FT_TIME\", FT_TIME)\n                (\"FT_STRING\", FT_STRING);\n\n        const\n        std::map<enum FieldType, std::string> _value_to_name_FieldType =\n            bond::reverse_map(_name_to_value_FieldType);\n\n        const std::string& ToString(enum FieldType value)\n        {\n            std::map<enum FieldType, std::string>::const_iterator it =\n                GetValueToNameMap(value).find(value);\n\n            if (GetValueToNameMap(value).end() == it)\n                bond::InvalidEnumValueException(value, \"FieldType\");\n\n            return it->second;\n        }\n\n        void FromString(const std::string& name, enum FieldType& value)\n        {\n            std::map<std::string, enum FieldType>::const_iterator it =\n                _name_to_value_FieldType.find(name);\n\n            if (_name_to_value_FieldType.end() == it)\n                bond::InvalidEnumValueException(name.c_str(), \"FieldType\");\n\n            value = it->second;\n        }\n\n    } // namespace FieldType\n    } // namespace _bond_enumerators\n\n    \n    const bond::Metadata FieldDef::Schema::metadata\n        = FieldDef::Schema::GetMetadata();\n    \n    const bond::Metadata FieldDef::Schema::s_name_metadata\n        = bond::reflection::MetadataInit(\"name\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata FieldDef::Schema::s_fieldType_metadata\n        = bond::reflection::MetadataInit(::mdsdinput::_bond_enumerators::FieldType::FT_INVALID, \"fieldType\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n\n    \n    const bond::Metadata SchemaDef::Schema::metadata\n        = SchemaDef::Schema::GetMetadata();\n    \n    const bond::Metadata SchemaDef::Schema::s_fields_metadata\n        = bond::reflection::MetadataInit(\"fields\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata 
SchemaDef::Schema::s_timestampFieldIdx_metadata\n        = bond::reflection::MetadataInit(\"timestampFieldIdx\");\n\n    \n    const bond::Metadata Message::Schema::metadata\n        = Message::Schema::GetMetadata();\n    \n    const bond::Metadata Message::Schema::s_source_metadata\n        = bond::reflection::MetadataInit(\"source\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata Message::Schema::s_msgId_metadata\n        = bond::reflection::MetadataInit(\"msgId\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata Message::Schema::s_schemaId_metadata\n        = bond::reflection::MetadataInit(\"schemaId\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata Message::Schema::s_schema_metadata\n        = bond::reflection::MetadataInit(\"schema\");\n    \n    const bond::Metadata Message::Schema::s_data_metadata\n        = bond::reflection::MetadataInit(\"data\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n\n    \n    namespace _bond_enumerators\n    {\n    namespace ResponseCode\n    {\n        const\n        std::map<std::string, enum ResponseCode> _name_to_value_ResponseCode =\n            boost::assign::map_list_of<std::string, enum ResponseCode>\n                (\"ACK_SUCCESS\", ACK_SUCCESS)\n                (\"ACK_FAILED\", ACK_FAILED)\n                (\"ACK_UNKNOWN_SCHEMA_ID\", ACK_UNKNOWN_SCHEMA_ID)\n                (\"ACK_DECODE_ERROR\", ACK_DECODE_ERROR)\n                (\"ACK_INVALID_SOURCE\", ACK_INVALID_SOURCE)\n                (\"ACK_DUPLICATE_SCHEMA_ID\", ACK_DUPLICATE_SCHEMA_ID);\n\n        const\n        std::map<enum ResponseCode, std::string> _value_to_name_ResponseCode =\n            bond::reverse_map(_name_to_value_ResponseCode);\n\n        const std::string& ToString(enum ResponseCode value)\n        {\n            std::map<enum ResponseCode, std::string>::const_iterator it =\n                GetValueToNameMap(value).find(value);\n\n            if (GetValueToNameMap(value).end() == it)\n                bond::InvalidEnumValueException(value, \"ResponseCode\");\n\n            return it->second;\n        }\n\n        void FromString(const std::string& name, enum ResponseCode& value)\n        {\n            std::map<std::string, enum ResponseCode>::const_iterator it =\n                _name_to_value_ResponseCode.find(name);\n\n            if (_name_to_value_ResponseCode.end() == it)\n                bond::InvalidEnumValueException(name.c_str(), \"ResponseCode\");\n\n            value = it->second;\n        }\n\n    } // namespace ResponseCode\n    } // namespace _bond_enumerators\n\n    \n    const bond::Metadata Ack::Schema::metadata\n        = Ack::Schema::GetMetadata();\n    \n    const bond::Metadata Ack::Schema::s_msgId_metadata\n        = bond::reflection::MetadataInit(\"msgId\", bond::reflection::required_field_modifier::value,\n            bond::reflection::Attributes());\n    \n    const bond::Metadata Ack::Schema::s_code_metadata\n        = bond::reflection::MetadataInit(::mdsdinput::_bond_enumerators::ResponseCode::ACK_SUCCESS, \"code\");\n\n    \n} // namespace mdsdinput\n"
  },
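The enum converters defined above are exposed directly on namespace `mdsdinput` through the `using namespace _bond_enumerators::...` directives in the types header. Rough usage, relying only on the functions declared in these generated files:

```cpp
#include "mdsd_input_types.h"
#include <cassert>

int main()
{
    using namespace mdsdinput;

    assert(ToString(FT_STRING) == "FT_STRING"); // throws for out-of-range values

    FieldType ft = FT_INVALID;
    FromString("FT_INT64", ft);                 // throwing converter
    assert(ft == FT_INT64);

    ResponseCode rc = ACK_SUCCESS;
    bool ok = ToEnum(rc, "ACK_DECODE_ERROR");   // non-throwing; false for unknown names
    return ok ? 0 : 1;
}
```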
  {
    "path": "Diagnostic/mdsd/mdsdinput/mdsd_input_types.h",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n\n//------------------------------------------------------------------------------\n// This code was generated by a tool.\n//\n//   Tool : Bond Compiler 0.3.0.5\n//   File : mdsd_input_types.h\n//\n// Changes to this file may cause incorrect behavior and will be lost when\n// the code is regenerated.\n// <auto-generated />\n//------------------------------------------------------------------------------\n\n#pragma once\n\n#include <bond/core/bond_version.h>\n\n#if BOND_VERSION < 0x302\n#error This file was generated by a newer version of Bond compiler\n#error and is incompatible with your version Bond library.\n#endif\n\n#if BOND_MIN_CODEGEN_VERSION > 0x0305\n#error This file was generated by an older version of Bond compiler\n#error and is incompatible with your version Bond library.\n#endif\n\n#include <bond/core/config.h>\n#include <bond/core/containers.h>\n#include <bond/core/nullable.h>\n#include <bond/core/blob.h>\n\n\nnamespace mdsdinput\n{\n    \n    struct Time\n    {\n        uint64_t sec;\n        uint32_t nsec;\n        \n        Time()\n          : sec(),\n            nsec()\n        {\n        }\n\n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated copy ctor OK\n        Time(const Time& other) = default;\n#endif\n        \n#ifndef BOND_NO_CXX11_RVALUE_REFERENCES\n        Time(Time&& other)\n          : sec(std::move(other.sec)),\n            nsec(std::move(other.nsec))\n        {\n        }\n#endif\n        \n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated operator= OK\n        Time& operator=(const Time& other) = default;\n#endif\n\n        bool operator==(const Time& other) const\n        {\n            return true\n                && (sec == other.sec)\n                && (nsec == other.nsec);\n        }\n\n        bool operator!=(const Time& other) const\n        {\n            return !(*this == other);\n        }\n\n        void swap(Time& other)\n        {\n            using std::swap;\n            swap(sec, other.sec);\n            swap(nsec, other.nsec);\n        }\n\n        struct Schema;\n\n    protected:\n        void InitMetadata(const char*, const char*)\n        {\n        }\n    };\n\n    inline void swap(Time& left, Time& right)\n    {\n        left.swap(right);\n    }\n\n    \n    namespace _bond_enumerators\n    {\n    namespace FieldType\n    {\n        enum FieldType\n        {\n            FT_INVALID = 0,\n            FT_BOOL = 1,\n            FT_INT32 = 2,\n            FT_INT64 = 3,\n            FT_DOUBLE = 4,\n            FT_TIME = 5,\n            FT_STRING = 6\n        };\n        \n        extern const std::map<enum FieldType, std::string> _value_to_name_FieldType;\n        extern const std::map<std::string, enum FieldType> _name_to_value_FieldType;\n\n        inline\n        const char* GetTypeName(enum FieldType)\n        {\n            return \"FieldType\";\n        }\n\n        inline\n        const char* GetTypeName(enum FieldType, const bond::qualified_name_tag&)\n        {\n            return \"mdsdinput.FieldType\";\n        }\n\n        inline\n        const std::map<enum FieldType, std::string>& GetValueToNameMap(enum FieldType)\n        {\n            return _value_to_name_FieldType;\n        }\n\n        inline\n        const std::map<std::string, enum FieldType>& GetNameToValueMap(enum FieldType)\n        {\n            return _name_to_value_FieldType;\n     
   }\n\n        const std::string& ToString(enum FieldType value);\n\n        void FromString(const std::string& name, enum FieldType& value);\n\n        inline\n        bool ToEnum(enum FieldType& value, const std::string& name)\n        {\n            std::map<std::string, enum FieldType>::const_iterator it =\n                _name_to_value_FieldType.find(name);\n\n            if (_name_to_value_FieldType.end() == it)\n                return false;\n\n            value = it->second;\n\n            return true;\n        }\n    } // namespace FieldType\n    } // namespace _bond_enumerators\n\n    using namespace _bond_enumerators::FieldType;\n    \n\n    \n    struct FieldDef\n    {\n        std::string name;\n        ::mdsdinput::FieldType fieldType;\n        \n        FieldDef()\n          : fieldType(::mdsdinput::_bond_enumerators::FieldType::FT_INVALID)\n        {\n        }\n\n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated copy ctor OK\n        FieldDef(const FieldDef& other) = default;\n#endif\n        \n#ifndef BOND_NO_CXX11_RVALUE_REFERENCES\n        FieldDef(FieldDef&& other)\n          : name(std::move(other.name)),\n            fieldType(std::move(other.fieldType))\n        {\n        }\n#endif\n        \n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated operator= OK\n        FieldDef& operator=(const FieldDef& other) = default;\n#endif\n\n        bool operator==(const FieldDef& other) const\n        {\n            return true\n                && (name == other.name)\n                && (fieldType == other.fieldType);\n        }\n\n        bool operator!=(const FieldDef& other) const\n        {\n            return !(*this == other);\n        }\n\n        void swap(FieldDef& other)\n        {\n            using std::swap;\n            swap(name, other.name);\n            swap(fieldType, other.fieldType);\n        }\n\n        struct Schema;\n\n    protected:\n        void InitMetadata(const char*, const char*)\n        {\n        }\n    };\n\n    inline void swap(FieldDef& left, FieldDef& right)\n    {\n        left.swap(right);\n    }\n\n    \n    struct SchemaDef\n    {\n        std::vector< ::mdsdinput::FieldDef> fields;\n        bond::nullable<uint32_t> timestampFieldIdx;\n        \n        SchemaDef()\n        {\n        }\n\n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated copy ctor OK\n        SchemaDef(const SchemaDef& other) = default;\n#endif\n        \n#ifndef BOND_NO_CXX11_RVALUE_REFERENCES\n        SchemaDef(SchemaDef&& other)\n          : fields(std::move(other.fields)),\n            timestampFieldIdx(std::move(other.timestampFieldIdx))\n        {\n        }\n#endif\n        \n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated operator= OK\n        SchemaDef& operator=(const SchemaDef& other) = default;\n#endif\n\n        bool operator==(const SchemaDef& other) const\n        {\n            return true\n                && (fields == other.fields)\n                && (timestampFieldIdx == other.timestampFieldIdx);\n        }\n\n        bool operator!=(const SchemaDef& other) const\n        {\n            return !(*this == other);\n        }\n\n        void swap(SchemaDef& other)\n        {\n            using std::swap;\n            swap(fields, other.fields);\n            swap(timestampFieldIdx, other.timestampFieldIdx);\n        }\n\n        struct Schema;\n\n    protected:\n        void InitMetadata(const char*, const char*)\n       
 {\n        }\n    };\n\n    inline void swap(SchemaDef& left, SchemaDef& right)\n    {\n        left.swap(right);\n    }\n\n    \n    struct Message\n    {\n        std::string source;\n        uint64_t msgId;\n        uint64_t schemaId;\n        bond::nullable< ::mdsdinput::SchemaDef> schema;\n        bond::blob data;\n        \n        Message()\n          : msgId(),\n            schemaId()\n        {\n        }\n\n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated copy ctor OK\n        Message(const Message& other) = default;\n#endif\n        \n#ifndef BOND_NO_CXX11_RVALUE_REFERENCES\n        Message(Message&& other)\n          : source(std::move(other.source)),\n            msgId(std::move(other.msgId)),\n            schemaId(std::move(other.schemaId)),\n            schema(std::move(other.schema)),\n            data(std::move(other.data))\n        {\n        }\n#endif\n        \n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated operator= OK\n        Message& operator=(const Message& other) = default;\n#endif\n\n        bool operator==(const Message& other) const\n        {\n            return true\n                && (source == other.source)\n                && (msgId == other.msgId)\n                && (schemaId == other.schemaId)\n                && (schema == other.schema)\n                && (data == other.data);\n        }\n\n        bool operator!=(const Message& other) const\n        {\n            return !(*this == other);\n        }\n\n        void swap(Message& other)\n        {\n            using std::swap;\n            swap(source, other.source);\n            swap(msgId, other.msgId);\n            swap(schemaId, other.schemaId);\n            swap(schema, other.schema);\n            swap(data, other.data);\n        }\n\n        struct Schema;\n\n    protected:\n        void InitMetadata(const char*, const char*)\n        {\n        }\n    };\n\n    inline void swap(Message& left, Message& right)\n    {\n        left.swap(right);\n    }\n\n    \n    namespace _bond_enumerators\n    {\n    namespace ResponseCode\n    {\n        enum ResponseCode\n        {\n            ACK_SUCCESS = 0,\n            ACK_FAILED = 1,\n            ACK_UNKNOWN_SCHEMA_ID = 2,\n            ACK_DECODE_ERROR = 3,\n            ACK_INVALID_SOURCE = 4,\n            ACK_DUPLICATE_SCHEMA_ID = 5\n        };\n        \n        extern const std::map<enum ResponseCode, std::string> _value_to_name_ResponseCode;\n        extern const std::map<std::string, enum ResponseCode> _name_to_value_ResponseCode;\n\n        inline\n        const char* GetTypeName(enum ResponseCode)\n        {\n            return \"ResponseCode\";\n        }\n\n        inline\n        const char* GetTypeName(enum ResponseCode, const bond::qualified_name_tag&)\n        {\n            return \"mdsdinput.ResponseCode\";\n        }\n\n        inline\n        const std::map<enum ResponseCode, std::string>& GetValueToNameMap(enum ResponseCode)\n        {\n            return _value_to_name_ResponseCode;\n        }\n\n        inline\n        const std::map<std::string, enum ResponseCode>& GetNameToValueMap(enum ResponseCode)\n        {\n            return _name_to_value_ResponseCode;\n        }\n\n        const std::string& ToString(enum ResponseCode value);\n\n        void FromString(const std::string& name, enum ResponseCode& value);\n\n        inline\n        bool ToEnum(enum ResponseCode& value, const std::string& name)\n        {\n            std::map<std::string, enum 
ResponseCode>::const_iterator it =\n                _name_to_value_ResponseCode.find(name);\n\n            if (_name_to_value_ResponseCode.end() == it)\n                return false;\n\n            value = it->second;\n\n            return true;\n        }\n    } // namespace ResponseCode\n    } // namespace _bond_enumerators\n\n    using namespace _bond_enumerators::ResponseCode;\n    \n\n    \n    struct Ack\n    {\n        uint64_t msgId;\n        ::mdsdinput::ResponseCode code;\n        \n        Ack()\n          : msgId(),\n            code(::mdsdinput::_bond_enumerators::ResponseCode::ACK_SUCCESS)\n        {\n        }\n\n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated copy ctor OK\n        Ack(const Ack& other) = default;\n#endif\n        \n#ifndef BOND_NO_CXX11_RVALUE_REFERENCES\n        Ack(Ack&& other)\n          : msgId(std::move(other.msgId)),\n            code(std::move(other.code))\n        {\n        }\n#endif\n        \n        \n#ifndef BOND_NO_CXX11_DEFAULTED_FUNCTIONS\n        // Compiler generated operator= OK\n        Ack& operator=(const Ack& other) = default;\n#endif\n\n        bool operator==(const Ack& other) const\n        {\n            return true\n                && (msgId == other.msgId)\n                && (code == other.code);\n        }\n\n        bool operator!=(const Ack& other) const\n        {\n            return !(*this == other);\n        }\n\n        void swap(Ack& other)\n        {\n            using std::swap;\n            swap(msgId, other.msgId);\n            swap(code, other.code);\n        }\n\n        struct Schema;\n\n    protected:\n        void InitMetadata(const char*, const char*)\n        {\n        }\n    };\n\n    inline void swap(Ack& left, Ack& right)\n    {\n        left.swap(right);\n    }\n} // namespace mdsdinput\n\n"
  },
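Taken together, these types describe a self-announcing wire format: a `Message` carries a `schemaId`, optionally the `SchemaDef` itself for ids the receiver has not yet seen, and the row payload as a `bond::blob`. A hypothetical construction sketch using only the fields declared above (the helper name and field values are illustrative):

```cpp
#include "mdsd_input_types.h"

mdsdinput::Message MakeMessage()   // hypothetical helper
{
    mdsdinput::FieldDef fd;
    fd.name = "text";
    fd.fieldType = mdsdinput::FT_STRING;

    mdsdinput::SchemaDef schema;
    schema.fields.push_back(fd);
    // schema.timestampFieldIdx is a bond::nullable<uint32_t>; leaving it
    // unset means "no designated timestamp column".

    mdsdinput::Message msg;
    msg.source = "syslog";         // illustrative source name
    msg.msgId = 1;
    msg.schemaId = 1;
    // msg.schema (nullable) would carry `schema` the first time this schemaId
    // is sent; msg.data carries the serialized row as a bond::blob.
    return msg;
}
```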
  {
    "path": "Diagnostic/mdsd/mdsdlog/CMakeLists.txt",
    "content": "set(SOURCES\n    Logger.cc\n    Trace.cc\n)\n\nadd_library(${LOG_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS ${LOG_LIB_NAME}\n    ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdlog/Logger.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Logger.hh\"\n\n#include <cstdio>\n#include <cerrno>\n#include <string>\n#include <vector>\n#include <iomanip>\n#include <thread>\n#include <chrono>\n#include <system_error>\n\nextern \"C\" {\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <execinfo.h>\n#include <sys/time.h>\n#include <sys/uio.h>\n}\n\nstatic void\nWriteTimeAndMessage(int fd, const char * timeBuffer, size_t timeLength, const char * message, size_t messageLength)\n{\n\tif (timeBuffer == nullptr || message == nullptr) {\n\t\tthrow std::invalid_argument(\"Invalid argument; cannot be nullptr\");\n\t}\n\tstruct iovec iov[4];\n\tstatic std::string separator { \": \" };\n\tstatic char newline = '\\n';\n\n\t// Deliberately cast the const away. The C++ standard permits this as long as the\n\t// caller doesn't actually try to change write to the const object. The POSIX\n\t// standard defines iovec::iov_base as a void* so the struct definition can be\n\t// shared with readv() and writev().\n\tiov[0].iov_base = static_cast<void*>(const_cast<char*>(timeBuffer));\n\tiov[0].iov_len = timeLength;\n\n\tiov[1].iov_base = static_cast<void*>(const_cast<char*>(separator.c_str()));\n\tiov[1].iov_len = separator.length();\n\n\tiov[2].iov_base = static_cast<void*>(const_cast<char*>(message));\n\tiov[2].iov_len = messageLength;\n\n\tiov[3].iov_base = static_cast<void*>(&newline);\n\tiov[3].iov_len = 1;\n\n\tssize_t totalLength = timeLength + separator.length() + messageLength + 1;\n\n\tssize_t result = writev(fd, iov, sizeof(iov)/sizeof(struct iovec));\n\tif (result == -1) {\n\t\tauto saved_errno = errno;\n\t\tstd::error_code ec(saved_errno, std::system_category());\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Writev failed: errno \" << saved_errno << \": \" << ec.message();\n\t\tthrow std::runtime_error(msg.str());\n\t} else if (result != totalLength) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"Writev() short write: requested \" << totalLength << \" but wrote \" << result;\n\t\tthrow std::runtime_error(msg.str());\n\t}\n}\n\nsize_t\nLogger::TimestampISO8601::to_string(struct timeval * tv, char * buffer, size_t buflen)\n{\n\tstruct tm zulu;\n\tsize_t totalLength;\n\n\tif (!tv || !buffer || !buflen) {\n\t\treturn 0;\n\t}\n\n\t(void)gmtime_r(&(tv->tv_sec), &zulu);\n\ttotalLength = strftime(buffer, buflen, \"%Y-%m-%dT%H:%M:%S\", &zulu);\n\tbuffer += totalLength;\n\tbuflen -= totalLength;\n\tif (buflen < 10) {\t\t// Fractional time won't fit\n\t\treturn totalLength;\n\t}\n\n\t*buffer++ = '.';\n\tbuflen--;\n\n\tauto usec = static_cast<unsigned long>(tv->tv_usec);\n\tfor (int offset = 5; offset >= 0; offset--) {\n\t\tif (usec) {\n\t\t\t*(buffer+offset) = '0' + (usec % 10);\n\t\t\tusec /= 10;\n\t\t} else {\n\t\t\t*(buffer+offset) = '0';\n\t\t}\n\t}\n\tbuffer += 6;\n\tbuflen -= 6;\n\n\t(void)strncpy(buffer, \"0Z\", buflen-1);\n\n\treturn totalLength+9;\n}\n\nvoid\nLogger::AppendErrnoToMsg(int Error, char * buf, size_t buflen)\n{\n\tchar errstrbuf[256];\n\tchar *msg = strerror_r(Error, errstrbuf, 256);\n\n\tint offset = strlen(buf);\n\tsnprintf(buf+offset, buflen-offset, \"errno %d (%s)\", Error, msg);\n}\n\nLogger::LogWriter::LogWriter(const char * filename) : m_delay(false)\n{\n\tint tmp_fd = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0755);\n\tif (tmp_fd < 0) {\n\t\tchar msgbuf[256];\n\t\tsnprintf(msgbuf, sizeof(msgbuf), \"LogWriter creat failed (errno %d) for path %s\", errno, 
filename);\n\t\tmsgbuf[255] = 0;\t// Just in case filename is too long for the buffer\n\t\tm_fd = dup(2);\t\t// Use whatever was stderr\n\t\tthis->Write(msgbuf);\n\t} else {\n\t\tm_fd = tmp_fd;\n\t\tm_filename = filename;\n\t}\n}\n\nLogger::LogWriter::LogWriter() : m_delay(false) { m_fd = dup(2); }\n\nLogger::LogWriter::~LogWriter() { close(m_fd); }\n\nLogger::LogWriter::LogWriter(const LogWriter& orig) : m_filename(orig.m_filename), m_delay(false) { m_fd = dup(orig.m_fd); }\nLogger::LogWriter& Logger::LogWriter::operator=(const LogWriter & orig)\n{\n\tif (&orig != this) {\n\t\tif (m_fd >= 0) {\n\t\t\tclose(m_fd);\t// Don't leak the descriptor we already own\n\t\t}\n\t\tm_fd = dup(orig.m_fd);\n\t\tm_filename = orig.m_filename;\n\t\tm_delay = orig.m_delay;\n\t}\n\treturn *this;\n}\n\nint\nLogger::LogWriter::Rotate()\n{\n\tif (m_filename.empty()) {\n\t\treturn -1;\n\t}\n\n\tauto prev_fd = m_fd;\n\tm_fd = open(m_filename.c_str(), O_WRONLY | O_APPEND | O_CREAT, 0755);\n\tif (m_fd < 0) {\n\t\tm_fd = prev_fd;\n\t\treturn -1;\n\t}\n\treturn prev_fd;\n}\n\nvoid Logger::LogWriter::Write(const char * msg, size_t msgLength)\n{\n\tif (!msg) {\n\t\treturn;\n\t}\n\n\tchar timebuffer[100];\n\tstruct timeval tv;\n\t(void)gettimeofday(&tv, 0);\n\tsize_t timeLength = timestamp->to_string(&tv, timebuffer, sizeof(timebuffer));\n\ttry {\n\t\tauto fd = m_fd;\n\t\tif (m_delay) {\n\t\t\tstd::this_thread::sleep_for(std::chrono::milliseconds(100));\n\t\t}\n\t\tWriteTimeAndMessage(fd, timebuffer, timeLength, msg, msgLength);\n\t}\n\tcatch (...)\n\t{\n\t\t// We're screwed; can't log a message if logging throws an exception\n\t}\n}\n\nvoid Logger::LogWriter::Write(const char * msg)\n{\n\tif (!msg) {\n\t\treturn;\n\t}\n\tWrite(msg, strlen(msg));\n}\n\nvoid Logger::LogWriter::Write(const std::string& msg)\n{\n\tWrite(msg.c_str(), msg.length());\n}\n\nLogger::LogWriter* Logger::errorlog = 0;\nLogger::LogWriter* Logger::warnlog = 0;\nLogger::LogWriter* Logger::infolog = 0;\n\nstd::unique_ptr<Logger::Timestamp> Logger::timestamp = std::unique_ptr<Logger::Timestamp>(new Logger::TimestampISO8601());\n\nvoid\nLogger::Init()\n{\n\tif (!errorlog)\n\t\terrorlog = new Logger::LogWriter();\n\tif (!warnlog)\n\t\twarnlog = new Logger::LogWriter();\n\tif (!infolog)\n\t\tinfolog = new Logger::LogWriter();\n\n\tSetTimestamp(std::unique_ptr<Logger::Timestamp>(new Logger::TimestampISO8601()));\n}\n\nvoid\nLogger::Closer::rotate(Logger::LogWriter* log)\n{\n\tif (log) {\n\t\tint fd = log->Rotate();\n\t\tif (fd >= 0) ToClose.push_back(fd);\n\t}\n}\n\nstatic void\nCloseAfterDelay(std::chrono::duration<int> delaySeconds, std::vector<int> ToClose)\n{\n\tstd::this_thread::sleep_for(delaySeconds);\n\tfor (auto fd : ToClose) {\n\t\tclose(fd);\n\t}\n}\n\nLogger::Closer::~Closer()\n{\n\tif (! 
ToClose.empty()) {\n\t\tstd::thread t { CloseAfterDelay, std::chrono::seconds(5), ToClose };\n\t\tt.detach();\n\t}\n}\n\nvoid\nLogger::RotateLogs()\n{\n\tLogger::Closer stash;\n\n\tstash.rotate(errorlog);\n\tstash.rotate(warnlog);\n\tstash.rotate(infolog);\n}\n\nvoid\nLogger::EnableDelay()\n{\n\tif (errorlog) errorlog->m_delay = true;\n\tif (warnlog) warnlog->m_delay = true;\n\tif (infolog) infolog->m_delay = true;\n}\n\nvoid\nLogger::StackTrace(int signo, void **stack, int count)\n{\n\tchar buf[256];\n\n\tif (errorlog) {\n\t\tif (signo > 0) {\n\t\t\tsnprintf(buf, sizeof(buf), \"FATAL: mdsd killed by signal %d\\nStacktrace follows\\n===========\", signo);\n\t\t}\n\t\telse {\n\t\t\tsnprintf(buf, sizeof(buf), \"FATAL: mdsd killed by direct call, no signal involved\\nStacktrace follows\\n===========\");\n\t\t}\n\t\terrorlog->Write(buf);\n\t\tbacktrace_symbols_fd(stack, count, errorlog->m_fd);\n\t\terrorlog->Write(\"===========\");\n\t}\n}\n\nextern \"C\" void\nLogStackTrace(int signo, void **stack, int count)\n{\n\tLogger::StackTrace(signo, stack, count);\n}\n\nextern \"C\" void\nLogAbort()\n{\n\tLogger::LogError(\"SIGABRT received - immediate exit.\");\n}\n\nextern \"C\" void\nRotateLogs()\n{\n\tLogger::RotateLogs();\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdlog/Logger.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#ifndef _LOGGER_HH_\n#define _LOGGER_HH_\n\n#include <cstring>\n#include <string>\n#include <sstream>\n#include <vector>\n#include <memory>\n\nstruct timeval;\n\nclass Logger\n{\npublic:\n\tclass Timestamp {\n\tpublic:\n\t\tTimestamp() {}\n\t\tvirtual ~Timestamp() {}\n\t\tvirtual size_t to_string(struct timeval * tv, char * buffer, size_t buflen) = 0;\n\t};\n\tclass TimestampISO8601 : public Timestamp {\n\tpublic:\n\t\tvirtual size_t to_string(struct timeval * tv, char * buffer, size_t buflen);\n\t};\nprivate:\n\tclass Closer;\n\tclass LogWriter {\n\n\tfriend class Logger;\n\tfriend class Logger::Closer;\n\n\tprivate:\n\t\tint m_fd;\n\t\tstd::string m_filename;\n\t\tbool m_delay;\n\n\tpublic:\n\t\tLogWriter(const char * filename);\n\t\tLogWriter();\n\n\t\t~LogWriter();\n\n\t\tLogWriter(const LogWriter& orig);\n\t\tLogWriter& operator=(const LogWriter & orig);\n\n\t\t/// <summary>Write a message to a logfile</summary>\n\t\t/// <param name=\"msg\">The message to be written</param>\n\t\tvoid Write(const char * msg);\n\t\t/// <summary>Write a message to a logfile</summary>\n\t\t/// <param name=\"msg\">The message to be written</param>\n\t\t/// <param name=\"len\">The length of message</param>\n\t\tvoid Write(const char * msg, size_t len);\n\t\t/// <summary>Write a message to a logfile</summary>\n\t\t/// <param name=\"msg\">The message to be written</param>\n\t\tvoid Write(const std::string& msg);\n\n\t\tint Rotate();\n\t};\n\n        class Closer\n        {\n        public:\n                void rotate(Logger::LogWriter* log);\n                ~Closer();\n        private:\n                std::vector<int> ToClose;\n        };\n\n\tstatic LogWriter * errorlog;\n\tstatic LogWriter * warnlog;\n\tstatic LogWriter * infolog;\n\n\tstatic std::unique_ptr<Timestamp> timestamp;\n\n\tLogger();\n\npublic:\n\tstatic void Init();\n\n\tstatic void LogError(const char * msg) { if (errorlog) errorlog->Write(msg); }\n\tstatic void LogWarn(const char * msg) { if (warnlog) warnlog->Write(msg); }\n\tstatic void LogInfo(const char * msg) { if (infolog) infolog->Write(msg); }\n\n\t/// <summary>Write a message to the error logfile</summary>\n\t/// <param name=\"msg\">The message to be written</param>\n\tstatic void LogError(const std::string& msg) { if (errorlog) errorlog->Write(msg); }\n\tstatic void LogError(const std::ostringstream& msg) { if (errorlog) LogError(msg.str()); }\n\tstatic void LogWarn(const std::string& msg) { if (warnlog) warnlog->Write(msg); }\n\tstatic void LogWarn(const std::ostringstream& msg) { if (warnlog) LogWarn(msg.str()); }\n\tstatic void LogInfo(const std::string& msg) { if (infolog) infolog->Write(msg); }\n\tstatic void LogInfo(const std::ostringstream& msg) { if (infolog) LogInfo(msg.str()); }\n\n\tstatic void StackTrace(int signo, void **stack, int count);\n\n\tstatic void SetErrorLog(const char * pathname) { delete errorlog; errorlog = new LogWriter(pathname); }\n\tstatic void SetWarnLog(const char * pathname) { delete warnlog; warnlog = new LogWriter(pathname); }\n\tstatic void SetInfoLog(const char * pathname) { delete infolog; infolog = new LogWriter(pathname); }\n\n\tstatic void CloseAllLogs() { delete errorlog; delete warnlog; delete infolog; errorlog = warnlog = infolog = nullptr; }\n\n\tstatic void AppendErrnoToMsg(int Error, char * buf, size_t buflen);\n\n\tstatic void SetTimestamp(std::unique_ptr<Timestamp> && timestamp_) { timestamp = std::move(timestamp_); 
}\n\n\tstatic void RotateLogs();\n\tstatic void EnableDelay();\n};\n\n#endif //_LOGGER_HH_\n\n// vim: set ai sw=8 :\n"
  },
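  {
    "path": "Diagnostic/mdsd/examples/LoggerUsageExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical, minimal usage sketch for the Logger interface declared in\n// mdsdlog/Logger.hh; the file path of this entry and the log paths below are\n// made up for illustration.\n\n#include \"Logger.hh\"\n#include <sstream>\n\nint main()\n{\n    // Route each severity to its own file. SetErrorLog() and friends replace\n    // any previously configured writer for that severity.\n    Logger::SetErrorLog(\"/tmp/example.err\");\n    Logger::SetWarnLog(\"/tmp/example.warn\");\n    Logger::SetInfoLog(\"/tmp/example.info\");\n\n    Logger::LogInfo(\"plain C-string message\");\n\n    // The ostringstream overloads let callers build a message inline.\n    std::ostringstream msg;\n    msg << \"computed value: \" << 42;\n    Logger::LogWarn(msg);\n\n    // Logger::Init() also exists; its exact behavior is defined in Logger.cc\n    // (not shown here), so this sketch does not rely on it.\n\n    // Rotate the files (e.g., from a SIGHUP handler), then release them.\n    Logger::RotateLogs();\n    Logger::CloseAllLogs();\n    return 0;\n}\n"
  },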
  {
    "path": "Diagnostic/mdsd/mdsdlog/Trace.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n#include <sstream>\n\nTrace::Flags Trace::_interests = Trace::Flags::None;\n\nTrace::Trace(Flags covers, const char *calling_fn)\n\t:  _covers(covers), _fn(calling_fn), _active((_interests & covers) != 0)\n{\n\tif (_active) {\n\t\tLogger::LogInfo(\"Entering \" + _fn);\n\t}\n}\n\nTrace::Trace(Flags covers, std::string calling_fn)\n\t:  _covers(covers), _fn(std::move(calling_fn)), _active((_interests & covers) != 0)\n{\n\tif (_active) {\n\t\tLogger::LogInfo(\"Entering \" + _fn);\n\t}\n}\n\nTrace::~Trace()\n{\n\tif (_active) {\n\t\tLogger::LogInfo(\"Leaving \" + _fn);\n\t}\n}\n\nvoid\nTrace::Note(const char *filename, int lineno, const std::string& msg) const\n{\n    return Note(filename, lineno, msg, Type::INFO);\n}\n\nstd::string\nTrace::TruncateFilename(const std::string & filename)\n{\n\tsize_t slash = filename.find_last_of('/');\n\tif (slash == std::string::npos || slash <= 1) {\n\t\treturn filename;\n\t}\n\tslash = filename.find_last_of('/', slash-1);\n\tif (slash == std::string::npos || slash == 0) {\n\t\treturn filename;\n\t}\n\treturn std::string(\"...\").append(filename.substr(slash));\n}\n\nvoid\nTrace::Note(const char *filename, int lineno, const std::string& msg, Type level) const\n{\n\tif (_active) {\n\t\tstd::ostringstream message;\n\t\tmessage << _fn << \" (\" << TruncateFilename(filename) << \" +\" << lineno << \") \" << msg;\n\t\tif (level >= Type::INFO) {\n\t\t    Logger::LogInfo(message.str());\n\t\t}\n\t\tif (level >= Type::WARN) {\n\t\t    Logger::LogWarn(message.str());\n\t\t}\n\t\tif (level >= Type::ERROR) {\n\t\t    Logger::LogError(message.str());\n\t\t}\n\t}\n}\n\nTrace&\nTrace::Prefix(const char * filename, int lineno, Trace::Type level = Trace::Type::INFO)\n{\n\tif (IsActive()) {\n\t\t_msg << _fn << \" (\" << TruncateFilename(filename) << \" +\" << lineno << \") \";\n\t\t_level = level;\n\t}\n\treturn *this;\n}\n\nbool\nTrace::flush()\n{\n\tif (IsActive()) {\n\t\tauto msg = _msg.str();\n\t\tLogger::LogInfo(msg);\n\t\tif (_level == Trace::Type::WARN) {\n\t\t\tLogger::LogWarn(msg);\n\t\t} else if (_level == Trace::Type::ERROR) {\n\t\t\tLogger::LogError(msg);\n\t\t}\n\n\t\t_msg.str(\"\");\n\t\t_msg.clear();\n\t\t_level = Trace::Type::INFO;\n\t}\n\treturn true;\n}\n\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdlog/Trace.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _TRACE_HH_\n#define _TRACE_HH_\n\n#include <string>\n#include <sstream>\n\n#define NOTE(MSG) Note(__FILE__, __LINE__, MSG)\n#define NOTEWARN(MSG) Note(__FILE__, __LINE__, MSG, Trace::Type::WARN)\n#define NOTEERR(MSG) Note(__FILE__, __LINE__, MSG, Trace::Type::ERROR)\n\n#define TRACEINFO(trace,body) trace.IsActive() && (trace.Prefix(__FILE__, __LINE__, Trace::Type::INFO) << body).flush()\n#define TRACEWARN(trace,body) trace.IsActive() && (trace.Prefix(__FILE__, __LINE__, Trace::Type::WARN) << body).flush()\n#define TRACEERROR(trace,body) trace.IsActive() && (trace.Prefix(__FILE__, __LINE__, Trace::Type::ERROR) << body).flush()\n\nclass Trace\n{\npublic:\n\tenum Flags\n\t{\n\t\tNone= 0, ConfigLoad=1, EventIngest=2, CanonicalEvent=4,\n\t\tBatching=8, XTable=0x10, Scheduler=0x20, OMIIngest=0x40, Credentials=0x80,\n\t\tDaemon = 0x100, ConfigUse=0x200, SignalHandlers=0x400, EntityName=0x800,\n\t\tQueryPipe = 0x1000, Local = 0x2000, DerivedEvent = 0x4000,\n\t\tExtensions = 0x8000, AppInsights = 0x10000, MdsCmd = 0x20000,\n\t\tBond = 0x40000, SchemaCache = 0x80000, BondDetails = 0x100000,\n\t\tIngestContents = 0x200000, JsonBlob = 0x400000\n\t};\n\n\tenum Type {INFO, WARN, ERROR};\n\n\tTrace(Flags trace_level, const char * calling_fn);\n\tTrace(Flags trace_level, std::string calling_fn);\n\t~Trace();\n\n\tvoid Note(const char * filename, int lineno, const std::string& msg) const;\n\tvoid Note(const char * filename, int lineno, const std::string& msg, Type level) const;\n\tbool IsActive() const { return _active; }\n\tbool IsAlsoActive(Flags flags) const { return (flags & _interests) == flags; }\n\tFlags Covers() const { return _covers; }\n\n\t// Pushes the tracing line prefix into the accumulated message\n\tTrace& Prefix(const char * filename, int lineno, Type level);\n\t// Adds the item to the stream holding the accumulated message\n\ttemplate <typename T> friend Trace& operator<<(Trace& trace, const T & item)\n\t{\n\t\tif (trace.IsActive()) { trace._msg << item; } return trace;\n\t}\n\n\tbool flush();\n\n\tstatic std::string TruncateFilename(const std::string&);\n\tstatic void SetInterests(Flags flags) { _interests = flags; }\n#define SCUI(foo) static_cast<unsigned int>(foo)\n\tstatic void AddInterests(Flags flags) { _interests = static_cast<Flags>(SCUI(_interests) | SCUI(flags)); }\n#undef SCUI\n\nprivate:\n\tFlags _covers;\n\tstd::string _fn;\n\tbool _active;\t\t\t// True if the calling function covers any of the tasks of interest\n\n\tstd::ostringstream _msg;\t// Accumulates a trace message\n\tType _level;\t\t\t// The severity level of the message being accumulated\n\n\tstatic Flags _interests;\n};\n\n#endif // _TRACE_HH_\n\n// vim: set tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab :\n"
  },
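  {
    "path": "Diagnostic/mdsd/examples/TraceUsageExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for the Trace facility in mdsdlog/Trace.hh; the\n// function name and flag choices are illustrative only.\n\n#include \"Trace.hh\"\n#include \"Logger.hh\"\n\nstatic void LoadConfig()\n{\n    // The constructor logs \"Entering <fn>\" and the destructor \"Leaving <fn>\",\n    // but only while ConfigLoad is among the flags of interest.\n    Trace trace(Trace::ConfigLoad, \"LoadConfig\");\n\n    // NOTE() expands to Note(__FILE__, __LINE__, msg) at INFO level.\n    trace.NOTE(\"starting configuration load\");\n\n    // The streaming macros accumulate a message and flush it at a level.\n    int itemCount = 3; // illustrative value\n    TRACEINFO(trace, \"loaded \" << itemCount << \" items\");\n    TRACEWARN(trace, \"using default for a missing setting\");\n}\n\nint main()\n{\n    Logger::SetInfoLog(\"/tmp/example.info\"); // Trace writes through Logger\n    Trace::SetInterests(Trace::ConfigLoad);  // enable only ConfigLoad traces\n    LoadConfig();\n    Logger::CloseAllLogs();\n    return 0;\n}\n"
  },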
  {
    "path": "Diagnostic/mdsd/mdsdutil/AzureUtility.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"AzureUtility.hh\"\n#include \"Utility.hh\"\n#include \"Trace.hh\"\n#include <stdexcept>\n#include <map>\n#include <was/storage_account.h>\n#include <was/table.h>\n#include <was/blob.h>\n#include <was/error_code_strings.h>\n\n//////// Begin MdsdUtil namespace\n\nnamespace MdsdUtil {\n\nvoid ValidateStorageCredentialForTable(const std::string& connStr)\n{\n    if (connStr.empty())\n    {\n        throw std::invalid_argument(\"Storage connection string cannot be empty\");\n    }\n\n    // Method: Just calling cloud_table_client.list_tables() will\n    // throw an exception with a proper message if the storage key\n    // is not good (server auth failed) or if the storage account\n    // name/type (e.g., Premium or Blob storage) is invalid (DNS\n    // failure).\n\n    azure::storage::cloud_storage_account::parse(connStr)\n    .create_cloud_table_client()\n    .list_tables();\n}\n\nvoid ValidateSAS(const std::string& sastoken, bool& isValidAccountSas)\n{\n    isValidAccountSas = false;\n\n    std::map<std::string, std::string> qry;\n    MdsdUtil::ParseQueryString(sastoken, qry);\n\n    // Logic 1: If the sastoken doesn't contain 'ss' param,\n    // we consider it a valid service SAS (not checking any further,\n    // keeping the current behavior).\n    auto ss = qry.find(\"ss\");\n    if (ss == qry.end()) {\n        return;\n    }\n\n#define CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(param, entry, reason) \\\n    if (param.find(entry) == std::string::npos) {\\\n        throw MdsdInvalidSASException(reason);\\\n    }\n\n    // Logic 2: If there's an 'ss' param, it should be an account SAS,\n    // and needs the following entries in params:\n    // 'ss' (SignedServices): must include 'b' (blob) and 't'\n    // 'srt' (SignedResourceTypes): must include 'c' (container) and 'o' (object).\n    //     May later need 's' (service) as well if we want to validate the SAS key by listing tables.\n    // 'sp' (SignedPermissions): must include 'w' (write), 'u' (update), 'c' (create), 'a' (add), 'l' (list).\n    //     'l' is still needed, because our ValidateStorageCredentialForTable() depends on list_tables().\n    const char* ssReqMsg = \"Account SAS must enable blob and table services (ss='bt' or better)\";\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(ss->second, 'b', ssReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(ss->second, 't', ssReqMsg);\n\n    const char* srtReqMsg = \"Account SAS must enable container and object access (srt='co' or better)\";\n    auto srt = qry.find(\"srt\");\n    if (srt == qry.end()) {\n        throw MdsdInvalidSASException(srtReqMsg);\n    }\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(srt->second, 'c', srtReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(srt->second, 'o', srtReqMsg);\n\n    const char* spReqMsg = \"Account SAS must grant sp='acluw' permissions or better\";\n    auto sp = qry.find(\"sp\");\n    if (sp == qry.end()) {\n        throw MdsdInvalidSASException(spReqMsg);\n    }\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(sp->second, 'a', spReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(sp->second, 'c', spReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(sp->second, 'l', spReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(sp->second, 'u', spReqMsg);\n    CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS(sp->second, 'w', spReqMsg);\n\n#undef CHECK_MISSING_ENTRY_IN_ACCOUNT_SAS\n\n    isValidAccountSas = true;\n}\n\n\nbool ContainerAlreadyExistsException(const 
azure::storage::storage_exception& e)\n{\n    const auto& r = e.result(); // handy reference\n    return r.is_response_available()\n            && (r.http_status_code() == web::http::status_codes::Conflict)\n            && (r.extended_error().code() == azure::storage::protocol::error_code_container_already_exists);\n\n}\n\n\nvoid CreateContainer(const std::string & connectionString, const std::string & containerName)\n{\n    Trace trace(Trace::ConfigLoad, \"MdsdUtil::CreateContainer\");\n\n    // Azure requires the container name to be all lower case\n    std::string containerNameLC(MdsdUtil::to_lower(containerName));\n\n    using namespace azure::storage;\n\n    try {\n        TRACEINFO(trace, \"Parsing connection string \\\"\" << connectionString << \"\\\"\");\n        auto acct = cloud_storage_account::parse(connectionString);\n        auto client = acct.create_cloud_blob_client();\n        TRACEINFO(trace, \"Get reference to container \\\"\" << containerNameLC << \"\\\"\");\n        auto container = client.get_container_reference(containerNameLC);\n\n        // This is a synchronous call!\n        TRACEINFO(trace, \"Create container (noop if it already exists)\");\n        container.create(); // create_if_not_exists() needs read perm on account SAS,\n                            // which is undesirable, so just use create() and swallow\n                            // the ContainerAlreadyExists exception.\n    }\n    catch (const azure::storage::storage_exception& e) {\n        if (MdsdUtil::ContainerAlreadyExistsException(e)) {\n            TRACEINFO(trace, \"Container already exists, ignoring the exception\");\n            return;\n        }\n        TRACEINFO(trace, \"Exception: \" << e.what());\n        throw;\n    }\n    catch (const std::exception & e) {\n        TRACEINFO(trace, \"Exception: \" << e.what());\n        throw;\n    }\n}\n\n\n};\n\n//////////// MdsdUtil namespace ends\n"
  },
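  {
    "path": "Diagnostic/mdsd/examples/CreateContainerExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for MdsdUtil::CreateContainer() from\n// AzureUtility.hh. The connection string is a placeholder; running this\n// against real storage requires a valid account.\n\n#include \"AzureUtility.hh\"\n#include <iostream>\n#include <string>\n\nint main()\n{\n    // CreateContainer() lower-cases the name (Azure requires lowercase),\n    // issues a synchronous create(), and swallows only the\n    // ContainerAlreadyExists error; anything else propagates.\n    std::string connStr =\n        \"DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=FAKE\";\n    try {\n        MdsdUtil::CreateContainer(connStr, \"ExampleContainer\");\n        std::cout << \"container present\\n\";\n    }\n    catch (const std::exception& e) {\n        std::cerr << \"create failed: \" << e.what() << '\\n';\n    }\n    return 0;\n}\n"
  },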
  {
    "path": "Diagnostic/mdsd/mdsdutil/AzureUtility.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _AZUREUTILITY_HH_\n#define _AZUREUTILITY_HH_\n\n#include <string>\n#include <stdexcept>\n\nnamespace azure { namespace storage { class storage_exception; }}\n\nnamespace MdsdUtil {\n\n/// <summary>Validate the storage credential (shared key or SAS) for table storage.\n/// Returns silently if it's valid. Throws an exception if it's not valid.\n/// The storage credential is invalid if it is incorrect, or\n/// if the storage account doesn't support table storage (e.g., Premium\n/// or Blob storage account). The caller must catch the exception.</summary>\nvoid ValidateStorageCredentialForTable(const std::string& connStr);\n\n/// <summary>Check the passed SAS token for its validity.\n/// Currently this is mainly to validate an account SAS for its minimal\n/// requirements (e.g., services, permissions, ...). We may perform\n/// more thorough SAS token validation here, even for a service SAS\n/// but it's currently out of scope.\n/// Returns silently iff it's any valid SAS (but it doesn't check much about\n/// a service SAS). Sets isValidAccountSas true if the sastoken is\n/// a valid account SAS with needed services and permissions.\n/// Throws an exception if the SAS token is invalid (currently only as an account SAS)</summary>\nvoid ValidateSAS(const std::string& sastoken, bool& isValidAccountSas);\n\nclass MdsdInvalidSASException : public std::runtime_error\n{\npublic:\n\tMdsdInvalidSASException(const std::string& message)\n\t\t: std::runtime_error(message)\n\t{}\n};\n\n\n/// <summary>Returns true iff the passed storage exception indicates\n/// the error code is \"ContainerAlreadyExists\".</summary>\nbool ContainerAlreadyExistsException(const azure::storage::storage_exception& e);\n\n/// <summary>Creates the specified container using the given connection string.\n/// This function may throw an exception and the caller should handle any.</summary>\nvoid CreateContainer(const std::string& connectionString, const std::string& containerName);\n\n}\n\n#endif // _AZUREUTILITY_HH_\n"
  },
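  {
    "path": "Diagnostic/mdsd/examples/ValidateSASExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for MdsdUtil::ValidateSAS() declared in\n// AzureUtility.hh. The SAS token below is fabricated and non-functional.\n\n#include \"AzureUtility.hh\"\n#include <iostream>\n#include <string>\n\nint main()\n{\n    // An account SAS carries ss/srt/sp query parameters; this sample meets\n    // the minimum ValidateSAS checks (ss=bt, srt=co, sp=acluw).\n    std::string sas = \"ss=bt&srt=co&sp=acluw&sig=FAKE\";\n\n    bool isValidAccountSas = false;\n    try {\n        MdsdUtil::ValidateSAS(sas, isValidAccountSas);\n        std::cout << (isValidAccountSas\n                          ? \"valid account SAS\\n\"\n                          : \"treated as a service SAS\\n\");\n    }\n    catch (const MdsdUtil::MdsdInvalidSASException& e) {\n        std::cerr << \"rejected: \" << e.what() << '\\n';\n    }\n    return 0;\n}\n"
  },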
  {
    "path": "Diagnostic/mdsd/mdsdutil/CMakeLists.txt",
    "content": "set(SOURCES\n    AzureUtility.cc\n    Crypto.cc\n    HttpProxySetup.cc\n    MdsTime.cc\n    OpensslCert.cc\n    OpensslCertStore.cc\n    Utility.cc\n)\n\n# Disable warnings from azure storage API.\nset_source_files_properties(\n    AzureUtility.cc\n    HttpProxySetup.cc\n    PROPERTIES\n    COMPILE_FLAGS \"-Wno-unused-value -Wno-reorder\"\n)\n\ninclude_directories(\n    ${STORAGE_INCLUDE_DIRS}\n    ${CASABLANCA_INCLUDE_DIRS}\n    ${OMI_INCLUDE_DIRS}\n    ${CMAKE_SOURCE_DIR}/mdsd\n    ${CMAKE_SOURCE_DIR}/mdsdlog\n)\n\nadd_library(${UTIL_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS ${UTIL_LIB_NAME}\n    ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/Crypto.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Crypto.hh\"\n#include <cstring>\n#include <map>\n#include <string>\n#include <stdexcept>\n#include <system_error>\nextern \"C\" {\n#include <openssl/md5.h>\n#include <errno.h>\n#include <unistd.h>\n#include <fcntl.h>\n//#include <sys/types.h>\n//#include <sys/stat.h>\n}\n\nnamespace Crypto {\n\nMD5Hash::MD5Hash()\n{\n    memset((void*)hash, 0, DIGEST_LENGTH);\n}\n\nbool\nMD5Hash::operator==(const MD5Hash& that) const\n{\n    return (0 == memcmp((void*)(this->hash), (void*)(that.hash), DIGEST_LENGTH));\n}\n\nbool\nMD5Hash::operator!=(const MD5Hash& that) const\n{\n    return (0 != memcmp((void*)(this->hash), (void*)(that.hash), DIGEST_LENGTH));\n}\n\nstd::string\nMD5Hash::to_string() const\n{\n    std::string result;\n    result.reserve(2 * DIGEST_LENGTH);\n\n    constexpr char digits[] = \"0123456789abcdef\";\n\n    for (size_t i = 0; i < DIGEST_LENGTH; i++) {\n        result.push_back(digits[(hash[i]>>4) & 0xf]);\n        result.push_back(digits[ hash[i]     & 0xf]);\n    }\n\n    return result;\n}\n\nMD5Hash\nMD5HashString(const std::string& input)\n{\n    MD5Hash hash;\n\n    MD5((const unsigned char *)input.c_str(), input.length(), hash.GetBuffer());\n    return hash;\n}\n\nMD5Hash\nMD5HashFile(const std::string & filename)\n{\n    int fd = open(filename.c_str(), O_RDONLY);\n    if (-1 == fd) {\n        throw std::system_error(errno, std::system_category(), std::string(\"Failed to open \").append(filename).append(\" for read\"));\n    }\n\n    MD5_CTX context;\n\n    MD5_Init(&context);\n\n    while (1) {\n        unsigned char buffer[65536];\n\n        ssize_t length = read(fd, buffer, sizeof(buffer));\n        if (-1 == length) {\n\t    close(fd);\n            throw std::system_error(errno, std::system_category(), \"Failed to read \" + filename);\n\t} else if (0 == length) {\n            break;\n\t}\n        MD5_Update(&context, buffer, length);\n    }\n\n    MD5Hash result;\n    MD5_Final(result.GetBuffer(), &context);\n    close(fd);\n    return result;\n}\n\nunsigned char\nchar_to_nybble(char c)\n{\n    static std::map<char, unsigned char> high_digits {\n        { 'a', 10 }, { 'A', 10 }, { 'b', 11 }, { 'B', 11 }, { 'c', 12 }, { 'C', 12 },\n        { 'd', 13 }, { 'D', 13 }, { 'e', 14 }, { 'E', 14 }, { 'f', 15 }, { 'F', 15 }\n    };\n\n    if (c >= '0' && c <= '9') {\n        return (unsigned char)(c - '0');\n    }\n\n    auto iter = high_digits.find(c);\n    if (iter != high_digits.end()) {\n        return iter->second;\n    }\n\n    std::string msg { \"Illegal character (\" };\n    msg.append(1, c).append(\"} in MD5 hashstring\");\n    throw std::domain_error(msg);\n}\n\nMD5Hash\nMD5Hash::from_hash(const std::string & hash_string)\n{\n    MD5Hash result;\n    unsigned char nybbles[DIGEST_LENGTH*2];\n    size_t current = 0;\n\n    for (char c : hash_string) {\n        if (c != ' ') {\n            nybbles[current++] = char_to_nybble(c);\n            if (current == sizeof(nybbles)) {\n                break;\n            }\n        }\n    }\n    if (current != sizeof(nybbles)) {\n        throw std::length_error(\"MD5 hash string too short\");\n    }\n\n    for (current = 0; current < DIGEST_LENGTH; current++) {\n        int offset = current << 1;\n        result.hash[current] = (nybbles[offset]<<4) + nybbles[offset+1];\n    }\n\n    return result;\n}\n\n};\n\n// vim: se sw=4 expandtab :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/Crypto.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _CRYPTO_HH_\n#define _CRYPTO_HH_\n\n#include <string>\n\nnamespace Crypto\n{\n\nclass MD5Hash\n{\npublic:\n        MD5Hash();\n        MD5Hash(const MD5Hash& orig) = default;\n        MD5Hash& operator=(const MD5Hash& that) = default;\n\tMD5Hash(MD5Hash&&) = default;\n        MD5Hash& operator=(MD5Hash&&) = default;\n\n        bool operator==(const MD5Hash& that) const;\n        bool operator!=(const MD5Hash& that) const;\n        unsigned char * GetBuffer() { return hash; }\n        const unsigned char * GetBuffer() const { return hash; }\n        std::string to_string() const;\n\n\tstatic MD5Hash from_hash(const std::string &);\n\n\tstatic constexpr size_t DIGEST_LENGTH = 16;\nprivate:\n\n        unsigned char hash[DIGEST_LENGTH];\n};\n\nMD5Hash\nMD5HashString(const std::string&);\n\nMD5Hash\nMD5HashFile(const std::string&);\n\n};\n\n#endif // _CRYPTO_HH_\n\n// vim: se sw=8 :\n"
  },
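  {
    "path": "Diagnostic/mdsd/examples/Md5RoundTripExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for the Crypto::MD5Hash helpers in Crypto.hh:\n// hashing a string, printing the digest as hex, and parsing it back.\n\n#include \"Crypto.hh\"\n#include <cassert>\n#include <iostream>\n\nint main()\n{\n    Crypto::MD5Hash h = Crypto::MD5HashString(\"hello world\");\n\n    // to_string() yields 32 lowercase hex digits (two per digest byte).\n    std::string hex = h.to_string();\n    std::cout << \"md5 = \" << hex << '\\n';\n\n    // from_hash() accepts the hex form (embedded spaces are skipped) and\n    // should round-trip to an equal digest.\n    Crypto::MD5Hash parsed = Crypto::MD5Hash::from_hash(hex);\n    assert(parsed == h);\n    return 0;\n}\n"
  },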
  {
    "path": "Diagnostic/mdsd/mdsdutil/HttpProxySetup.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <cpprest/uri.h>\n#include <was/common.h>\n#include <boost/regex.hpp>\n\n#include \"HttpProxySetup.hh\"\n#include \"Utility.hh\"\n#include \"Logger.hh\"\n\nnamespace MdsdUtil {\n\nstatic web::web_proxy GetProxySetting(\n    const std::string& proxy_setting_string // \"[[http[s]:]//][username[:password]@]host[:port]\"\n)\n{\n    std::string host_port;          // \"//host[:port]\" for CPPREST API\n    std::string username, password; // Should be URL-decoded\n\n     boost::regex re { R\"(^\\s*((https?:)?//)?((([^:/@\\s]+)(:([^:/@\\s]+))?)@)?([\\w.\\-]+)(:([0-9]+))?\\s*$)\" };\n    // Submatch 0 is the whole string\n    // Submatch 5 is the optional (but required if @ is present) username\n    // Submatch 7 is the optional password\n    // Submatch 8 is the required hostname\n    // Submatch 10 is the optional port number\n    boost::smatch matches;\n    if (!boost::regex_match(proxy_setting_string, matches, re)) {\n        // No match\n        throw HttpProxySetupException(\"Invalid proxy setting string\");\n    }\n    \n    // We've got a match\n#define FIELD_TO_STRING(FN) (std::string(matches[FN].first, matches[FN].second))\n    try {\n        if (matches[5].matched) {\n            username = web::uri::decode(FIELD_TO_STRING(5));\n        }\n        if (matches[7].matched) {\n            password = web::uri::decode(FIELD_TO_STRING(7));\n        }\n    } catch (web::uri_exception& e) {\n        throw HttpProxySetupException(std::string(\"Exception occurred when URL-decoding username \"\n                                                  \" or password. Exception message: \")\n                                     + e.what());\n    }\n\n    if (matches[8].matched) {\n        host_port = std::string(\"//\") + FIELD_TO_STRING(8);\n    }\n    if (matches[10].matched) {\n        host_port += std::string(\":\") + FIELD_TO_STRING(10);\n    }\n\n    web::web_proxy proxy_setting(_XPLATSTR(host_port.c_str()));\n    if (!username.empty()) {\n        proxy_setting.set_credentials(web::credentials(_XPLATSTR(username.c_str()), _XPLATSTR(password.c_str())));\n    }\n\n    return proxy_setting;\n}\n\nvoid SetStorageDefaultHttpProxy(const std::string& proxy_setting_string)\n{\n    web::web_proxy proxy_setting = GetProxySetting(proxy_setting_string);\n\n    azure::storage::operation_context::set_default_proxy(proxy_setting);\n\n    std::ostringstream msg;\n    msg << \"Set http proxy for Azure Storage API with '\" << proxy_setting_string << \"'. 
\";\n    msg << \"The resulted http proxy setting is '\" << MdsdUtil::GetStorageDefaultHttpProxyAddress() << \"'.\";\n    Logger::LogInfo(msg);\n}\n\nvoid CheckProxySettingString(const std::string& proxy_setting_string)\n{\n    GetProxySetting(proxy_setting_string);\n}\n\nstd::string GetStorageDefaultHttpProxyAddress()\n{\n    web::web_proxy default_proxy = azure::storage::operation_context::default_proxy();\n\n    return default_proxy.address().to_string();\n}\n\nvoid\nSetStorageHttpProxy(\n    std::string proxy_setting_string,\n    const std::vector<std::string> & proxyEnvVars\n    )\n{\n    std::string proxy_env_var_name;\n\n    if (proxy_setting_string.empty()) {\n        for (auto env : proxyEnvVars) {\n            proxy_setting_string = MdsdUtil::GetEnvironmentVariableOrEmpty(env);\n            if (!proxy_setting_string.empty()) {\n                proxy_env_var_name = env;\n                break;\n            }\n        }\n    }\n\n    if (!proxy_setting_string.empty()) {\n        try {\n            SetStorageDefaultHttpProxy(proxy_setting_string);\n        }\n        catch(const HttpProxySetupException& ex) {\n            std::ostringstream msg;\n            msg << \"Fatal error: setting http proxy for Azure Storage API to '\" << proxy_setting_string << \"' \";\n            if (!proxy_env_var_name.empty()) {\n                msg << \"from environment variable '\" << proxy_env_var_name << \"' \";\n            }\n            msg << \"failed: \" << ex.what();\n            throw std::runtime_error(msg.str());\n        }\n    }\n}\n\nvoid\nRemoveStorageHttpProxy()\n{\n    web::web_proxy proxy_setting;\n    azure::storage::operation_context::set_default_proxy(proxy_setting);\n}\n\n} // namespace MdsdUtil\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/HttpProxySetup.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _HTTPPROXYSETUP_HH_\n#define _HTTPPROXYSETUP_HH_\n\n#include <string>\n#include <stdexcept>\n#include <vector>\n\nnamespace MdsdUtil {\n\n\nclass HttpProxySetupException : public std::runtime_error\n{\npublic:\n    HttpProxySetupException(const std::string& message)\n        : std::runtime_error(message)\n    {}\n};\n\n/// <summary>\n/// Sets up Storage C++ SDK's http proxy by calling corresponding Azure Storage C++ API.\n/// Throws an HttpProxySetupException if it fails.\n/// proxy_config_string format is \"[[http[s]:]//][username[:password]@]host[:port]\".\n/// </summary>\nvoid SetStorageDefaultHttpProxy(const std::string& proxy_config_string);\n\n/// <summary>\n/// Checks if the proxy_setting_string is valid. Throws an HttpProxySetupException\n/// if it's invalid. Noop otherwise.\n/// proxy_config_string format is \"[[http[s]:]//][username[:password]@]host[:port]\".\n/// </summary>\nvoid CheckProxySettingString(const std::string& proxy_setting_string);\n\n/// <summary>\n/// Get the address of the proxy server for Storage SDK's default (global) http/https proxy.\n/// Address is of form \"//host[:port]\".\n/// </summary>\nstd::string GetStorageDefaultHttpProxyAddress();\n\n/// <summary>\n/// Set Azure Storage API http proxy to one of the following values, with\n/// first one tried first if it is not empty:\n/// - proxySetting\n/// - ordered list of environment variables in proxyEnvVars.\n/// Throw exception for any error.\nvoid SetStorageHttpProxy(std::string proxySetting, const std::vector<std::string> & proxyEnvVars);\n\n/// <summary>\n/// Remove Azure Storage API http proxy.\n/// </summary>\nvoid RemoveStorageHttpProxy();\n\n} // namespace MdsdUtil\n\n#endif // _HTTPPROXYSETUP_HH_\n"
  },
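  {
    "path": "Diagnostic/mdsd/examples/HttpProxySetupExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for MdsdUtil::SetStorageHttpProxy() declared in\n// HttpProxySetup.hh. The proxy address and environment-variable names are\n// illustrative.\n\n#include \"HttpProxySetup.hh\"\n#include <iostream>\n#include <vector>\n\nint main()\n{\n    // An explicit setting wins; were it empty, the listed environment\n    // variables would be consulted in order until one is non-empty.\n    std::vector<std::string> envVars { \"MDSD_http_proxy\", \"http_proxy\" };\n\n    try {\n        MdsdUtil::SetStorageHttpProxy(\"//proxy.example.com:3128\", envVars);\n        std::cout << \"proxy now: \"\n                  << MdsdUtil::GetStorageDefaultHttpProxyAddress() << '\\n';\n\n        MdsdUtil::RemoveStorageHttpProxy(); // back to no proxy\n    }\n    catch (const std::exception& e) {\n        // SetStorageHttpProxy wraps setup failures in std::runtime_error.\n        std::cerr << \"proxy setup failed: \" << e.what() << '\\n';\n    }\n    return 0;\n}\n"
  },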
  {
    "path": "Diagnostic/mdsd/mdsdutil/MdsTime.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"MdsTime.hh\"\n#include <string>\n#include <iomanip>\n#include <limits>\n#include <boost/regex.hpp>\n#include <boost/date_time/posix_time/conversion.hpp>\n#include \"Utility.hh\"\n\nMdsTime::MdsTime(const std::string &rfc3339)\n{\n\tstruct tm tm;\n\n\tstd::string decoded = MdsdUtil::UriDecode(rfc3339);\n\tstrptime(decoded.c_str(), \"%Y-%m-%dT%TZ\", &tm);\n\ttv.tv_sec = timegm(&tm);\n\ttv.tv_usec = 0;\n}\n\nMdsTime::MdsTime(const mi::Datetime& x)\n{\n        MI_Uint32 y,mon,d,h,min,s,us;\n        MI_Sint32 utc;\n        x.Get(y,mon,d,h,min,s,us,utc);\n\n        struct tm t;\n        t.tm_year = y-1900;\n        t.tm_mon = mon-1;\n        t.tm_mday = d;\n        t.tm_hour = h;\n        t.tm_min = min;\n        t.tm_sec = s;\n        t.tm_isdst = -1;  // let mktime() to decide daylight saving adjustment\n\n        time_t time1 = mktime(&t);\n\n        tv.tv_sec = time1 + 60 * utc;\n        tv.tv_usec = us;\n}\n\nMdsTime::MdsTime(const utility::datetime& dt) {\n\tauto interval = dt.to_interval();\n\ttv.tv_usec = (interval % ticks_per_second) / 10;\n\ttv.tv_sec = (interval / ticks_per_second) - epoch_difference;\n}\n\nMdsTime\nMdsTime::Max()\n{\n\treturn MdsTime(std::numeric_limits<time_t>::max());\n}\n\nMdsTime\nMdsTime::FromIS8601Duration(const std::string &is8601)\n{\n\t// Use a regex to extract the various fields. Since we only care about short-ish periods,\n\t// the regex will only handle days, hours, minutes, and seconds; we skip months and years\n\t// due to their variability. And we truncate fractions of seconds.\n\t//\n\tboost::regex re { \"P(([0-9]+)D)?T(([0-9]+)H)?(([0-9]+)M)?(([0-9]+)(.[0-9]+)?S)?\" };\n\t// Submatch 0 is the whole thing\n\t// Submatch 2 is the number of days\n\t// Submatch 4 is the number of hours\n\t// Submatch 6 is the number of minutes\n\t// Submatch 8 is the number of whole seconds\n\tboost::smatch matches;\n\tif (boost::regex_match(is8601, matches, re)) {\n\t\t// Got a match. 
\"matches\" contains the matching data\n\t\tunsigned long seconds = 0;\n#define FIELD_TO_SECONDS(FN, SECS) (std::stoul(std::string(matches[FN].first, matches[FN].second))) * SECS\n\t\tif (matches[2].matched) {\n\t\t\tseconds += FIELD_TO_SECONDS(2, 24*60*60);\n\t\t}\n\t\tif (matches[4].matched) {\n\t\t\tseconds += FIELD_TO_SECONDS(4, 60*60);\n\t\t}\n\t\tif (matches[6].matched) {\n\t\t\tseconds += FIELD_TO_SECONDS(6, 60);\n\t\t}\n\t\tif (matches[8].matched) {\n\t\t\tseconds += FIELD_TO_SECONDS(8, 1);\n\t\t}\n\t\treturn MdsTime(seconds);\n\t} else {\n\t\treturn MdsTime(0);\n\t}\n}\n\nunsigned long long\nMdsTime::to_FILETIME() const\n{\n\treturn  (epoch_difference + (unsigned long long)tv.tv_sec) * ticks_per_second \n\t      + (unsigned long long)tv.tv_usec * 10;\n}\n\nboost::posix_time::ptime\nMdsTime::to_ptime() const\n{\n\treturn boost::posix_time::from_time_t(tv.tv_sec) + boost::posix_time::microseconds(tv.tv_usec);\n}\n\nboost::posix_time::time_duration\nMdsTime::to_duration() const\n{\n\treturn boost::posix_time::seconds(tv.tv_sec) + boost::posix_time::microseconds(tv.tv_usec);\n}\n\nMdsTime\nMdsTime::RoundTenDay() const\n{\n\tunsigned long long ft = to_FILETIME();\n\n\tft -= ft % (10ULL * 24ULL * 3600ULL * ticks_per_second);\n\n\treturn MdsTime( (ft / ticks_per_second) - epoch_difference);\n}\n\nunsigned long long\nMdsTime::to_DateTime() const\n{\n\treturn ticks_per_second * ((unsigned long long)tv.tv_sec + 62135596800ULL) + 10ULL * tv.tv_usec;\n}\n\n// cpprest/PPLX uses Windows FILETIME as its utility::datetime datatype.\nutility::datetime\nMdsTime::to_pplx_datetime() const\n{\n\treturn utility::datetime() + to_FILETIME();\n}\n\n\nstd::ostream&\noperator<<(std::ostream& os, const MdsTime& mt)\n{\n\tif (mt.tv.tv_sec == 0 && mt.tv.tv_usec == 0) {\n\t\tos << std::string(\"1601-01-01T00:00:00.0000001Z\");\n\t} else {\n\t\tstruct tm zulu;\n\t\tchar timebuf[100];\n\n\t\t(void) gmtime_r(&(mt.tv.tv_sec), &zulu);\n\t\tsize_t n = strftime(timebuf, sizeof(timebuf), \"%Y-%m-%dT%H:%M:%S\", &zulu);\n\n\t\t// Note that usec is microseconds, but the Windows DateTime is precise to 100ns. 
We hard-code\n\t\t// an extra 0 to match the required precision.\n\n\t\tos << std::string { timebuf, n };\n\t\tos << \".\" << std::setw(6) << std::setfill('0') << static_cast<unsigned long>(mt.tv.tv_usec) << \"0Z\";\n\t}\n\n\treturn os;\n}\n\nstd::string\nMdsTime::to_iso8601_utf8() const\n{\n\tstd::ostringstream buf;\n\tbuf << *this;\n\treturn buf.str();\n}\n\n// Rely on the fact that to_iso8601_utf8() produces characters that require only one octet; casting\n// each of those to a char16_t is sufficient to convert to UTF-16.\nstd::u16string\nMdsTime::to_iso8601_utf16() const\n{\n\tstd::string utf8 = to_iso8601_utf8();\n\tstd::u16string result;\n\tresult.reserve(utf8.length());\n\n\tfor (const auto & c : utf8) {\n\t\tresult.push_back(static_cast<std::u16string::value_type>(c));\n\t}\n\treturn result;\n}\n\nstd::string\nMdsTime::to_strftime(const char* format) const\n{\n\tif (format == nullptr || *format == '\\0') {\n\t\treturn std::string();\n\t}\n\n\tstruct tm timeParts;\n\ttime_t time_t_val = to_time_t();\n\t(void)gmtime_r(&time_t_val, &timeParts);\n\tchar timebuf[256];\n\tsize_t n = strftime(timebuf, sizeof(timebuf), format, &timeParts);\n\tif (n == 0) { // Too long an output (in rare occasions)\n\t\tsize_t max_len = strlen(format) * 10;\t// Hopefully 10 times is big enough!\n\t\tchar* buf = static_cast<char*>(malloc(max_len));\n\t\tn = strftime(buf, max_len, format, &timeParts);\n\t\tif (n == 0) { // Still too big???!!!\n\t\t\tthrow std::runtime_error(std::string(\"MdsTime::to_strftime(): Too big an output string for format \\\"\")\n\t\t\t                         .append(format).append(\"\\\"\"));\n\t\t}\n\t\tstd::string result(buf, n);\n\t\tfree(buf);\n\t\treturn result;\n\t}\n\n\treturn std::string(timebuf,n);\n}\n\nvoid\nMdsTime::GetYMD(std::ostream& strm) const\n{\n\tstruct tm brokendown;\n\t(void)gmtime_r(&(tv.tv_sec), &brokendown);\n\n\tstrm << brokendown.tm_year+1900;\n\tstrm << std::setfill('0') << std::setw(2) << brokendown.tm_mon+1;\n\tstrm << std::setfill('0') << std::setw(2) << brokendown.tm_mday;\n}\n\nMdsTime&\nMdsTime::operator+=(const MdsTime &right)\n{\n\ttv.tv_sec += right.tv.tv_sec;\n\ttv.tv_usec += right.tv.tv_usec;\n\ttime_t sec = tv.tv_usec / 1000000;\n\tif (sec) {\n\t\ttv.tv_sec += sec;\n\t\ttv.tv_usec -= 1000000*sec;\n\t}\n\treturn *this;\n}\n\nMdsTime\noperator+(const MdsTime &left, const MdsTime &right)\n{\n\tMdsTime answer(left);\n\tanswer += right;\n\treturn answer;\n}\n\nMdsTime\noperator+(const MdsTime& left, time_t seconds)\n{\n    MdsTime answer(left);\n    answer.tv.tv_sec += seconds;\n    return answer;\n}\n\nMdsTime&\nMdsTime::operator-=(const MdsTime &right)\n{\n\ttv.tv_sec -= right.tv.tv_sec;\n\ttv.tv_usec -= right.tv.tv_usec;\n\twhile (tv.tv_usec < 0) {\n\t\ttv.tv_usec += 1000000;\n\t\ttv.tv_sec -= 1;\n\t}\n\n\treturn *this;\n}\n\nMdsTime\noperator-(const MdsTime &left, const MdsTime &right)\n{\n\tMdsTime answer(left);\n\tanswer -= right;\n\treturn answer;\n}\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/MdsTime.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _MDSTIME_HH_\n#define _MDSTIME_HH_\n\n#include <iostream>\n#include <ctime>\nextern \"C\" {\n#include <sys/time.h>\n}\n#include <boost/date_time/posix_time/posix_time_types.hpp>\n#include <micxx/datetime.h>\n#include <cpprest/asyncrt_utils.h>\n\nclass MdsTime\n{\n\t// <summary>This friend function outputs the ISO8601 formatted string representing the time\n\t// to the stream</summary>\n\tfriend std::ostream& operator<<(std::ostream& os, const MdsTime& ft);\n\tfriend MdsTime operator+(const MdsTime &left, const MdsTime &right);\n\tfriend MdsTime operator-(const MdsTime &left, const MdsTime &right);\n\tfriend MdsTime operator+(const MdsTime &left, time_t seconds);\npublic:\n\tMdsTime() { Touch(); }\n\tMdsTime(time_t sec, suseconds_t usec = 0) : tv { sec, usec } {}\n\tMdsTime(const struct timeval &src) { tv.tv_sec = src.tv_sec; tv.tv_usec = src.tv_usec; }\n\tMdsTime(const MdsTime &src) { tv.tv_sec = src.tv.tv_sec; tv.tv_usec = src.tv.tv_usec; }\n\tMdsTime(const std::string &rfc3339);\n\tMdsTime(const mi::Datetime&);\n\tMdsTime(const utility::datetime&);\n\n\t/// <summary>Create an MdsTime initialized to \"now\". Identical to the default constructor, but it's\n\t/// explicit about what's going on so that code is easier for people to understand.</summary>\n\tstatic MdsTime Now() { return MdsTime(); }\n\t/// <summary>Create an MdsTime set to the maximum supported time for the implementation (some\n\t/// time in 2038, for 32-bit time_t values).</summary>\n\tstatic MdsTime Max();\n\n\tstatic MdsTime FromIS8601Duration(const std::string &is8601);\n\n\tvoid Touch() { (void)gettimeofday(&tv, 0); }\n\n\t// void RoundDown(time_t interval) { if (interval > 0) tv.tv_sec -= tv.tv_sec % interval; }\n\tMdsTime Round(time_t n) const { time_t sec = tv.tv_sec; if (n > 1) sec -= sec % n; return MdsTime(sec, 0); }\n\n\tMdsTime RoundTenDay() const;\n\n\tdouble Elapsed() const { MdsTime res; res -= *this; return (double(res.tv.tv_sec) + (double(res.tv.tv_usec) / 1000000.)); }\n\n\tMdsTime& operator=(const MdsTime& src) { tv.tv_sec = src.tv.tv_sec; tv.tv_usec = src.tv.tv_usec; return *this; }\n\tMdsTime& operator=(const time_t seconds) { tv.tv_sec = seconds; tv.tv_usec = 0; return *this; }\n\n\tMdsTime& operator+=(const MdsTime &right);\n\tMdsTime& operator-=(const MdsTime &right);\n\n\tMdsTime& operator+=(const time_t seconds) { tv.tv_sec += seconds; return *this; }\n\n\tbool operator==(const MdsTime &t) const { return (tv.tv_sec == t.tv.tv_sec && tv.tv_usec == t.tv.tv_usec); }\n\tbool operator!=(const MdsTime &t) const { return !(*this == t); }\n\tbool operator>=(const MdsTime &t) const { return (tv.tv_sec > t.tv.tv_sec || (tv.tv_sec == t.tv.tv_sec && tv.tv_usec >= t.tv.tv_usec)); }\n\tbool operator<(const MdsTime &t) const { return ! (*this >= t); }\n\tbool operator>(const MdsTime &t) const { return (tv.tv_sec > t.tv.tv_sec || (tv.tv_sec == t.tv.tv_sec && tv.tv_usec > t.tv.tv_usec)); }\n\tbool operator<=(const MdsTime &t) const { return ! (*this > t); }\n\n\t// Returns true if either tv_sec or tv_usec is true (i.e. 
non-zero)\n\texplicit operator bool() const { return (tv.tv_sec || tv.tv_usec); }\n\n\n\ttime_t to_time_t() const { return tv.tv_sec; }\n\n\tunsigned long long to_FILETIME() const;\n\tunsigned long long to_DateTime() const;\n\tboost::posix_time::ptime to_ptime() const;\n\tboost::posix_time::time_duration to_duration() const;\n\tutility::datetime to_pplx_datetime() const;\n\n\tvoid GetYMD(std::ostream& fn) const;\n\n\t/// <summary>Convert the time to an ISO 8601 string, encoded in UTF-8 (ASCII, technically, but they\n\t/// amount to the same thing for this particular string).</summary>\n\tstd::string to_iso8601_utf8() const;\n\n\t/// <summary>Convert the time to an ISO 8601 string, encoded in UTF-16.</summary>\n\tstd::u16string to_iso8601_utf16() const;\n\n\t/// <summary>Convert the time to a custom-formatted date-time string.\n\t/// Example format string: \"y=%Y/m=%m/d=%d/h=%H/m=%M\".\n\t/// The internal buffer size is limited to 256 bytes (including the null-terminating char)\n\t/// and this function may throw an exception if the resulting string is to be much bigger than that.\n\t/// Also, the format string should NOT result in a valid empty string (e.g., \"%p\", which might\n\t/// be an empty string in some locales), to simplify detection of such a situation of\n\t/// an exceedingly long result string. Note that the input format string is NOT checked\n\t/// for such a case. It's the caller's responsibility.</summary>\n\tstd::string to_strftime(const char* format) const;\n\n\t// 23:59:59.9999999 UTC, December 31, 9999 in the Gregorian calendar, exactly one 100-nanosecond tick before\n\t// 00:00:00 UTC, January 1, 10000\n\tstatic const unsigned long long MaxDateTimeTicks   = 3155378975999999999ULL;\n\n\t// This is a \"magic number\" date/time for MDS which is used in certain table entries when a fake timestamp\n\t// is required (e.g. MDS SchemasTable entries)\n\tstatic const unsigned long long FakeTimeStampTicks =  504911232000000001ULL;\n\nprivate:\n\tstruct timeval tv;\n\n\tconst unsigned long long ticks_per_second = 10000000ULL;\n\tconst unsigned long long epoch_difference = 11644473600ULL;\n\n};\n\nstd::ostream& operator<<(std::ostream& os, const MdsTime& ft);\nMdsTime operator+(const MdsTime &left, const MdsTime &right);\nMdsTime operator-(const MdsTime &left, const MdsTime &right);\n\n#endif // _MDSTIME_HH_\n\n// vim: se sw=8 :\n"
  },
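  {
    "path": "Diagnostic/mdsd/examples/MdsTimeExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch of MdsTime arithmetic and formatting from\n// MdsTime.hh; the values are arbitrary.\n\n#include \"MdsTime.hh\"\n#include <iostream>\n\nint main()\n{\n    MdsTime start = MdsTime::Now();\n\n    // ISO 8601 durations parse into second-granularity offsets.\n    MdsTime tenMinutes = MdsTime::FromIS8601Duration(\"PT10M\");\n    MdsTime deadline = start + tenMinutes;\n\n    // Round() truncates to a multiple of n seconds; handy for bucketing.\n    MdsTime bucket = start.Round(300); // 5-minute bucket\n\n    // operator<< renders the ISO 8601 / RFC 3339 form, with a hard-coded\n    // trailing 0 mimicking the 100ns precision of a Windows DateTime.\n    std::cout << \"now:      \" << start << '\\n'\n              << \"deadline: \" << deadline << '\\n'\n              << \"bucket:   \" << bucket.to_iso8601_utf8() << '\\n';\n    return 0;\n}\n"
  },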
  {
    "path": "Diagnostic/mdsd/mdsdutil/OpensslCert.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <openssl/x509.h>\n#include <cstring>\n#include <stdexcept>\n#include \"OpensslCert.hh\"\n\nbool OpensslCert::IsValid() const\n{\n    if (m_invalid)\n    {\n        return false;\n    }\n    if (!m_cert)\n    {\n        m_invalid = true;\n        return false;\n    }\n\n    ASN1_TIME* notBefore = X509_get_notBefore(m_cert.get());\n\n    int nDiffDays = 0;\n    int nDiffSeconds = 0;\n    if (!ASN1_TIME_diff(&nDiffDays, &nDiffSeconds, notBefore, NULL)) {\n        throw std::runtime_error(\"ASN1_TIME_diff() failed for checking notBefore time.\");\n    }\n\n    if (nDiffSeconds < 0 || nDiffDays < 0) {\n        return false;\n    }\n\n    ASN1_TIME* notAfter = X509_get_notAfter(m_cert.get());\n    if (!ASN1_TIME_diff(&nDiffDays, &nDiffSeconds, notAfter, NULL)) {\n        throw std::runtime_error(\"ASN1_TIME_diff() failed for checking notAfter time.\");\n    }\n\n    if (nDiffSeconds > 0 || nDiffDays > 0) {\n        return false;\n    }\n    return true;\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/OpensslCert.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _OPENSSLCERT_H_\n#define _OPENSSLCERT_H_\n\n#include <memory>\n#include <ctime>\n#include <string>\n#include <openssl/x509.h>\n\n/// <summary>\n/// Represents a certificate used for HTTPS connection to the service.\n/// </summary>\nclass OpensslCert\n{\npublic:\n    /// <summary>\n    /// Initializes a Certificate object.\n    /// </summary>\n    /// <param name=\"cert\">public certificate object</param>\n    /// <param name=\"privateKey\"> private key object. Can be NULL.</param>\n    /// <param name=\"thumbprint\">Certificate's thumbprint.</param>\n    OpensslCert(\n        const std::shared_ptr<X509> & cert,\n        const std::shared_ptr<EVP_PKEY> & privateKey,\n        const std::string& thumbprint) :\n        m_cert(cert),\n        m_privatekey(privateKey),\n        m_thumbprint(thumbprint),\n        m_invalid(false) \n    {\n    }\n\n    /// <summary>\n    /// Destroys Certificate object and releases all associated resources.\n    /// The certificate object and private key object in the contructor will be\n    /// freed here.\n    /// </summary>\n    ~OpensslCert()\n    {\n\n    }\n\n    /// <summary>\n    /// Gets a value indicating whether certificate is still valid.\n    /// </summary>\n    bool IsValid() const;\n\n    /// <summary>\n    /// Explicitly marks certificate as invalid..\n    /// </summary>\n    void Invalidate()\n    {\n        m_invalid = true;\n    }\n\n    void SetAsValid()\n    {\n        m_invalid = false;\n    }\n\n    /// <summary>\n    /// Gets the certificate's thumbprint.\n    /// </summary>\n    const std::string& GetThumbprint() const\n    {\n        return m_thumbprint;\n    }\n\n    /// <summary>\n    /// Gets the certificate object.\n    /// </summary>\n    X509* GetCert() const\n    {\n        return m_cert.get();\n    }\n\n    EVP_PKEY* GetPrivateKey() const\n    {\n        return m_privatekey.get();\n    }\n\nprivate:\n    std::shared_ptr<X509> m_cert;\n    std::shared_ptr<EVP_PKEY> m_privatekey;\n    std::string m_thumbprint;\n    mutable bool m_invalid;\n};\n\n\n#endif\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/OpensslCertStore.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <fstream>\n#include <openssl/pem.h>\n#include <algorithm>\n\n#include \"OpensslCert.hh\"\n#include \"OpensslCertStore.hh\"\n#include \"Utility.hh\"\n\nstatic std::string\nStringToUpper(\n    std::string strToConvert\n    )\n{\n    std::transform(strToConvert.begin(), strToConvert.end(), strToConvert.begin(), ::toupper);\n    return strToConvert;\n}\n\nstatic void\nCertDeleter(X509* cert)\n{\n    if (cert) {\n        X509_free(cert);\n    }\n}\n\nstatic void\nKeyDeleter(EVP_PKEY* pkey)\n{\n    if (pkey) {\n        EVP_PKEY_free(pkey);\n    }\n}\n\nOpensslCertStore::OpensslCertStore(\n    const std::string& certFile,\n    const std::string& privateKeyFile,\n    const std::string& sslDigest) :\n    m_CertFile(certFile),\n    m_PrivateKeyFile(privateKeyFile),\n    m_SslDigest(sslDigest)\n{\n    if (certFile.empty()) {\n        throw std::invalid_argument(\"OpensslCertStore: unexpected empty string for certFile\");\n    }\n    if (privateKeyFile.empty()) {\n        throw std::invalid_argument(\"OpensslCertStore: unexpected empty string for privateKeyFile\");\n    }\n    if (sslDigest.empty()) {\n        throw std::invalid_argument(\"OpensslCertStore: unexpected empty string for sslDigest\");\n    }\n}\n\nstd::shared_ptr<X509>\nOpensslCertStore::ReadCertFromFile()\n{\n    if (!MdsdUtil::IsRegFileExists(m_CertFile))\n    {\n        throw std::runtime_error(\"ReadCertFromFile(): failed to find certificate file: '\" + m_CertFile + \"'\");\n    }\n\n    FILE *fp = fopen(m_CertFile.c_str(), \"r\");\n    if (!fp) {\n        throw std::runtime_error(\"ReadCertFromFile(): failed to open certificate file: '\" + m_CertFile + \"'\");\n    }\n    MdsdUtil::FileCloser fcloser(fp);\n\n    X509 *cert = PEM_read_X509(fp, NULL, NULL, NULL);\n    if (!cert) {\n        throw std::runtime_error(\"ReadCertFromFile(): failed to read certificate file: '\" + m_CertFile + \"'\");\n    }\n\n    return std::shared_ptr<X509>(cert, CertDeleter);\n}\n\nstd::shared_ptr<EVP_PKEY>\nOpensslCertStore::ReadPrivateKeyFromFile()\n{\n    if (!MdsdUtil::IsRegFileExists(m_PrivateKeyFile))\n    {\n        throw std::runtime_error(\"ReadPrivateKeyFromFile(): failed to find privatekey file: '\" + m_PrivateKeyFile + \"'\");\n    }\n    FILE* fp = fopen(m_PrivateKeyFile.c_str(), \"r\");\n    if (!fp) {\n        throw std::runtime_error(\"ReadPrivateKeyFromFile(): failed to open privatekey file: '\" + m_PrivateKeyFile + \"'\");\n    }\n    MdsdUtil::FileCloser fcloser(fp);\n\n    EVP_PKEY* keyobj = PEM_read_PrivateKey(fp, NULL, NULL, NULL);\n    if (!keyobj)\n    {\n        throw std::runtime_error(\"ReadPrivateKeyFromFile(): failed to read privatekey file: '\" + m_PrivateKeyFile + \"'\");\n    }\n\n    return std::shared_ptr<EVP_PKEY>(keyobj, KeyDeleter);\n}\n\n\nconst EVP_MD*\nOpensslCertStore::GetCertDigest()\n{\n    OpenSSL_add_all_digests();\n    auto pdigest = EVP_get_digestbyname(m_SslDigest.c_str());\n    if (!pdigest) {\n        throw std::runtime_error(\"GetCertDigest: failed to get digest by name: \" +  m_SslDigest);\n    }\n    return pdigest;\n}\n\nstd::string\nOpensslCertStore::GetCertThumbprint(\n    X509* cert\n    )\n{\n    if (!cert) {\n        throw std::invalid_argument(\"GetCertThumbprint(): unexpected nullptr for cert\");\n    }\n\n    std::string thumbprint;\n    unsigned char mdarray[EVP_MAX_MD_SIZE];\n    const EVP_MD* pdigest = GetCertDigest();\n    unsigned int len = 0;\n    if 
(!X509_digest(cert, pdigest, mdarray, &len))\n    {\n        throw std::runtime_error(\"GetCertThumbprint(): failed at calling X509_digest(): out of memory\");\n    }   \n    else {\n        unsigned int w = 2;\n        size_t buflen = len*w+1;\n        char buf[buflen];\n        char* pbuf = (char*)buf;\n\n        for (unsigned int i = 0; i < len; i++)\n        {           \n            BIO_snprintf(pbuf, w+1, \"%02X\", mdarray[i]);\n            pbuf += w;\n        }\n        buf[buflen-1] = '\\0';\n        thumbprint = buf;\n\n        thumbprint = StringToUpper(thumbprint);\n    }\n\n    return thumbprint;\n}\n\n\nstd::shared_ptr<OpensslCert>\nOpensslCertStore::LoadCertificate(\n    const std::string& thumbprint\n    )\n{\n    if (thumbprint.empty())\n    {\n        throw std::invalid_argument(\"LoadCertificate(): unexpected empty string for thumbprint\");\n    }\n\n    std::shared_ptr<X509> certObj = ReadCertFromFile();\n    if (!certObj) {\n        throw std::runtime_error(\"LoadCertificate(): failed to get certificate\");\n    }\n\n    auto thumbprintFromFile = GetCertThumbprint(certObj.get());\n\n    auto thumbprintUpper = StringToUpper(thumbprint);\n    if (thumbprintFromFile != thumbprintUpper)\n    {\n        throw std::runtime_error(\"LoadCertificate(): given thumbprint \" + thumbprint +\n                                 \" doesn't match cert \" + m_CertFile + \" thumbprint \" + thumbprintFromFile);\n    }\n\n    std::shared_ptr<EVP_PKEY> pkey = ReadPrivateKeyFromFile();\n    if (!pkey)\n    {\n        throw std::runtime_error(\"LoadCertificate(): failed to get private key\");\n    }\n    return std::make_shared<OpensslCert>(certObj, pkey, thumbprintUpper);\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/OpensslCertStore.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef _OPENSSLCERTSTORE_HH__\n#define _OPENSSLCERTSTORE_HH__\n\n#include <memory>\n\nclass OpensslCert;\n\n/// <summary>\n/// Represents a OpenSSL certificate store.\n/// </summary>\nclass OpensslCertStore\n{\npublic:\n    /// <summary>\n    /// Initializes a CertificateStore object.\n    /// Throw exception if any of the input strings is empty.\n    /// </summary>\n    OpensslCertStore(\n        const std::string& certFile,\n        const std::string& privateKeyFile,\n        const std::string& sslDigest);\n\n    /// <summary>\n    /// Destroys the object and releases all associated resources.\n    /// </summary>\n    ~OpensslCertStore() { }\n\n    /// <summary>\n    /// Loads certificate from the store by certificate's thumb print.\n    /// Throw exception if any error.\n    /// </summary>\n    std::shared_ptr<OpensslCert> LoadCertificate(const std::string& thumbprint);\n\nprivate:\n    /// Read a public certificate object from the file. The file\n    /// must be a plain text file in PEM format.\n    /// Caller function needs to call X509_free(X509*) to free the object.\n    /// Return X509* object or NULL if any error.\n    std::shared_ptr<X509> ReadCertFromFile();\n\n    /// Read a private key object from the file. The file\n    /// must be in a plain text file in PEM format.\n    /// Caller function needs to call EVP_PKEY_free(EVP_PKEY*) to free the object.\n    /// Return EVP_PKEY* object or NULL if any error.\n    std::shared_ptr<EVP_PKEY> ReadPrivateKeyFromFile();\n\n    /// Get the certificate digest object.\n    /// If m_SslDigest is empty, or if any error occurs when using the given\n    /// non-empty SslDigest, return SHA1 digest object.        \n    const EVP_MD* GetCertDigest();\n\n    /// Return the thumbprint in upper case for the given cert object.\n    /// If any error, return \"\".\n    std::string GetCertThumbprint(X509* cert);\n\n\n    std::string m_CertFile;\n    std::string m_PrivateKeyFile;\n    std::string m_SslDigest;\n};\n\n\n#endif\n\n"
  },
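  {
    "path": "Diagnostic/mdsd/examples/CertStoreExample.cc",
    "content": "// EDITOR'S NOTE: This file is not part of the original repository. It is a\n// hypothetical usage sketch for OpensslCertStore / OpensslCert. The file\n// paths and thumbprint are placeholders, not real credentials.\n\n#include \"OpensslCert.hh\"\n#include \"OpensslCertStore.hh\"\n#include <iostream>\n\nint main()\n{\n    try {\n        // The store takes a PEM certificate, a PEM private key, and the\n        // digest name used to compute the thumbprint (\"SHA1\" here).\n        OpensslCertStore store(\"/etc/mdsd.d/example.crt\",\n                               \"/etc/mdsd.d/example.key\",\n                               \"SHA1\");\n\n        // LoadCertificate() recomputes the thumbprint from the certificate\n        // file and throws if it does not match the one supplied.\n        auto cert = store.LoadCertificate(\n            \"0123456789ABCDEF0123456789ABCDEF01234567\");\n\n        std::cout << \"loaded cert \" << cert->GetThumbprint()\n                  << (cert->IsValid() ? \" (within validity period)\\n\"\n                                      : \" (expired or not yet valid)\\n\");\n    }\n    catch (const std::exception& e) {\n        std::cerr << \"certificate load failed: \" << e.what() << '\\n';\n    }\n    return 0;\n}\n"
  },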
  {
    "path": "Diagnostic/mdsd/mdsdutil/Utility.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"Utility.hh\"\n#include \"MdsTime.hh\"\n#include <string>\n#include <sstream>\n#include <iomanip>\n#include <ctime>\n#include <cstring>\n#include <boost/tokenizer.hpp>\n#include <boost/regex.hpp>\n#include <algorithm>\n#include <cctype>\n#include <vector>\n\nextern \"C\" {\n#include <sys/uio.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <sys/file.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <dirent.h>\n#include <signal.h>\n#include <sys/time.h>\n#include <sys/resource.h>\n#include <sys/socket.h>\n#include <sys/un.h>\n}\n\n//////// Begin MdsdUtil namespace\n\nnamespace MdsdUtil {\n\nvoid\nReplaceSubstring(std::string& str, const std::string& from, const std::string& to)\n{\n\tsize_t pos = 0;\n\twhile ((pos = str.find(from, pos)) != std::string::npos) {\n\t\tstr.replace(pos, from.length(), to);\n\t\tpos += to.length();\n\t}\n}\n\n// Replace escaped characters with the actual character. Because ampersand is an escapable character,\n// it's possible than an escape sequence was, itself, escaped; this function iterates until there are\n// no remaining escape sequences.\nstd::string\nUnquoteXmlAttribute(std::string target)\n{\n\tstatic std::vector<std::pair<std::string, std::string>>\n\tconversions { { \"&lt;\", \"<\" }, { \"&gt;\", \">\" }, { \"&apos;\", \"'\" }, { \"&#39;\", \"'\" }, { \"&quot;\", \"\\\"\" } };\n\tbool more_work;\n\n\tif (target.length() >= 4) {\t// Shortest escape sequence is 4 characters\n\t\tdo {\n\t\t\tsize_t before = target.length();\n\t\t\t// If I replace an &amp; escape, I may have created a new legit escape sequence\n\t\t\tReplaceSubstring(target, \"&amp;\", \"&\");\n\t\t\tReplaceSubstring(target, \"&#38;\", \"&\");\n\t\t\tmore_work = (target.length() < before);\n\t\t} while (more_work);\n\t\tfor (const auto & repl : conversions) {\n\t\t\tReplaceSubstring(target, repl.first, repl.second);\n\t\t}\n\t}\n\treturn target;\n}\n\nbool\nNotValidName(const std::string& str)\n{\n\tif (str.length() == 0 || str.find(\" \") != std::string::npos)\n\t\treturn true;\n\n\treturn false;\n}\n\nstd::string\nJoin(const std::vector<std::string>& vec, const std::string& sep)\n{\n\tstd::string result { \"\" };\n\n\tfor (auto it = vec.begin(); it != vec.end(); /*deliberately empty*/ ) {\n\t\tresult.append(*it);\n\t\t++it;\n\t\tif (it != vec.end()) {\n\t\t\tresult.append(sep);\n\t\t}\n\t}\n\treturn result;\n}\n\nunsigned long long\nEasyHash(const std::string& str)\n{\n\tunsigned long long strhash = 0;\n\tconst unsigned long long c_Multiplier = 37;\n\tfor(size_t i = 0; i < str.size(); i++) {\n\t\tstrhash = c_Multiplier * strhash + (unsigned int)str[i];\n\t}\n\treturn strhash;\n}\n\nstd::string\nZeroFill(unsigned long long num, size_t len)\n{\n\tstd::stringstream s1;\n\ts1 << std::setfill('0') << std::setw(len) << num;\n\treturn s1.str();\n}\n\n// Convert seconds+microseconds to an RFC3339 string with 100ns resolution\nstd::string\nRfc3339(const time_t sec, const suseconds_t usec)\n{\n\t// Special case: 0.0 is the beginning of the Windows DateTime Epoch\n\tif (sec == 0 && usec == 0) {\n\t\treturn std::string(\"1601-01-01T00:00:00.0000001Z\");\n\t}\n\n\tstruct tm zulu;\n\tchar timebuf[100];\n\n\t(void) gmtime_r(&sec, &zulu);\n\tsize_t n = strftime(timebuf, sizeof(timebuf), \"%Y-%m-%dT%H:%M:%S\", &zulu);\n\n\t// Note that usec is microseconds, but the Windows DateTime is precise to 100ns. 
We hard-code\n\t// an extra 0 to match the required precision.\n\n\tstd::ostringstream result;\n\tresult << std::string { timebuf, n };\n\tresult << \".\" << std::setw(6) << std::setfill('0') << static_cast<unsigned long>(usec) << \"0Z\";\n\n\treturn result.str();\n}\n\n// Get the RFC3339 form of the current date/time\nstd::string\nRfc3339()\n{\n\tstruct timeval tv;\n\n\t(void)gettimeofday(&tv, 0);\n\n\treturn MdsdUtil::Rfc3339(tv.tv_sec, tv.tv_usec);\n}\n\n// Code below is based on and slightly modified from Glib's g_time_Val_from_iso8601(),\n// which is not really fully ISO8601-compliant.\n//\n// Valid examples for this code:\n//    \"2015-12-17T08:53:45.123456Z\" (Z: UTC)\n//    \"20151217T085345.123456Z\"\n//    \"2015-12-17T08:53:45.123456\" (Local timezone)\n//    \"2015-12-17T08:53:45\" (Fractional second is optional)\n//    \"2015-12-17T08:53:45,123-08:00\" (UTC-08:00, ',' can be used for fractional second)\n//    \"2015-12-17\" (Time portition is optional--treated as 00:00:00)\n//    \"2015-12-17-08:00\" (TZD can be still given with date only)\n//\n// Non-delimited date/time with reduced size (e.g., YYYY instead of YYYYMMDD) will\n// result in incorrect result, and it's not supported by this code.\n//   (e.g., \"20151217T0853\" will be treated as \"2015-12-17T00:08:53\", not \"2015-12-17T08:53:00.000\"\n//          as stated in ISO8601)\nbool\nTimeValFromIso8601Restricted(const char* datetime, long& secondsOut, long& uSecondsOut)\n{\n\tif (datetime == nullptr)\n\t{\n\t\treturn false;\n\t}\n\n\twhile (isspace(*datetime))\n\t{\n\t\t++datetime;\n\t}\n\n\tif (*datetime == '\\0' || !isdigit(*datetime))\n\t{\n\t\treturn false;\n\t}\n\n\tstruct tm _tm;\n\tmemset(&_tm, 0, sizeof(_tm));\n\n\t// Date\n\tlong parsedVal = strtoul(datetime, (char**)&datetime, 10);\n\tbool isDateDelimited = false;\n\tif (*datetime != '-')\n\t{\n\t\t// YYYYMMDD\n\t\t_tm.tm_year = parsedVal / 10000 - 1900;\n\t\t_tm.tm_mon = (parsedVal % 10000) / 100 - 1; // January is 0\n\t\t_tm.tm_mday = parsedVal % 100;\n\t}\n\telse\n\t{\n\t\t// YYYY-MM-DD\n\t\tisDateDelimited = true;\n\t\t_tm.tm_year = parsedVal - 1900;\n\t\t++datetime;\n\t\t_tm.tm_mon = strtoul(datetime, (char**)&datetime, 10) - 1;\n\t\tif (*datetime != '-')\n\t\t{\n\t\t\treturn false;\n\t\t}\n\t\t++datetime;\n\t\t_tm.tm_mday = strtoul(datetime, (char**)&datetime, 10);\n\t}\n\n\tif (*datetime == 'T')\n\t{\n\t\t// Time\n\t\t++datetime;\n\t\tif (!isdigit(*datetime))\n\t\t{\n\t\t\treturn false;\n\t\t}\n\n\t\tparsedVal = strtoul(datetime, (char**)&datetime, 10);\n\t\tconst bool isTimeDelimited = *datetime == ':';\n\t\tif (isTimeDelimited != isDateDelimited)\n\t\t{ // Time must be delimited if and only if date is delimited.\n\t\t\treturn false;\n\t\t}\n\n\t\tif (!isTimeDelimited)\n\t\t{\n\t\t\t// hhmmss\n\t\t\t_tm.tm_hour = parsedVal / 10000;\n\t\t\t_tm.tm_min = (parsedVal % 10000) / 100;\n\t\t\t_tm.tm_sec = parsedVal % 100;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// hh:mm:ss\n\t\t\t_tm.tm_hour = parsedVal;\n\t\t\t++datetime;\n\t\t\t_tm.tm_min = strtoul(datetime, (char**)&datetime, 10);\n\t\t\tif (*datetime != ':')\n\t\t\t{\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\t++datetime;\n\t\t\t_tm.tm_sec = strtoul(datetime, (char**)&datetime, 10);\n\t\t}\n\n\t\t// Fractional seconds\n\t\tuSecondsOut = 0;\n\t\tif (*datetime == '.' 
|| *datetime == ',')\n\t\t{\n\t\t\t++datetime;\n\t\t\tlong multiplier = 100000;\n\t\t\tparsedVal = 0;\n\t\t\twhile (isdigit(*datetime))\n\t\t\t{\n\t\t\t\tparsedVal += multiplier * (*datetime - '0');\n\t\t\t\tmultiplier /= 10;\n\t\t\t\t++datetime;\n\t\t\t}\n\t\t\tuSecondsOut = parsedVal;\n\t\t}\n\t}\n\n\t// Timezone\n\tlong offsetSec = 0;\n\tauto makeTimeFunc = timegm; // To switch between UTC or local time. UTC by default.\n\tif (*datetime == '+' || *datetime == '-')\n\t{\n\t\tint sign = *datetime == '+' ? -1 : 1; // Note the sign inversion\n\t\t++datetime;\n\t\tparsedVal = strtoul(datetime, (char**)&datetime, 10);\n\n\t\tconst bool isTimezoneDelimited = *datetime == ':';\n\t\tif (isTimezoneDelimited != isDateDelimited)\n\t\t{ // Timezone must be delimited if and only if date is delimited.\n\t\t\treturn false;\n\t\t}\n\n\t\tif (!isTimezoneDelimited)\n\t\t{\n\t\t\t// hhmm\n\t\t\toffsetSec = 3600 * (parsedVal / 100) + 60 * (parsedVal % 100);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// hh:mm\n\t\t\toffsetSec = 3600 * parsedVal;\n\t\t\t++datetime;\n\t\t\toffsetSec += strtoul(datetime, (char**)&datetime, 10);\n\t\t}\n\t\toffsetSec *= sign;\n\t}\n\telse if (*datetime != 'Z') // No UTC, no offset, so local time\n\t{\n\t\t_tm.tm_isdst = -1;\n\t\tmakeTimeFunc = mktime;\n\t}\n\telse // *datetime == 'Z', nothing else to do except skipping the char\n\t{\n\t\t++datetime;\n\t}\n\n\t// Finally make time (sec since Epoch)\n\tsecondsOut = makeTimeFunc(&_tm) + offsetSec;\n\n\t// Make sure no other non-whitespace char follows\n\twhile (isspace(*datetime))\n\t{\n\t\t++datetime;\n\t}\n\n\treturn *datetime == '\\0';\n}\n\ntime_t\nIntervalStart(const time_t sec, const int interval)\n{\n\tif (interval == 0) {\n\t\treturn sec;\n\t} \n\n\treturn sec - (sec % interval);\n}\n\nvoid\nParseQueryString(const std::string& qry, std::map<std::string, std::string> & elements)\n{\n\ttypedef boost::tokenizer<boost::char_separator<char> > tokenizer;\n\tboost::char_separator<char> ampsep(\"&\");\n\n\ttokenizer tokens(qry, ampsep);\n\tfor (const std::string& tok : tokens) {\n\t\tsize_t pos = tok.find(\"=\");\n\t\tif (pos != std::string::npos && pos > 0) {\n\t\t\telements[tok.substr(0, pos)] = tok.substr(pos+1);\n\t\t}\n\t}\n}\n\nbool\nIsEmptyOrWhiteSpace(const std::string& str)\n{\n\treturn std::all_of(str.cbegin(), str.cend(), isspace);\n}\n\nstd::string\nUriDecode(const std::string &src)\n{\n\tstd::string result;\n\tfor (size_t n = 0; n < src.length(); n++) {\n\t\tif (src[n] == '%') {\n\t\t\tint decoded = 0;\n\t\t\tstd::istringstream str(src.substr(n+1, 2));\n\t\t\tstr >> std::hex >> decoded;\n\t\t\tresult += static_cast<char>(decoded);\n\t\t\tn += 2;\n\t\t} else {\n\t\t\tresult += src[n];\n\t\t}\n\t}\n\n\treturn result;\n}\n\nbool\nto_bool(const std::string & val)\n{\n\tif (0 == strcasecmp(val.c_str(), \"true\") || val == \"1\") {\n\t\treturn true;\n\t}\n\treturn false;\n}\n\nstd::string\nto_lower(const std::string & input)\n{\n\tstd::string results;\n\tresults.reserve(input.length());\n\tstd::transform(input.begin(), input.end(), std::insert_iterator<std::string>(results, results.begin()), ::tolower);\n\treturn results;\n}\n\nunsigned long long\nMurmurHash64(const std::string &input, unsigned long seed = 0)\n{\n\tconst unsigned long C1 = 0x239b961b;\n\tconst unsigned long C2 = 0xab0e9789;\n\tconst unsigned long C3 = 0x561ccd1b;\n\tconst unsigned long C4 = 0x0bcaa747;\n\tconst unsigned long C5 = 0x85ebca6b;\n\tconst unsigned long C6 = 0xc2b2ae35;\n \n\tauto length = input.size();\n\tconst char *data = input.c_str();\n \n\tunsigned long 
time_t\nIntervalStart(const time_t sec, const int interval)\n{\n\tif (interval == 0) {\n\t\treturn sec;\n\t}\n\n\treturn sec - (sec % interval);\n}\n\nvoid\nParseQueryString(const std::string& qry, std::map<std::string, std::string> & elements)\n{\n\ttypedef boost::tokenizer<boost::char_separator<char> > tokenizer;\n\tboost::char_separator<char> ampsep(\"&\");\n\n\ttokenizer tokens(qry, ampsep);\n\tfor (const std::string& tok : tokens) {\n\t\tsize_t pos = tok.find(\"=\");\n\t\tif (pos != std::string::npos && pos > 0) {\n\t\t\telements[tok.substr(0, pos)] = tok.substr(pos+1);\n\t\t}\n\t}\n}\n\nbool\nIsEmptyOrWhiteSpace(const std::string& str)\n{\n\t// Cast to unsigned char before calling isspace(); passing a negative char value is undefined behavior.\n\treturn std::all_of(str.cbegin(), str.cend(), [](unsigned char c) { return isspace(c) != 0; });\n}\n\nstd::string\nUriDecode(const std::string &src)\n{\n\tstd::string result;\n\tfor (size_t n = 0; n < src.length(); n++) {\n\t\tif (src[n] == '%') {\n\t\t\tint decoded = 0;\n\t\t\tstd::istringstream str(src.substr(n+1, 2));\n\t\t\tstr >> std::hex >> decoded;\n\t\t\tresult += static_cast<char>(decoded);\n\t\t\tn += 2;\n\t\t} else {\n\t\t\tresult += src[n];\n\t\t}\n\t}\n\n\treturn result;\n}\n\nbool\nto_bool(const std::string & val)\n{\n\tif (0 == strcasecmp(val.c_str(), \"true\") || val == \"1\") {\n\t\treturn true;\n\t}\n\treturn false;\n}\n\nstd::string\nto_lower(const std::string & input)\n{\n\tstd::string results;\n\tresults.reserve(input.length());\n\t// Cast to unsigned char for the same reason as in IsEmptyOrWhiteSpace() above.\n\tstd::transform(input.begin(), input.end(), std::back_inserter(results),\n\t\t[](unsigned char c) { return static_cast<char>(::tolower(c)); });\n\treturn results;\n}\n\nunsigned long long\nMurmurHash64(const std::string &input, unsigned long seed = 0)\n{\n\tconst unsigned long C1 = 0x239b961b;\n\tconst unsigned long C2 = 0xab0e9789;\n\tconst unsigned long C3 = 0x561ccd1b;\n\tconst unsigned long C4 = 0x0bcaa747;\n\tconst unsigned long C5 = 0x85ebca6b;\n\tconst unsigned long C6 = 0xc2b2ae35;\n\n\tauto length = input.size();\n\tconst char *data = input.c_str();\n\n\tunsigned long h1 = seed;\n\tunsigned long h2 = seed;\n\n\tsize_t index = 0;\n\twhile (index + 7 < length)\n\t{\n\t\tunsigned long k1 = (unsigned long)(data[index + 0] | data[index + 1] << 8 | data[index + 2] << 16 | data[index + 3] << 24);\n\t\tunsigned long k2 = (unsigned long)(data[index + 4] | data[index + 5] << 8 | data[index + 6] << 16 | data[index + 7] << 24);\n\n\t\tk1 *= C1;\n\t\tk1 = RotateLeft(k1, 15);\n\t\tk1 *= C2;\n\t\th1 ^= k1;\n\t\th1 = RotateLeft(h1, 19);\n\t\th1 += h2;\n\t\th1 = (h1 * 5) + C3;\n\n\t\tk2 *= C2;\n\t\tk2 = RotateLeft(k2, 17);\n\t\tk2 *= C1;\n\t\th2 ^= k2;\n\t\th2 = RotateLeft(h2, 13);\n\t\th2 += h1;\n\t\th2 = (h2 * 5) + C4;\n\n\t\tindex += 8;\n\t}\n\n\tint tail = length - index;\n\tif (tail > 0)\n\t{\n\t\tunsigned long k1 =\n\t\t\t(tail >= 4) ? (unsigned long)(data[index + 0] | data[index + 1] << 8 | data[index + 2] << 16 | data[index + 3] << 24) :\n\t\t\t(tail == 3) ? (unsigned long)(data[index + 0] | data[index + 1] << 8 | data[index + 2] << 16) :\n\t\t\t(tail == 2) ? (unsigned long)(data[index + 0] | data[index + 1] << 8) :\n\t\t\t\t      (unsigned long)data[index + 0];\n\n\t\tk1 *= C1;\n\t\tk1 = RotateLeft(k1, 15);\n\t\tk1 *= C2;\n\t\th1 ^= k1;\n\n\t\tif (tail > 4)\n\t\t{\n\t\t\tunsigned long k2 =\n\t\t\t  (tail == 7) ? (unsigned long)(data[index + 4] | data[index + 5] << 8 | data[index + 6] << 16) :\n\t\t\t  (tail == 6) ? (unsigned long)(data[index + 4] | data[index + 5] << 8) :\n\t\t\t\t        (unsigned long)data[index + 4];\n\n\t\t\tk2 *= C2;\n\t\t\tk2 = RotateLeft(k2, 17);\n\t\t\tk2 *= C1;\n\t\t\th2 ^= k2;\n\t\t}\n\t}\n\n\th1 ^= (unsigned long)length;\n\th2 ^= (unsigned long)length;\n\n\th1 += h2;\n\th2 += h1;\n\n\th1 ^= h1 >> 16;\n\th1 *= C5;\n\th1 ^= h1 >> 13;\n\th1 *= C6;\n\th1 ^= h1 >> 16;\n\n\th2 ^= h2 >> 16;\n\th2 *= C5;\n\th2 ^= h2 >> 13;\n\th2 *= C6;\n\th2 ^= h2 >> 16;\n\n\th1 += h2;\n\th2 += h1;\n\n\treturn ((unsigned long long)h2 << 32) | (unsigned long long)h1;\n}\n\n
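// The mixing steps above follow the 32-bit MurmurHash3 scheme over two 32-bit lanes,\n// packed into one 64-bit result as (h2 << 32) | h1. Illustrative use (hypothetical\n// names): auto bucket = MurmurHash64(partitionKey) % bucketCount;\n\n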
std::string\nGetErrnoStr(int errnum)\n{\n\tchar errorstr[256];\n\tchar* errRC = strerror_r(errnum, errorstr, sizeof(errorstr));\n\treturn std::string(errRC);\n}\n\n// Write the buffer, followed by a newline, to the fd. This appears to be a lot of code,\n// but it does the job in a single syscall without any string copies or construction, and\n// throws a std::system_error if writev() fails or a std::runtime_error on a short write.\n// A distinct would_block exception is thrown for EWOULDBLOCK so it's easier to handle.\nvoid\nWriteBufferAndNewline(int fd, const char * buf, size_t len)\n{\n\tif (buf == nullptr) {\n\t\tthrow std::invalid_argument(\"Invalid argument; cannot be nullptr\");\n\t}\n\n\tstruct iovec iov[2];\n\tssize_t total;\n\tchar newline = '\\n';\n\n\t// Deliberately cast the const away. The C++ standard permits this as long as the\n\t// caller doesn't actually try to write to the const object. The POSIX\n\t// standard defines iovec::iov_base as a void* so the struct definition can be\n\t// shared with readv() and writev().\n\tiov[0].iov_base = static_cast<void*>(const_cast<char*>(buf));\n\tiov[0].iov_len = len;\n\ttotal = len;\n\n\tiov[1].iov_base = static_cast<void*>(&newline);\n\tiov[1].iov_len = 1;\n\ttotal += 1;\n\n\tssize_t result = writev(fd, iov, sizeof(iov)/sizeof(struct iovec));\n\tif (result == -1) {\n\t\tauto saved_errno = errno;\n\t\tif (EWOULDBLOCK == errno) {\n\t\t\tthrow would_block();\n\t\t} else {\n\t\t\tthrow std::system_error(saved_errno, std::system_category(), \"writev() failed.\");\n\t\t}\n\t} else if (result != total) {\n\t\tstd::ostringstream msg;\n\t\tmsg << \"writev() short write: requested \" << total << \" but wrote \" << result;\n\t\tthrow std::runtime_error(msg.str());\n\t}\n}\n\nvoid\nWriteBufferAndNewline(int fd, const char * buf)\n{\n\tif (buf == nullptr) {\n\t\tthrow std::invalid_argument(\"Invalid argument; cannot be nullptr\");\n\t}\n\n\tMdsdUtil::WriteBufferAndNewline(fd, buf, strlen(buf));\n}\n\nvoid\nWriteBufferAndNewline(int fd, const std::string& msg)\n{\n\tMdsdUtil::WriteBufferAndNewline(fd, msg.c_str(), msg.length());\n}\n\n// Convert a multi-byte string (UTF-8) to a wide-char string. On Linux, a wstring is\n// a sequence of 32-bit wchar_t characters. The natural encoding would be UTF-32\n// (as provided by mbrtowc), but other encodings are possible. In particular, the\n// Windows platform expects wstring to be a sequence of 16-bit wchar_t encoded in\n// UTF-16. This function converts UTF-8 strings to the wide string expected by Windows\n// using the cpprest utf8_to_utf16 function (which returns a std::u16string) and\n// copying characters from that into an std::wstring.\nstd::wstring\nto_utf16(const std::string& input)\n{\n\tauto utf16_result = utility::conversions::utf8_to_utf16(input);\n\n\tstd::wstring result;\n\tresult.reserve(utf16_result.length());\n\n\tfor (const auto & c : utf16_result) {\n\t\tresult.push_back(static_cast<wchar_t>(c));\n\t}\n\n\treturn result;\n}\n\nbool\nCreateDirIfNotExists(const std::string& filepath, mode_t mode)\n{\n\tif (filepath.empty()) {\n\t\tthrow std::invalid_argument(\"Invalid, empty file path is given.\");\n\t}\n\n\tstruct stat sb;\n\tif (stat(filepath.c_str(), &sb)) {\n\t\tauto errnoCopy = errno;\n\t\tif (ENOENT != errnoCopy) {\n\t\t\tthrow std::system_error(errnoCopy, std::system_category(),\n\t\t\t\t\"stat() failed on file path '\" + filepath + \"'\");\n\t\t}\n\t\telse {\n\t\t\tif (mkdir(filepath.c_str(), mode)) {\n\t\t\t\tthrow std::system_error(errno, std::system_category(),\n\t\t\t\t\t\"Failed to mkdir for file path '\" + filepath + \"'\");\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\tif (!S_ISDIR(sb.st_mode)) {\n\t\tthrow std::runtime_error(\"File path '\" + filepath + \"' already exists and is not a directory.\");\n\t}\n\treturn false;\n}\n\n\nstd::string\nGetStorageAccountNameFromEndpointURL(const std::string& url)\n{\n    boost::regex re { R\"(^\\s*(https?://)?([^.]+)\\..*$)\" }; // Boost regex requires full string match, so \".*$\" at the end is needed.\n    // Submatch 0 is the whole string\n    // Submatch 2 is the account name\n    boost::smatch matches;\n    if (!boost::regex_match(url, matches, re))\n    {\n        throw std::runtime_error(\"Storage account name not found from storage endpoint URL: \" + url);\n    }\n\n    return std::string(matches[2].first, matches[2].second);\n}\n\n
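// e.g. GetStorageAccountNameFromEndpointURL(\"https://stgacct.blob.core.windows.net/\")\n// returns \"stgacct\"; a URL whose host has no '.'-separated labels makes the regex_match\n// fail, so the function throws std::runtime_error.\n\n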
std::string\nGetEnvironmentVariable(const std::string & VariableName)\n{\n\tchar * envariable = getenv(VariableName.c_str());\n\tif (!envariable) {\n\t\tthrow std::runtime_error(\"Variable '\" + VariableName + \"' not found in environment\");\n\t}\n\n\treturn std::string(envariable);\n}\n\nstd::string\nGetEnvironmentVariableOrEmpty(const std::string & VariableName)\n{\n\tchar * envariable = getenv(VariableName.c_str());\n\tif (!envariable) {\n\t\treturn std::string();\n\t}\n\n\treturn std::string(envariable);\n}\n\nstd::string\nGetHostname()\n{\n\tchar hostnameBuffer[HOST_NAME_MAX + 1];\n\t(void) gethostname(hostnameBuffer, sizeof(hostnameBuffer) - 1);\n\thostnameBuffer[sizeof(hostnameBuffer) - 1] = '\\0'; // gethostname() need not NUL-terminate on truncation\n\treturn std::string(hostnameBuffer);\n}\n\nstd::string\nGetTenDaySuffix()\n{\n\tstd::ostringstream strm;\n\tMdsTime::Now().RoundTenDay().GetYMD(strm);\n\treturn strm.str();\n}\n\n\nbool\nIsRegFileExists(\n    const std::string & filepath\n    )\n{\n    if (filepath.empty()) {\n        throw std::invalid_argument(\"IsRegFileExists(): invalid, empty file path is given.\");\n    }\n\n    struct stat sb;\n    if (stat(filepath.c_str(), &sb)) {\n        return false; // Do not read sb after a failed stat(); it is uninitialized.\n    }\n    return S_ISREG(sb.st_mode);\n}\n\nbool\nIsDirExists(\n    const std::string & filepath\n    )\n{\n    if (filepath.empty()) {\n        throw std::invalid_argument(\"IsDirExists(): invalid, empty file path is given.\");\n    }\n\n    struct stat sb;\n    if (stat(filepath.c_str(), &sb)) {\n        return false; // Same as IsRegFileExists(): sb is only valid when stat() succeeds.\n    }\n    return S_ISDIR(sb.st_mode);\n}\n\nvoid\nValidateDirRWXByUser(\n    const std::string & filepath\n    )\n{\n    const std::string funcname(__func__);\n    if (filepath.empty()) {\n        throw std::invalid_argument(funcname + \": invalid, empty file path is given.\");\n    }\n\n    struct stat sb;\n    if (0 != stat(filepath.c_str(), &sb)) {\n        throw std::system_error(errno, std::system_category(),\n            funcname + \": failed to stat() path: \" + filepath);\n    }\n\n    auto mode = sb.st_mode;\n    if (!S_ISDIR(mode)) {\n        throw std::runtime_error(funcname + \": invalid directory: \" + filepath);\n    }\n\n    if (0 != access(filepath.c_str(), R_OK | W_OK | X_OK)) {\n        throw std::system_error(errno, std::system_category(),\n            funcname + \": failed to access() path: \" + filepath);\n    }\n}\n\nbool\nRemoveFileIfExists(\n    const std::string & filepath\n    )\n{\n    if (!IsRegFileExists(filepath)) {\n        return false;\n    }\n    if (unlink(filepath.c_str())) {\n        std::string errmsg = MdsdUtil::GetErrnoStr(errno);\n        throw std::runtime_error(\"RemoveFileIfExists(): failed to remove file: '\" +\n                                 filepath + \"'. Reason: \" + errmsg);\n    }\n    return true;\n}\n\n
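// Both RemoveFileIfExists() above and RenameFileIfExists() below treat a missing\n// source file as a no-op and return false, so callers can tell \"nothing to do\"\n// apart from a successful removal or rename without a separate existence check.\n\n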
Reason: \" + errmsg);\n    }\n    return true;\n}\n\nvoid\nCopyFile(\n\tconst std::string & frompath,\n\tconst std::string & topath\n\t)\n{\n\tif (frompath.empty()) {\n\t\tthrow std::invalid_argument(\"CopyFile(): invalid, empty frompath is given.\");\n\t}\n\tif (topath.empty()) {\n\t\tthrow std::invalid_argument(\"CopyFile(): invalid, empty topath is given.\");\n\t}\n\n\tstruct TmpDeleter {\n\t\tstd::string m_filename;\n\t\tbool m_Delete = true;\n\t\tTmpDeleter(const std::string & filename) : m_filename(filename) {}\n\t\t~TmpDeleter() {\n\t\t\tif (m_Delete) {\n\t\t\t\tRemoveFileIfExists(m_filename);\n\t\t\t}\n\t\t}\n\t};\n\n\tint fromfd = open(frompath.c_str(), O_RDONLY);\n\tif (-1 == fromfd) {\n\t\tauto errmsg = GetErrnoStr(errno);\n\t\tthrow std::runtime_error(\"CopyFile(): failed to open fromfile '\" + frompath + \"'. Reason: \" + errmsg);\n\t}\n\tFdCloser fromFdCloser(fromfd);\n\n\tint tofd = open(topath.c_str(), O_CREAT | O_WRONLY, 0644);\n\tif (-1 == tofd) {\n\t\tauto errmsg = GetErrnoStr(errno);\n\t\tthrow std::runtime_error(\"CopyFile(): failed to open tofile '\" + topath + \"'. Reason: \" + errmsg);\n\t}\n\tFdCloser toFdCloser(tofd);\n\tTmpDeleter tmpDeleter(topath);\n\n\tssize_t bytesRead = 0;\n\tchar buf[4096];\n\twhile((bytesRead = read(fromfd, buf, sizeof(buf))) > 0) {\n\t\tif (write(tofd, buf, bytesRead) == -1) {\n\t\t\tauto errmsg = GetErrnoStr(errno);\n\t\t\tthrow std::runtime_error(\"CopyFile(): failed to write to file '\" + topath + \"'. Reason: \" + errmsg);\n\t\t}\n\t}\n\tif (-1 == bytesRead) {\n\t\tauto errmsg = GetErrnoStr(errno);\n\t\tthrow std::runtime_error(\"CopyFile(): failed to read from file '\" + frompath + \"'. Reason: \" + errmsg);\n\t}\n\n\ttmpDeleter.m_Delete = false;\n}\n\ntime_t\nGetLastModificationTime(\n\tconst std::string & filename\n\t)\n{\n\tstruct stat sb;\n\tauto rtn = stat(filename.c_str(), &sb);\n\tif (rtn) {\n\t\tthrow std::system_error(errno, std::system_category(), \"stat() failed on file '\" + filename + \"'.\");\n\t}\n\treturn sb.st_mtime;\n}\n\nstd::string\nGetMostRecentlyModifiedFile(\n\tconst std::vector<std::string> & filelist\n\t)\n{\n\tif (filelist.empty()) {\n\t\tthrow std::invalid_argument(\"filelist cannot be empty.\");\n\t}\n\n\tif (1 == filelist.size()) {\n\t\treturn filelist[0];\n\t}\n\n\tuint64_t max_mtime = 0;\n\tstd::string resultFilePath;\n\tconst uint64_t s2ns = 1000*1000*1000;\n\n\tfor (const auto f : filelist) {\n\t\tstruct stat sb;\n\t\tauto rtn = stat(f.c_str(), &sb);\n\t\tif (rtn) {\n\t\t\tthrow std::system_error(errno, std::system_category(), \"stat() failed on file '\" + f + \"'.\");\n\t\t}\n\n\t\tuint64_t mtime = sb.st_mtim.tv_nsec + ((uint64_t)sb.st_mtime)*s2ns;\n\n\t\tif (max_mtime < mtime) {\n\t\t\tmax_mtime = mtime;\n\t\t\tresultFilePath = f;\n\t\t}\n\t}\n\treturn resultFilePath;\n}\n\nvoid\nMaskSignal(bool isBlock, int signum)\n{\n    sigset_t ss;\n    if (sigemptyset(&ss)) {\n        throw std::system_error(errno, std::system_category(), \"sigemptyset() failed\");\n    }\n\n    if (sigaddset(&ss, signum)) {\n        throw std::system_error(errno, std::system_category(), \"sigaddset() failed\");\n    }\n\n    int how = isBlock? 
void\nMaskSignal(bool isBlock, int signum)\n{\n    sigset_t ss;\n    if (sigemptyset(&ss)) {\n        throw std::system_error(errno, std::system_category(), \"sigemptyset() failed\");\n    }\n\n    if (sigaddset(&ss, signum)) {\n        throw std::system_error(errno, std::system_category(), \"sigaddset() failed\");\n    }\n\n    int how = isBlock ? SIG_BLOCK : SIG_UNBLOCK;\n    if (sigprocmask(how, &ss, NULL)) {\n        throw std::system_error(errno, std::system_category(), \"sigprocmask() failed\");\n    }\n}\n\nvoid\nTouchFileUs(const std::string & filename)\n{\n    if (utimes(filename.c_str(), NULL)) {\n        throw std::system_error(errno, std::system_category(), \"utimes(\" + filename + \") failed\");\n    }\n}\n\nstd::string\nGetFileBasename(\n    const std::string & filepath\n    )\n{\n    auto p = filepath.find_last_of('/');\n    if (p == std::string::npos) {\n        return filepath;\n    }\n    return filepath.substr(p+1);\n}\n\n\nLockedFile::LockedFile(const std::string& filepath) : m_fd(-1)\n{\n    if (!filepath.empty()) { // Make LockedFile(\"\") the same as LockedFile()\n        Open(filepath);\n    }\n}\n\n\nvoid\nLockedFile::Open(const std::string& filepath)\n{\n    if (IsOpen()) {\n        if (filepath == m_filepath) { // same file is being opened/locked again, so just noop\n            return;\n        }\n        throw std::logic_error(\"LockedFile::Open(): The object is already holding a lock on a different file '\" + m_filepath + \"'\");\n    }\n\n    m_fd = open(filepath.c_str(), O_WRONLY | O_CREAT, 0644);\n    if (-1 == m_fd) {\n        auto errmsg = GetErrnoStr(errno);\n        throw std::runtime_error(\"LockedFile::Open(): failed to open file '\" + filepath + \"'. Reason: \" + errmsg);\n    }\n    if (flock(m_fd, LOCK_EX | LOCK_NB)) {\n        if (errno == EWOULDBLOCK) {\n            throw AlreadyLocked(\"LockedFile::Open() : File '\" + filepath + \"' is already locked\");\n        }\n        auto errmsg = GetErrnoStr(errno);\n        throw std::runtime_error(\"LockedFile::Open(): failed to lock file '\" + filepath + \"'. Reason: \" + errmsg);\n    }\n    m_filepath = filepath;\n}\n\n\nLockedFile::~LockedFile()\n{\n    Remove();\n}\n\n\nvoid\nLockedFile::WriteLine(const std::string& line) const\n{\n    if (!IsOpen()) {\n        throw std::runtime_error(\"LockedFile::WriteLine(): No file has been opened\");\n    }\n    WriteBufferAndNewline(m_fd, line);\n}\n\nvoid\nLockedFile::Remove()\n{\n    if (IsOpen()) {\n        // Prefer removing the file; if the unlink fails, fall back to truncating it.\n        try {\n            RemoveFileIfExists(m_filepath);\n        } catch(const std::runtime_error&) {\n            if (-1 == ftruncate(m_fd, 0)) {\n                auto errmsg = GetErrnoStr(errno);\n                throw std::runtime_error(\"LockedFile::Remove(): Remove failed and failed to truncate file '\" + m_filepath + \"'. Reason: \" + errmsg);\n            }\n        }\n        close(m_fd);\n        m_filepath.clear();\n        m_fd = -1;\n    }\n}\n\nvoid\nLockedFile::TruncateAndClose()\n{\n    if (IsOpen()) {\n        if (-1 == ftruncate(m_fd, 0)) {\n            auto errmsg = GetErrnoStr(errno);\n            throw std::runtime_error(\"LockedFile::TruncateAndClose(): failed to truncate file '\" + m_filepath + \"'. Reason: \" + errmsg);\n        }\n        close(m_fd);\n        m_filepath.clear();\n        m_fd = -1;\n    }\n}\n\n
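// Illustrative usage of LockedFile (the path is hypothetical):\n//   MdsdUtil::LockedFile pidfile(\"/var/run/mdsd.pid\"); // throws LockedFile::AlreadyLocked\n//   pidfile.WriteLine(std::to_string(getpid()));       // if another process holds the lock\n// The destructor removes (or, failing that, truncates) the file and releases the flock().\n\n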
Reason: \" + errmsg);\n        }\n        close(m_fd);\n        m_filepath.clear();\n        m_fd = -1;\n    }\n}\n\nstd::string\nStringNCopy(const char* src, size_t maxbytes)\n{\n    if (!src || maxbytes == 0) {\n        return std::string();\n    }\n\n    auto len = std::min(maxbytes, strlen(src));\n    std::vector<char> dest(len+1);\n    memcpy(dest.data(), src, len);\n    dest[len] = '\\0';\n    return std::string(dest.data());\n}\n\nstd::string GetTid()\n{\n    pthread_t tid = pthread_self();\n    return \"Tid-\" + std::to_string(tid);\n\n}\n\nFdCloser::~FdCloser()\n{\n    if (m_fd > -1) {\n        close(m_fd);\n    }\n}\n\nvoid FdCloser::Release()\n{\n    m_fd = -1;\n}\n\nint32_t\nGetNumFileResourceSoftLimit()\n{\n    struct rlimit rlim;\n    if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {\n        throw std::system_error(errno, std::system_category(), \"getrlimit() failed\");\n    }\n\n    if(RLIM_INFINITY == rlim.rlim_cur) {\n        return 0;\n    }\n    return static_cast<int32_t> (rlim.rlim_cur);\n}\n\n\nstatic std::vector<const char*>&\nGetSyslogSeverityStringVector()\n{\n\tstatic auto v = new std::vector<const char*> (\n\t{\n\t\t\"\\\"Emergency\\\"\", \"\\\"Alert\\\"\", \"\\\"Critical\\\"\", \"\\\"Error\\\"\",\n\t\t\"\\\"Warning\\\"\", \"\\\"Notice\\\"\", \"\\\"Informational\\\"\", \"\\\"Debug\\\"\"\n\t});\n\treturn *v;\n}\n\n\nconst char*\nGetSyslogSeverityStringFromValue(int severity)\n{\n\tauto v = GetSyslogSeverityStringVector();\n\tif (severity < 0 || severity >= (int) v.size()) {\n\t\treturn \"\\\"Other\\\"\";\n\t}\n\treturn v[(size_t) severity];\n}\n\nint\nCreateAndBindUnixSocket(\n    const std::string & sockFilePath\n    )\n{\n    if (sockFilePath.empty()) {\n        throw std::invalid_argument(\"CreateAndBindUnixSocket: socket filepath cannot be empty.\");\n    }\n\n    struct sockaddr_un addr;\n\n    // maxLength: maximum permitted length of a path of a Unix domain socket\n    constexpr auto maxLength = sizeof(addr.sun_path);\n    if (sockFilePath.size() > maxLength) {\n        throw std::invalid_argument(\"UnixSockAddr: socketfile '\" + sockFilePath +\n            \"' exceeds max allowed length \" + std::to_string(maxLength));\n    }\n\n    int fd = socket(AF_UNIX, SOCK_STREAM, 0);\n    if (-1 == fd)\n    {\n        throw std::system_error(errno, std::system_category(), \"socket(AF_UNIX, SOCK_STREAM)\");\n    }\n\n    memset(&addr, 0, sizeof(struct sockaddr_un));\n    addr.sun_family = AF_UNIX;\n    sockFilePath.copy(addr.sun_path, sizeof(addr.sun_path));\n\n    unlink(sockFilePath.c_str());\n\n    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)))\n    {\n        close(fd);\n        throw std::system_error(errno, std::system_category(), \"bind(AF_UNIX, \" + sockFilePath + \")\");\n    }\n    return fd;\n}\n\n\n// Return env var value if set, or default value if not.\n// Throw a runtime_error is the specified dir doesn't exist.\nstd::string\nGetEnvDirVar(const std::string& name, const std::string& default_value) {\n\tchar* envConfigDir = std::getenv(name.c_str());\n\tif (envConfigDir != nullptr) {\n\t\tif (!IsDirExists(envConfigDir)) {\n\t\t\tthrow std::runtime_error(\"The directory specified in the environment variable \" + name + \" does not exist: \" + envConfigDir);\n\t\t} else {\n\t\t\treturn envConfigDir;\n\t\t}\n\t}\n\treturn default_value;\n}\n\nvoid\nParseHttpsOrHttpUrl(const std::string & absUrl, std::string& baseUrl, std::string& params)\n{\n    std::vector<std::string> supportedPrefixList = { \"https://\", \"http://\" };\n    for (const auto & prefix : 
// Return env var value if set, or default value if not.\n// Throw a runtime_error if the specified dir doesn't exist.\nstd::string\nGetEnvDirVar(const std::string& name, const std::string& default_value) {\n\tchar* envConfigDir = std::getenv(name.c_str());\n\tif (envConfigDir != nullptr) {\n\t\tif (!IsDirExists(envConfigDir)) {\n\t\t\tthrow std::runtime_error(\"The directory specified in the environment variable \" + name + \" does not exist: \" + envConfigDir);\n\t\t} else {\n\t\t\treturn envConfigDir;\n\t\t}\n\t}\n\treturn default_value;\n}\n\nvoid\nParseHttpsOrHttpUrl(const std::string & absUrl, std::string& baseUrl, std::string& params)\n{\n    std::vector<std::string> supportedPrefixList = { \"https://\", \"http://\" };\n    for (const auto & prefix : supportedPrefixList) {\n        if (absUrl.size() > prefix.size()) {\n            if (0 == absUrl.compare(0, prefix.size(), prefix)) {\n                auto sepPos = absUrl.find_first_of('/', prefix.size());\n                if (sepPos != std::string::npos) {\n                    baseUrl = absUrl.substr(0, sepPos);\n                    params = absUrl.substr(sepPos);\n                }\n                else {\n                    baseUrl = absUrl;\n                    params = \"\";\n                }\n                return;\n            }\n        }\n    }\n\n    throw std::invalid_argument(\"ParseHttpsOrHttpUrl(): Invalid absURL: \" + absUrl);\n}\n\n}\n\n//////////// MdsdUtil namespace ends\n\n\n// vim: se sw=8 :\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsdutil/Utility.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _UTILITY_HH_\n#define _UTILITY_HH_\n\n#include <string>\n#include <vector>\n#include <ctime>\n#include <iostream>\n#include <map>\n\nextern \"C\" {\n#include <sys/time.h>\n}\n\nnamespace MdsdUtil {\n\n/// <summary>Replace all instances of \"from\" with \"to\". Proceeds left to right, does not backtrack or rescan</summary>\nextern void ReplaceSubstring(std::string& str, const std::string& from, const std::string& to);\n\n/// <summary>Replace standard XML/HTML escapes with the correct character; iterates until all have been converted</summary>\n/// <return>The string, without any escape sequences</return>\nstd::string UnquoteXmlAttribute(std::string target);\n\n/// <summary>Checks if a \"name\" is valid (not empty, does not contain blanks)</summary>\n/// <return>True if argument is not a valid name</return>\nextern bool NotValidName(const std::string& str);\n\n/// <summary>Concatenate all the strings in the vector, placing a copy of the separator between each vector element</summary>\nextern std::string Join(const std::vector<std::string>&, const std::string&);\n\n/// <summary>Compute easy MDS hash of a string</summary>\nextern unsigned long long EasyHash(const std::string& str);\n\n/// <summary>Convert num to a string, zero-pad on the left to a total length of len bytes</summary>\nstd::string ZeroFill(unsigned long long num, size_t len);\n\n/// <summary>Convert sec+usec to RFC3339 time, with 100ns resolution, in Zulu (GMT)</summary>\nstd::string Rfc3339(const time_t, const suseconds_t);\n\n/// <summary>Convert current time to RFC3339 time, with 100ns resolution, in Zulu (GMT)</summary>\nstd::string Rfc3339();\n\n/// <summary>Convert \"restricted\" ISO8601 time string to sec+usec (replacing g_time_val_from_iso8601())</summary>\n/// <return>True if and only if the parsing is successful, and sec/usec will hold the computed values</return>\nbool TimeValFromIso8601Restricted(const char* datetimeStr, long& secondsOut, long& uSecondsOut);\n\n/// <summary>Round a time_t down to the nearest multiple of interval seconds</summary>\ntime_t IntervalStart(const time_t, const int);\n\n/// <summary>Split a query string into a [key,value] map</summary>\nvoid ParseQueryString(const std::string& qry, std::map<std::string, std::string> & elements);\n\n/// <summary> To check whether a given string is empty or all white spaces </summary>\nbool IsEmptyOrWhiteSpace(const std::string& str);\n\n/// <summary> To check whether a given string is empty or all white spaces </summary>\n//bool IsEmptyOrWhiteSpace(const std::string & str);\n\n/// <summary>Decode a URL</summary>\nstd::string UriDecode(const std::string &src);\n\n/// <summary>Convert a string to a boolean value</summary>\nbool to_bool(const std::string &val);\n\n/// <summmary>Convert an ASCII string to lower case</summary>\n/// <returns>A copy of the input string with ASCII upper case characters converted to lower case</returns>\nstd::string to_lower(const std::string & asciiString);\n\n/// <summary>Rotate an integral type right</summary>\ntemplate <class T> T RotateRight(T n, unsigned int count) { count = count%(sizeof(T)*8); if (count == 0) return n; unsigned int complementCount = sizeof(T)*8 - count; return ((n >> count) & ((1LL<<complementCount) - 1LL)) | (n << complementCount); }\n\n/// <summary>Rotate an integral type left</summary>\ntemplate <class T> T RotateLeft(T n, unsigned int count) { count = 
/// <summary>Compute 64-bit Murmur hash of a string, with initializer</summary>\nunsigned long long MurmurHash64(const std::string&, unsigned long);\n\n/// <summary>Convert a POSIX errno to a string</summary>\nstd::string GetErrnoStr(int errnum);\n\ninline std::string ToString(bool b)\n{\n    return b ? \"true\" : \"false\";\n}\n\nclass would_block : public std::exception\n{\npublic:\n    virtual const char* what() const noexcept { return \"EWOULDBLOCK\"; }\n};\n\n/// <summary>Write a buffer, followed by a newline, to a POSIX file descriptor. Throw appropriate exceptions\n/// for short writes or any error reported by writev.</summary>\nvoid WriteBufferAndNewline(int fd, const char * buf, size_t len);\nvoid WriteBufferAndNewline(int fd, const char * buf);\nvoid WriteBufferAndNewline(int fd, const std::string& buf);\n\n
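// Illustrative pattern for a non-blocking descriptor (the fd and retry policy are hypothetical):\n//   try { MdsdUtil::WriteBufferAndNewline(fd, line); }\n//   catch (const MdsdUtil::would_block&) { /* re-queue the line and retry after poll() */ }\n\n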
/// <summary>Convert a UTF-8 std::string to a std::wstring, encoded in UTF-16, relying on\n/// the cpprest library to convert to utf16 in a u16string and copying characters.</summary>\nstd::wstring to_utf16(const std::string& s);\n\n/// <summary>\n/// Create a directory given its path if it doesn't exist.\n/// Throw exception if any error.\n/// Return true if the directory doesn't exist and is created properly.\n/// Return false if the directory is valid and already exists.\n/// NOTE: the mode is used only when directory is created in this function.\n/// </summary>\nbool CreateDirIfNotExists(const std::string& filepath, mode_t mode);\n\n/// <summary>\n/// Extracts and returns the storage account name from the passed storage endpoint URL.\n/// For example, returns \"stgacct\", given \"https://stgacct.blob.core.windows.net/\".\n/// If no match is found, a std::runtime_error is thrown.\n/// </summary>\nstd::string GetStorageAccountNameFromEndpointURL(const std::string& url);\n\n/// <summary>\n/// Get the value of a variable from the process environment. Throw std::runtime_error\n/// if the variable is not defined in the environment. This is different from the variable\n/// being defined as an empty string; that latter case does not throw an error.\n/// </summary>\nstd::string GetEnvironmentVariable(const std::string &);\n\n/// <summary>\n/// Get the value of a variable from the process environment. Does not throw an exception\n/// if the variable is not defined in the environment; in that case it returns an empty string.\n/// </summary>\nstd::string GetEnvironmentVariableOrEmpty(const std::string &);\n\n/// <summary>Returns the hostname of the running system</summary>\nstd::string GetHostname();\n\n/// <summary>Get autokey table's 10-day suffix</summary>\nstd::string GetTenDaySuffix();\n\n/// <summary>\n/// Return true if filepath exists and it is a regular file.\n/// If filepath is an empty string, throw exception.\n/// </summary>\nbool IsRegFileExists(const std::string & filepath);\n\n/// <summary>\n/// Return true if filepath exists and it is a directory.\n/// If filepath is an empty string, throw exception.\n/// </summary>\nbool IsDirExists(const std::string & filepath);\n\n/// <summary>\n/// Make sure that the filepath exists, is a dir, and the running process has\n/// read/write/execute access to the dir.\n/// Throw exception otherwise.\n/// </summary>\nvoid ValidateDirRWXByUser(const std::string & filepath);\n\n/// <summary>\n/// If 'filepath' exists, unlink it.\n/// Return true if no error and the file is unlinked.\n/// Return false if the file doesn't exist.\n/// Throw exception for any error.\n/// </summary>\nbool RemoveFileIfExists(const std::string & filepath);\n\n/// <summary>\n/// Rename file from 'oldpath' to 'newpath' if 'oldpath' exists.\n/// Return true if no error and the file is successfully renamed.\n/// Return false if the file doesn't exist.\n/// Throw exception if any error.\n/// </summary>\nbool RenameFileIfExists(const std::string & oldpath, const std::string & newpath);\n\n/// <summary>\n/// Copy file 'frompath' to 'topath'. If 'topath' exists, it will be overwritten.\n/// It will throw exception for any error.\n/// </summary>\nvoid CopyFile(const std::string & frompath, const std::string & topath);\n\n/// <summary>Get the last modification time of the given file; throw on stat() failure.</summary>\ntime_t GetLastModificationTime(const std::string & filename);\n\n/// <summary>\n/// Get the most recently modified file from a given file list.\n/// If the list is empty, throw exception.\n/// If more than one file shares the latest modification time, return the first\n/// one in the list.\n/// </summary>\nstd::string GetMostRecentlyModifiedFile(const std::vector<std::string> & filelist);\n\n/// <summary>\n/// Change a file's last modification time to 'now' at micro-second precision.\n/// </summary>\nvoid TouchFileUs(const std::string & filename);\n\n/// <summary>Block or unblock a given signal.</summary>\nvoid MaskSignal(bool isBlock, int signum);\n\n/// <summary>Get the basename of a filepath</summary>\nstd::string GetFileBasename(const std::string & filepath);\n\n/// <summary>Utility class to open a file with exclusive lock, allow\n/// writing to it line-by-line, and let the destructor delete the file</summary>\nclass LockedFile\n{\n    std::string m_filepath;\n    int         m_fd;\n\npublic:\n    LockedFile() : m_fd(-1) {}\n\n    LockedFile(const std::string& filepath);\n\n    ~LockedFile();\n\n    LockedFile(const LockedFile&) = delete;\n    LockedFile& operator=(const LockedFile&) = delete;\n\n    // Moves must leave the source object closed; a defaulted move would copy m_fd and\n    // rely on the moved-from string being empty to avoid a double close()/Remove().\n    LockedFile(LockedFile&& other) noexcept\n        : m_filepath(std::move(other.m_filepath)), m_fd(other.m_fd)\n    {\n        other.m_filepath.clear();\n        other.m_fd = -1;\n    }\n\n    LockedFile& operator=(LockedFile&& other) noexcept\n    {\n        // Swap, so whatever this object previously held is released by other's destructor.\n        std::swap(m_filepath, other.m_filepath);\n        std::swap(m_fd, other.m_fd);\n        return *this;\n    }\n\n    void Open(const std::string& filepath);\n\n    bool IsOpen() const { return !m_filepath.empty(); }\n\n    void WriteLine(const std::string& line) const;\n\n    void Remove();\n\n    void TruncateAndClose();\n\n    class AlreadyLocked : public std::runtime_error\n    {\n    public:\n        AlreadyLocked(const std::string& msg) : std::runtime_error(msg) {}\n    };\n};\n\n/// Copy a 
maximum of 'maxbytes' from 'src' and return the result string.\n/// If src is NULL or maxbytes is 0, return empty string.\n/// If maxbytes > src's length, return a duplicate of src.\nstd::string StringNCopy(const char* src, size_t maxbytes);\n\n/// Return current thread id as a string.\nstd::string GetTid();\n\nclass FdCloser\n{\npublic:\n    explicit FdCloser(int fd) : m_fd(fd) {}\n    ~FdCloser();\n\n    void Release();\n\nprivate:\n    int m_fd;\n};\n\nclass FileCloser\n{\npublic:\n    explicit FileCloser(FILE* fp) : m_fp(fp) {}\n    ~FileCloser() {\n        if (m_fp) {\n            fclose(m_fp);\n            m_fp = nullptr;\n        }\n    }\nprivate:\n    FILE* m_fp;\n};\n\n/// <summary>Get the resource limit for number of open files for current process.</summary>\n/// Return 0 if infinity, return the actual number otherwise.\nint32_t GetNumFileResourceSoftLimit();\n\n/// <summary>Get syslog severity string from numeric value. E.g., for 5, it's \"Notice\"</summary>\nconst char* GetSyslogSeverityStringFromValue(int severity);\n\n/// <summary>\n/// Create a UNIX socket using given file path, then bind to it.\n/// Throw exception if any error.\n/// Return the socket fd.\n/// </summary>\nint CreateAndBindUnixSocket(const std::string & sockFilePath);\n\n/// <summary>\n/// Return the named environment variable value or the default_value if the variable isn't present.\n/// If the path specified by the value doesn't exist, throw a runtime_error.\n/// </summary>\nstd::string GetEnvDirVar(const std::string& name, const std::string& default_value);\n\n/// <summary>\n/// Parse an absolute https:// or http:// URL in the format of \"http(s)://xxx/yyy\".\n/// Return \"http(s)://xxx\" as baseUrl, \"/yyy\" as params.\n/// If URL format is \"http(s)://xxx\", return \"http(s)://xxx\" as baseUrl, \"\" as params.\n/// Throw exception for invalid format absUrl.\n/// </summary>\nvoid ParseHttpsOrHttpUrl(const std::string & absUrl, std::string& baseUrl, std::string& params);\n\n}\n\n\n#endif // _UTILITY_HH_\n\n// vim: se sw=8\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/CMakeLists.txt",
    "content": "include_directories(\n    ${CMAKE_SOURCE_DIR}/mdsdlog\n    ${CMAKE_SOURCE_DIR}/mdsdutil\n    ${CASABLANCA_INCLUDE_DIRS}\n    ${STORAGE_INCLUDE_DIRS}\n)\n\nset(SOURCES\n    GcsJsonData.cc\n    GcsJsonParser.cc\n    GcsServiceInfo.cc\n    GcsUtil.cc\n    MdsRest.cc\n)\n\n# static lib only\nadd_library(${MDSREST_LIB_NAME} STATIC ${SOURCES})\n\ninstall(TARGETS\n        ${MDSREST_LIB_NAME}\n        ARCHIVE DESTINATION ${CMAKE_BINARY_DIR}/release/lib\n)\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsJsonData.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"GcsJsonData.hh\"\n#include \"GcsUtil.hh\"\n#include \"Logger.hh\"\n#include \"MdsConst.hh\"\n\nusing namespace mdsd;\n\nstatic std::string\nGetStringFromJson(\n    const std::string & itemname,\n    const web::json::value& jsonObj\n    )\n{\n    GcsUtil::ThrowIfInvalidType(itemname, web::json::value::String, jsonObj.type());\n    return jsonObj.as_string();\n}\n\nstd::ostream&\nmdsd::operator<<(std::ostream & os, const EventHubKey& obj)\n{\n    os << \"    SasKey='\" << obj.SasKey << \"'; Uri='\" << obj.Uri << \"'.\\n\";\n    return os;\n}\n\nstd::unordered_map<std::string, itemparser_t<EventHubKey>> EventHubKey::ParserMap = {\n    { \"SasKey\", [](const std::string & name, const web::json::value & value, EventHubKey& result)\n        {\n            result.SasKey = GetStringFromJson(name, value);\n        }\n    },\n    { \"Uri\", [](const std::string & name, const web::json::value & value, EventHubKey& result)\n        {\n            result.Uri = GetStringFromJson(name, value);\n        }\n    }\n};\n\n\nstd::ostream&\nmdsd::operator<<(std::ostream & os, const ServiceBusAccountKey& obj)\n{\n    os << \"AccountGroupName='\" << obj.AccountGroupName << \"'; AccountMonikerName='\" << obj.AccountMonikerName << \"'.\\n\";\n    for (const auto & item : obj.EventHubKeys) {\n        os << \"EventHubsKeys: \" << item.first << \":\" << item.second;\n    }\n    return os;\n}\n\n// Instead of return when an invalid item is found, the validation will do\n// as much validation as possible.\nbool\nServiceBusAccountKey::IsValid() const\n{\n    bool retVal = true;\n\n    if (AccountGroupName.empty() || AccountMonikerName.empty() || EventHubKeys.empty()) {\n        Logger::LogError(\"Error: ServiceBusAccountKey has invalid empty field\");\n        retVal = false;\n    }\n\n    for (const auto & item : EventHubKeys) {\n        if (!item.second.IsValid()) {\n            Logger::LogError(\"Error: EventHubKey '\" + item.first + \"' is invalid\");\n            retVal = false;\n        }\n    }\n\n    return retVal;\n}\n\nstd::unordered_map<std::string, itemparser_t<ServiceBusAccountKey>> ServiceBusAccountKey::ParserMap = {\n    { \"AccountGroupName\", [](const std::string & name, const web::json::value & value, ServiceBusAccountKey& result)\n      {\n          result.AccountGroupName = GetStringFromJson(name, value);\n      }\n    },\n    { \"AccountMonikerName\", [](const std::string & name, const web::json::value & value, ServiceBusAccountKey& result)\n      {\n          result.AccountMonikerName = GetStringFromJson(name, value);\n      }\n    },\n    { \"EventHubKeys\", [](const std::string & name, const web::json::value & value, ServiceBusAccountKey& result)\n      {\n          details::EventHubKeysParser ehkeysParser(name, value);\n          ehkeysParser.Parse(result.EventHubKeys);\n      }\n    }\n};\n\n\nstd::unordered_map<std::string, itemparser_t<StorageSasKey>> StorageSasKey::ParserMap = {\n    { \"ResourceName\", [](const std::string & name, const web::json::value & value, StorageSasKey& result)\n        {\n            result.ResourceName = GetStringFromJson(name, value);\n        }\n    },\n    { \"SasKey\", [](const std::string & name, const web::json::value & value, StorageSasKey& result)\n        {\n            result.SasKey = GetStringFromJson(name, value);\n        }\n    },\n    { \"SasKeyType\", [](const std::string & name, const web::json::value & value, 
StorageSasKey& result)\n        {\n            result.SasKeyType = GetStringFromJson(name, value);\n        }\n    }\n};\n\n\nstd::ostream&\nmdsd::operator<<(std::ostream & os, const StorageSasKey& obj)\n{\n    os << \"ResourceName='\" << obj.ResourceName << \"'; SasKey='\" << obj.SasKey\n       << \"'; SasKeyType='\" << obj.SasKeyType << \"'\\n\";\n    return os;\n}\n\nstd::ostream&\nmdsd::operator<<(std::ostream & os, const StorageAccountKey& obj)\n{\n    os  << \"StorageAccountName='\" << obj.StorageAccountName << \"'; \"\n        << \"AccountGroupName='\" << obj.AccountGroupName << \"'; \"\n        << \"AccountMonikerName='\" << obj.AccountMonikerName << \"'; \"\n        << \"BlobEndpoint='\" << obj.BlobEndpoint << \"'; \"\n        << \"QueueEndpoint='\" << obj.QueueEndpoint << \"'; \"\n        << \"TableEndpoint='\" << obj.TableEndpoint << \"'.\\n\";\n\n    for (const auto & item : obj.SasKeys) {\n        os << item;\n    }\n    return os;\n}\n\n// Return true if equal, false if not equal.\n// Log error if not equal.\nstatic inline bool\nValidateEqual(\n    int expected,\n    int actual,\n    const std::string & msg\n    )\n{\n    if (expected != actual) {\n        std::ostringstream ostr;\n        ostr << \"Error: \" << msg << \": expected=\" << expected << \"; actual=\" << actual;\n        Logger::LogError(ostr);\n        return false;\n    }\n    return true;\n}\n\nbool\nStorageAccountKey::IsValid() const\n{\n    bool retVal = true;\n\n    if (StorageAccountName.empty() ||\n        AccountGroupName.empty() ||\n        AccountMonikerName.empty() ||\n        BlobEndpoint.empty() ||\n        QueueEndpoint.empty() ||\n        TableEndpoint.empty() ||\n        SasKeys.empty()) {\n        Logger::LogError(\"Error: StorageAccountKey has invalid empty field\");\n        retVal = false;\n    }\n\n    // The Blob and Table SAS keys must be defined exactly once\n    int nBlobSas = 0;\n    int nTableSas = 0;\n    const int nexpected = 1;\n\n    for (const auto & item : SasKeys) {\n        if (!item.IsValid()) {\n            retVal = false;\n        }\n        if (\"BlobService\" == item.SasKeyType) {\n            nBlobSas++;\n        }\n        else if (\"TableService\" == item.SasKeyType) {\n            nTableSas++;\n        }\n    }\n\n    retVal &= ValidateEqual(nexpected, nBlobSas, \"# of BlobService SasKeys\");\n    retVal &= ValidateEqual(nexpected, nTableSas, \"# of TableService SasKeys\");\n\n    return retVal;\n}\n\nstd::unordered_map<std::string, itemparser_t<StorageAccountKey>> StorageAccountKey::ParserMap = {\n    { \"StorageAccountName\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            result.StorageAccountName = GetStringFromJson(name, value);\n        }\n    },\n    { \"AccountGroupName\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            result.AccountGroupName = GetStringFromJson(name, value);\n        }\n    }\n    ,\n    { \"AccountMonikerName\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            result.AccountMonikerName = GetStringFromJson(name, value);\n        }\n    }\n    ,\n    { \"BlobEndpoint\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            result.BlobEndpoint = GetStringFromJson(name, value);\n        }\n    }\n    ,\n    { \"QueueEndpoint\", [](const std::string & name, const web::json::value & value, 
StorageAccountKey& result)\n        {\n            result.QueueEndpoint = GetStringFromJson(name, value);\n        }\n    }\n    ,\n    { \"TableEndpoint\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            result.TableEndpoint = GetStringFromJson(name, value);\n        }\n    },\n    { \"SasKeys\", [](const std::string & name, const web::json::value & value, StorageAccountKey& result)\n        {\n            details::ObjectArrayParser<StorageSasKey> arrayParser(name, value);\n            arrayParser.Parse(result.SasKeys);\n        }\n    }\n};\n\n\nstd::ostream&\nmdsd::operator<<(std::ostream & os, const GcsAccount& obj)\n{\n    os << \"\\nMaSigningPublicKeys: \" << obj.MaSigningPublicKeys.size() << \"\\n\";\n    for (const auto & item : obj.MaSigningPublicKeys) {\n        os << item;\n    }\n\n    os << \"SasKeysExpireTimeUtc='\" << obj.SasKeysExpireTimeUtc << \"';\\n\";\n\n    for (const auto & item: obj.ServiceBusAccountKeys) {\n        os << item;\n    }\n\n    for (const auto & item: obj.StorageAccountKeys) {\n        os << item;\n    }\n\n    os << \"TagId='\" << obj.TagId << \"'.\";\n    return os;\n}\n\nstatic bool\nValidateMaSigningPublicKeys(\n    const std::vector<std::string> & MaSigningPublicKeys\n    )\n{\n    bool retVal = true;\n    if (MaSigningPublicKeys.empty()) {\n        Logger::LogError(\"Error: unexpected empty MaSigningPublicKey array\");\n        retVal = false;\n    }\n\n    for (const auto & item: MaSigningPublicKeys) {\n        if (item.empty()) {\n            Logger::LogError(\"Error: unexpected invalid MaSigningPublicKey\");\n            retVal = false;\n        }\n    }\n    return retVal;\n}\n\nstatic bool\nValidateServiceBusAccountKeys(\n    const std::vector<ServiceBusAccountKey> & ServiceBusAccountKeys\n    )\n{\n    bool retVal = true;\n\n    if (ServiceBusAccountKeys.empty()) {\n        Logger::LogError(\"Error: unexpected empty ServiceBusAccountKeys array\");\n        retVal = false;\n    }\n\n    size_t i = 0;\n    for (const auto & item : ServiceBusAccountKeys) {\n        if (!item.IsValid()) {\n            Logger::LogError(\"Error: ServiceBusAccountKeys[\" + std::to_string(i) + \"] is invalid\");\n            retVal = false;\n        }\n        i++;\n    }\n\n    // Validate that required EventHub keys exist\n    if (!ServiceBusAccountKeys.empty()) {\n        int nEHNoticeKeys = 0;\n        int nEHPublishKeys = 0;\n        const int nexpected = 1;\n\n        for (auto & item : ServiceBusAccountKeys) {\n            if (item.EventHubKeys.count(gcs::c_EventHub_notice)) {\n                nEHNoticeKeys++;\n            }\n            if (item.EventHubKeys.count(gcs::c_EventHub_publish)) {\n                nEHPublishKeys++;\n            }\n        }\n\n        retVal &= ValidateEqual(nexpected, nEHNoticeKeys, \"# EventHubKey for '\" + gcs::c_EventHub_notice + \"'\");\n        retVal &= ValidateEqual(nexpected, nEHPublishKeys, \"# EventHubKey for '\" + gcs::c_EventHub_publish + \"'\");\n    }\n\n    return retVal;\n}\n\nstatic bool\nValidateStorageAccountKeys(\n    const std::vector<StorageAccountKey> & StorageAccountKeys\n    )\n{\n    bool retVal = true;\n\n    if (StorageAccountKeys.empty()) {\n        Logger::LogError(\"Error: unexpected empty StorageAccountKeys array\");\n        retVal = false;\n    }\n\n    size_t i = 0;\n    for (const auto & item : StorageAccountKeys) {\n        if (!item.IsValid()) {\n            Logger::LogError(\"Error: StorageAccountKeys[\" + std::to_string(i) + 
\"] is invalid\");\n            retVal = false;\n        }\n        i++;\n    }\n    return retVal;\n}\n\nbool\nGcsAccount::IsValid() const\n{\n    bool retVal = true;\n\n    if (TagId.empty()) {\n        retVal = false;\n    }\n\n    if (IsEmpty()) {\n        return retVal;\n    }\n\n    retVal &= ValidateMaSigningPublicKeys(MaSigningPublicKeys);\n\n    if (SasKeysExpireTimeUtc.empty()) {\n        Logger::LogError(\"Error: unexpected empty SasKeysExpireTimeUtc\");\n        retVal = false;\n    }\n\n    retVal &= ValidateServiceBusAccountKeys(ServiceBusAccountKeys);\n    retVal &= ValidateStorageAccountKeys(StorageAccountKeys);\n\n    return retVal;\n}\n\nbool\nGcsAccount::IsEmpty() const\n{\n    return (\n        MaSigningPublicKeys.empty() &&\n        SasKeysExpireTimeUtc.empty() &&\n        ServiceBusAccountKeys.empty() &&\n        StorageAccountKeys.empty()\n        );\n}\n\nstd::unordered_map<std::string, itemparser_t<GcsAccount>> GcsAccount::ParserMap = {\n    { \"MaSigningPublicKeys\", [](const std::string & name, const web::json::value & value, GcsAccount& result)\n      {\n          if (!value.is_null()) {\n              details::StringArrayParser parser(name, value);\n              parser.Parse(result.MaSigningPublicKeys);\n          }\n      }\n    },\n    { \"SasKeysExpireTimeUtc\", [](const std::string & name, const web::json::value & value, GcsAccount& result)\n      {\n          if (!value.is_null()) {\n              result.SasKeysExpireTimeUtc = GetStringFromJson(name, value);\n          }\n      }\n    },\n    { \"ServiceBusAccountKeys\" , [](const std::string & name, const web::json::value & value, GcsAccount& result)\n      {\n          if (!value.is_null()) {\n              details::ObjectArrayParser<ServiceBusAccountKey> parser(name, value);\n              parser.Parse(result.ServiceBusAccountKeys);\n          }\n      }\n    },\n    { \"StorageAccountKeys\", [](const std::string & name, const web::json::value & value, GcsAccount& result)\n      {\n          if (!value.is_null()) {\n              details::ObjectArrayParser<StorageAccountKey> parser(name, value);\n              parser.Parse(result.StorageAccountKeys);\n          }\n      }\n    },\n    { \"TagId\", [](const std::string & name, const web::json::value & value, GcsAccount& result)\n      {\n          result.TagId = GetStringFromJson(name, value);\n      }\n    }\n};\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsJsonData.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __GCSJSONDATA_HH__\n#define __GCSJSONDATA_HH__\n\n#include <string>\n#include <vector>\n#include <unordered_map>\n#include <sstream>\n#include <functional>\n#include <cpprest/json.h>\n#include \"GcsJsonParser.hh\"\n\nnamespace mdsd {\n\ntemplate<typename T>\nusing itemparser_t = std::function<void (const std::string & name, const web::json::value & jsonObj, T& result)>;\n\nstruct EventHubKey\n{\n    std::string SasKey;\n    std::string Uri;\n\n    bool IsValid() const { return !SasKey.empty() && !Uri.empty(); }\n\n    static std::unordered_map<std::string, itemparser_t<EventHubKey>> ParserMap;\n};\n\nstd::ostream& operator<<(std::ostream & os, const EventHubKey& obj);\n\nstruct ServiceBusAccountKey\n{\n    std::string AccountGroupName;   // Geneva moniker name\n    std::string AccountMonikerName; // Mapped moniker name\n\n    // This map stores all EventHub Keys.\n    // map key: Event Hub name. In GCS, each name is a hard-coded name for\n    // different scenario:\n    // \"raw\" -> Event Hub notification for CentralBond store type.,\n    // \"error\" -> Top N service.\n    // \"distributedtracing\" -> Distributed tracing service.\n    // \"eventpublisher\" -> Event Hub data publisher.\n    std::unordered_map<std::string, EventHubKey> EventHubKeys;\n\n    bool IsValid() const;\n\n    using parser_type = details::JsonObjectParser<ServiceBusAccountKey>;\n    static std::unordered_map<std::string, itemparser_t<ServiceBusAccountKey>> ParserMap;\n};\n\nstd::ostream& operator<<(std::ostream & os, const ServiceBusAccountKey& obj);\n\nstruct StorageSasKey\n{\n    std::string ResourceName;\n    std::string SasKey;\n    std::string SasKeyType;\n\n    bool IsValid() const { return !ResourceName.empty() && !SasKey.empty() && !SasKeyType.empty(); }\n\n    using parser_type = details::JsonObjectParser<StorageSasKey>;\n    static std::unordered_map<std::string, itemparser_t<StorageSasKey>> ParserMap;\n};\n\nstd::ostream& operator<<(std::ostream & os, const StorageSasKey& obj);\n\nstruct StorageAccountKey\n{\n    std::string StorageAccountName;\n    std::string AccountGroupName;\n    std::string AccountMonikerName;\n    std::string BlobEndpoint;\n    std::string QueueEndpoint;\n    std::string TableEndpoint;\n    std::vector<StorageSasKey> SasKeys;\n\n    bool IsValid() const;\n\n    using parser_type = details::JsonObjectParser<StorageAccountKey>;\n    static std::unordered_map<std::string, itemparser_t<StorageAccountKey>> ParserMap;\n};\n\nstd::ostream& operator<<(std::ostream & os, const StorageAccountKey& obj);\n\n\n// GcsAccount contains GCS account data. Its tagId should never be empty.\n// Its other values can be of two kinds:\n// 1) none of the values are empty.\n// 2) all the values are empty.\nstruct GcsAccount\n{\n    std::vector<std::string> MaSigningPublicKeys;\n    std::string SasKeysExpireTimeUtc;\n    std::vector<ServiceBusAccountKey> ServiceBusAccountKeys;\n    std::vector<StorageAccountKey> StorageAccountKeys;\n    std::string TagId;\n\n    bool IsValid() const;\n\n    // Return true if all values (ignoring TagId) are empty; return false otherwise.\n    bool IsEmpty() const;\n\n    static std::unordered_map<std::string, itemparser_t<GcsAccount>> ParserMap;\n};\n\nstd::ostream& operator<<(std::ostream & os, const GcsAccount& obj);\n\n}\n\n#endif // __GCSJSONDATA_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsJsonParser.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <sstream>\n\n#include \"GcsJsonParser.hh\"\n#include \"GcsUtil.hh\"\n#include \"Logger.hh\"\n#include \"Trace.hh\"\n#include \"GcsJsonData.hh\"\n\nusing namespace mdsd;\nusing namespace mdsd::details;\n\n\nbool\nGcsJsonParser::Parse(\n    GcsAccount & gcsAccount\n    )\n{\n    Trace trace(Trace::MdsCmd, \"GcsJsonParser::Parse\");\n    if (!m_jsonStr.empty()) {\n        try {\n            m_jsonObj = web::json::value::parse(m_jsonStr);\n        }\n        catch(const std::exception & ex) {\n            Logger::LogError(\"Error: failed to parse JSON string '\" + m_jsonStr + \"': \" + ex.what());\n            return false;\n        }\n    }\n    if (!m_jsonObj.is_null()) {\n        try {\n            JsonObjectParser<GcsAccount> rootParser(\"\", m_jsonObj);\n            rootParser.Parse(gcsAccount);\n            if (trace.IsActive()) {\n                std::ostringstream ostr;\n                ostr << gcsAccount;\n                TRACEINFO(trace, ostr.str());\n            }\n        }\n        catch(const std::exception & ex) {\n            Logger::LogError(std::string(\"Error: failed to parse JSON object: \") + ex.what());\n            return false;\n        }\n    }\n    return true;\n}\n\nvoid\nGcsJsonBaseParser::CheckType() const\n{\n    GcsUtil::ThrowIfInvalidType(GetPath(), GetExpectedType(), GetActualType());\n}\n\nvoid\nGcsJsonBaseParser::LogMsgIfUnrecognized(\n    const std::string & itemname\n    ) const\n{\n    std::ostringstream msg;\n    msg << \"Ignore unrecognized item: '\" << itemname << \"'\";\n    // Because future GCS may add additional JSON key/value pairs, only log unrecognized\n    // name as information only.\n    Logger::LogInfo(msg.str());\n}\n\nvoid\nEventHubKeysParser::Parse(\n    std::unordered_map<std::string, EventHubKey>& ehkeymap\n    )\n{\n    CheckType();\n    auto & jsonObj = GetJson().as_object();\n\n    for (auto iter = jsonObj.cbegin(); iter != jsonObj.cend(); ++iter) {\n        const auto & name = iter->first;\n        const auto & value = iter->second;\n\n        if (ehkeymap.find(name) != ehkeymap.end()) {\n            throw JsonParseException(\"Found duplicate item: \" + GetPath() + \"/\" + name);\n        }\n\n        EventHubKey ehkey;\n        JsonObjectParser<EventHubKey> ehkeyParser(GetPath() + \"/\" + name, value);\n        ehkeyParser.Parse(ehkey);\n\n        ehkeymap[name] = std::move(ehkey);\n    }\n}\n\nvoid\nStringArrayParser::Parse(\n    std::vector<std::string>& resultList\n    )\n{\n    CheckType();\n    auto & array = GetJson().as_array();\n\n    for (size_t i = 0; i < array.size(); i++) {\n        auto jsontype = array.at(i).type();\n\n        if (web::json::value::String == jsontype) {\n            resultList.push_back(array.at(i).as_string());\n        }\n        else {\n            throw JsonParseException(\"StringArrayParser: unsupported JSON type '\" +\n                GcsUtil::GetJsonTypeStr(jsontype) + \"'\");\n        }\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsJsonParser.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef _GCSJSONPARSER_HH__\n#define _GCSJSONPARSER_HH__\n\n#include <string>\n#include <vector>\n#include <unordered_map>\n#include <cpprest/json.h>\n\nnamespace mdsd\n{\n\nstruct EventHubKey;\nstruct ServiceBusAccountKey;\nstruct StorageSasKey;\nstruct StorageAccountKey;\nstruct GcsAccount;\n\nclass GcsJsonParser {\npublic:\n    GcsJsonParser(const std::string & jsonStr) : m_jsonStr(jsonStr) {}\n    GcsJsonParser(const web::json::value & jsonObj) : m_jsonObj(jsonObj) {}\n\n    // <summary>\n    // Parse JSON string or JSON object and store the results to gcsAccount object.\n    // </summary>\n    // <param name=\"gcsAccount\">To store parsed account information</param>\n    // Return true if parsing succeeds; return false if any error.\n    bool Parse(GcsAccount & gcsAccount);\n\nprivate:\n    std::string m_jsonStr;\n    web::json::value m_jsonObj;\n};\n\nnamespace details {\n\n// This is the base class for all JSON parser classes.\nclass GcsJsonBaseParser {\npublic:\n    // Constructor.\n    // <param name=\"path\">JSON path string. e.g. \"/root/ServiceBusAccountKeys/EventHubKeys\".\n    // It is to locate items in JSON parsing.</param>\n    // <param name=\"jsonObj\">JSON object to be parsed</param>\n    GcsJsonBaseParser(\n        const std::string & path,\n        const web::json::value & jsonObj\n        ) :\n        m_path(path),\n        m_jsonObj(jsonObj)\n    {\n    }\n\n    virtual ~GcsJsonBaseParser() = default;\n\nprotected:\n    // Get actual JSON value type\n    web::json::value::value_type GetActualType() const { return m_jsonObj.type(); }\n\n    // Get expected JSON value type\n    virtual web::json::value::value_type GetExpectedType() const { return web::json::value::Object; }\n\n    const web::json::value& GetJson() const { return m_jsonObj; }\n\n    virtual std::string GetPath() const { return m_path; }\n\n    // Get path assuming the object is an array type.\n    virtual std::string GetArrayPath(size_t i) const { return GetPath() + \"[\" + std::to_string(i) + \"]\"; }\n\n    bool IsNull() const { return m_jsonObj.is_null(); }\n\n    // Validate whether the JSON object has expected type. 
Throw exception if not.\n    void CheckType() const;\n\n    // Log message if unrecognized JSON name is found in JSON string.\n    void LogMsgIfUnrecognized(const std::string & itemname) const;\n\nprivate:\n    std::string m_path;\n    web::json::value m_jsonObj;\n};\n\nclass EventHubKeysParser : public GcsJsonBaseParser {\npublic:\n    EventHubKeysParser(const std::string & path, const web::json::value & jsonObj)\n    : GcsJsonBaseParser(path, jsonObj) {}\n\n    void Parse(std::unordered_map<std::string, EventHubKey>& ehkeys);\n};\n\n// To parse an array of json strings\nclass StringArrayParser : public GcsJsonBaseParser {\npublic:\n    StringArrayParser(const std::string & path, const web::json::value & jsonObj)\n        : GcsJsonBaseParser(path, jsonObj) {}\n\n    void Parse(std::vector<std::string>& resultList);\n\nprotected:\n    web::json::value::value_type GetExpectedType() const override { return web::json::value::Array; }\n};\n\n// A template to parse an array of json object type.\n// The object type 'T' must have 'parser_type' defined.\ntemplate<typename T>\nclass ObjectArrayParser : public GcsJsonBaseParser {\npublic:\n    ObjectArrayParser(const std::string & path, const web::json::value & jsonObj)\n    : GcsJsonBaseParser(path, jsonObj) {}\n\n    void Parse(std::vector<T>& resultList)\n    {\n        CheckType();\n        auto & array = GetJson().as_array();\n\n        for (size_t i = 0; i < array.size(); i++) {\n            typename T::parser_type parser(GetArrayPath(i), array.at(i));\n            T item;\n            parser.Parse(item);\n            resultList.push_back(std::move(item));\n        }\n    }\n\nprotected:\n    web::json::value::value_type GetExpectedType() const override { return web::json::value::Array; }\n};\n\n// Parse a JSON object with type T\ntemplate<typename T>\nclass JsonObjectParser : public GcsJsonBaseParser {\npublic:\n    JsonObjectParser(const std::string & path, const web::json::value & jsonObj)\n    : GcsJsonBaseParser(path, jsonObj) {}\n\n    void Parse(T& result) {\n        CheckType();\n        auto & jsonObj = GetJson().as_object();\n\n        for (auto iter = jsonObj.cbegin(); iter != jsonObj.cend(); ++iter)\n        {\n            const auto & name = iter->first;\n            const auto & value = iter->second;\n            auto itempath = GetPath() + \"/\" + name;\n\n            auto parserIter = T::ParserMap.find(name);\n            if (parserIter == T::ParserMap.end()) {\n                LogMsgIfUnrecognized(itempath);\n            }\n            else {\n                parserIter->second(itempath, value, result);\n            }\n        }\n    }\n};\n\n} // namespace details\n\n} // namespace mdsd\n\n#endif // _GCSJSONPARSER_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsServiceInfo.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include \"GcsServiceInfo.hh\"\n#include \"Logger.hh\"\n#include \"MdsConst.hh\"\n\nusing namespace mdsd;\n\nGcsServiceInfo GcsConfig::s_gcsInfo;\n\n// Read an environment variable and store the value to 'value'.\n// If given environment variable is invalid, do nothing.\nstatic void\nGetEnvVar(const std::string & name, std::string& value)\n{\n    if (name.empty()) {\n        return;\n    }\n\n    char* v = std::getenv(name.c_str());\n    if (!v) {\n        Logger::LogInfo(\"Environment variable '\" + name + \"' is not defined.\");\n    }\n    else {\n        value = v;\n    }\n}\n\nvoid\nGcsConfig::ReadFromEnvVars()\n{\n    GetEnvVar(gcs::c_GcsEnv_EndPoint, s_gcsInfo.EndPoint);\n    GetEnvVar(gcs::c_GcsEnv_Environment, s_gcsInfo.Environment);\n    GetEnvVar(gcs::c_GcsEnv_Account, s_gcsInfo.GenevaAccount);\n    GetEnvVar(gcs::c_GcsEnv_Region, s_gcsInfo.Region);\n\n    GetEnvVar(gcs::c_GcsEnv_ThumbPrint, s_gcsInfo.ThumbPrint);\n    GetEnvVar(gcs::c_GcsEnv_CertFile, s_gcsInfo.CertFile);\n    GetEnvVar(gcs::c_GcsEnv_KeyFile, s_gcsInfo.KeyFile);\n    GetEnvVar(gcs::c_GcsEnv_SslDigest, s_gcsInfo.SslDigest);\n}\n\nbool\nGcsConfig::IsSet()\n{\n    return (\n        !s_gcsInfo.EndPoint.empty() &&\n        !s_gcsInfo.Environment.empty() &&\n        !s_gcsInfo.GenevaAccount.empty() &&\n        !s_gcsInfo.Region.empty() &&\n        !s_gcsInfo.ThumbPrint.empty() &&\n        !s_gcsInfo.CertFile.empty() &&\n        !s_gcsInfo.KeyFile.empty() &&\n        !s_gcsInfo.SslDigest.empty()\n    );\n}"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsServiceInfo.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __GCSSERVICEINFO_HH__\n#define __GCSSERVICEINFO_HH__\n\n#include <string>\n\nnamespace mdsd\n{\n\nstruct GcsServiceInfo\n{\n    std::string EndPoint;\n    std::string Environment;\n    std::string GenevaAccount;\n    std::string ConfigNamespace;\n    std::string Region;\n    std::string SpecifiedConfigVersion;\n    std::string ActualConfigVersion;\n    std::string ThumbPrint;\n    std::string CertFile;\n    std::string KeyFile;\n    std::string SslDigest;\n};\n\nclass GcsConfig\n{\n    static void ReadFromEnvVars();\n\n    // Return true if all required environmental variable settings are set\n    // (may not be valid values). Return false otherwise.\n    static bool IsSet();\n\n    static GcsServiceInfo& GetData() { return s_gcsInfo; }\n\nprivate:\n    static GcsServiceInfo s_gcsInfo;\n};\n\n} // namespace mdsd\n\n#endif // __GCSSERVICEINFO_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsUtil.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <map>\n#include <unordered_map>\n#include <sstream>\n\n#include \"GcsUtil.hh\"\n\nnamespace mdsd { namespace GcsUtil {\n\nstatic std::map<web::json::value::value_type, std::string>&\nGetJsonTypeMap()\n{\n    static std::map<web::json::value::value_type, std::string> m =\n    {\n        { web::json::value::Number, \"Number\" },\n        { web::json::value::Boolean, \"Boolean\" },\n        { web::json::value::String, \"String\" },\n        { web::json::value::Object, \"Object\" },\n        { web::json::value::Array, \"Array\" },\n        { web::json::value::Null, \"Null\" }\n    };\n    return m;\n}\n\nstd::string\nGetJsonTypeStr(web::json::value::value_type t)\n{\n    auto & m = GetJsonTypeMap();\n    auto item = m.find(t);\n    if (item != m.end()) {\n        return item->second;\n    }\n    return \"Unknown\";\n}\n\nvoid\nThrowIfInvalidType(\n    const std::string & itemName,\n    web::json::value::value_type expectedType,\n    web::json::value::value_type actualType\n    )\n{\n    if (expectedType != actualType) {\n        std::ostringstream ostr;\n        ostr << \"Json item '\" << itemName << \"' has invalid type:\"\n             << \" expected=\" << GetJsonTypeStr(expectedType)\n             << \" actual=\" << GetJsonTypeStr(actualType);\n        throw JsonParseException(ostr.str());\n    }\n}\n\n// key: Gcs Environment. e.g. \"Test\"\n// value: Gcs endpoing. e.g. \"ppe.warmpath.msftcloudes.com\"\nstatic std::unordered_map<std::string, std::string>&\nGetGcsEnvEndPointMap()\n{\n    static std::unordered_map<std::string, std::string> m = {\n        {\"DiagnosticsProd\", \"prod.warmpath.msftcloudes.com\"},\n        {\"FirstPartyProd\",  \"prod.warmpath.msftcloudes.com\"},\n        {\"Test\",            \"ppe.warmpath.msftcloudes.com\"},\n        {\"Stage\",           \"ppe.warmpath.msftcloudes.com\"},\n        {\"BillingProd\",     \"prod.warmpath.msftcloudes.com\"},\n        {\"ExternalProd\",    \"prod.warmpath.msftcloudes.com\"},\n        {\"CaMooncake\",      \"mooncake.warmpath.chinacloudapi.cn\"},\n        {\"CaBlackforest\",   \"blackforest.warmpath.cloudapi.de\"},\n        {\"CaFairfax\",       \"fairfax.warmpath.usgovcloudapi.net\"}\n    };\n    return m;\n}\n\nstd::string\nGetGcsEndpointFromEnvironment(\n    const std::string & gcsEnvName\n    )\n{\n    auto & m = GetGcsEnvEndPointMap();\n    auto item = m.find(gcsEnvName);\n    if (item == m.end()) {\n        return std::string();\n    }\n    return item->second;\n}\n\n} // namespace GcsUtil\n} // namespace mdsd\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/GcsUtil.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __GCSUTIL_HH__\n#define __GCSUTIL_HH__\n\n#include <string>\n#include <cpprest/json.h>\n\n#include \"MdsRestException.hh\"\n\nnamespace mdsd\n{\n\nnamespace GcsUtil\n{\n    // <summary>\n    // Get a string format of a JSON value type.\n    // </summary>\n    std::string GetJsonTypeStr(web::json::value::value_type t);\n\n    // <summary>\n    // Throw JsonParseException if actual type is not equal to expected type\n    // for an item with name called itemName.\n    // </summary>\n    void ThrowIfInvalidType(const std::string & itemName,\n        web::json::value::value_type expectedType, web::json::value::value_type actualType);\n\n    // <summary>\n    // Get GCS service endpoint given GCS environment value (e.g. \"Test\")\n    // This function is used when GCS environment is defined but GCS endpoint\n    // is empty. This can avoid customer to remember the exact endpoint.\n    // Customer can still define endpoint if needed.\n    // </summary>\n    std::string GetGcsEndpointFromEnvironment(const std::string & gcsEnvName);\n\n} // namespace GcsUtil\n\n} // namespace mdsd\n\n#endif // __GCSUTIL_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/MdsConst.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n#ifndef __MDSCONST_HH__\n#define __MDSCONST_HH__\n\n#include <string>\n\nnamespace mdsd {\n    namespace gcs {\n        const std::string c_GcsServiceName = \"/api/agent/v2/\";\n        const std::string c_GcsMonitoringStorageKeysApiName = \"MonitoringStorageKeys\";\n        const int c_HttpTimeInSeconds = 60;\n        const std::string c_RequestIdHeader = \"-request-id:\";\n\n        const std::string c_GcsEnv_EndPoint = \"MONITORING_GCS_ENDPOINT\";\n        const std::string c_GcsEnv_Environment = \"MONITORING_GCS_ENVIRONMENT\";\n        const std::string c_GcsEnv_Account = \"MONITORING_GCS_ACCOUNT\";\n\n        const std::string c_GcsEnv_Namespace = \"MONITORING_GCS_NAMESPACE\";\n        const std::string c_GcsEnv_Region = \"MONITORING_GCS_REGION\";\n        const std::string c_GcsEnv_ConfigVersion = \"MONITORING_CONFIG_VERSION\";\n\n        const std::string c_GcsEnv_ThumbPrint = \"MONITORING_GCS_THUMBPRINT\";\n        const std::string c_GcsEnv_CertFile = \"MONITORING_GCS_CERT_CertFile\";\n        const std::string c_GcsEnv_KeyFile = \"MONITORING_GCS_CERT_KeyFile\";\n        const std::string c_GcsEnv_SslDigest = \"MONITORING_GCS_CERT_SSLDIGEST\";\n\n        const std::string c_EventHub_notice = \"raw\";\n        const std::string c_EventHub_publish = \"eventpublisher\";\n\n    }\n}\n\n\n#endif\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/MdsRest.cc",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#include <vector>\n#include <sstream>\n#include <cpprest/json.h>\n#include <wascore/basic_types.h>\n#include <boost/asio.hpp>\n#include <boost/asio/ssl.hpp>\n#include <cctype>\n\n#include \"GcsJsonData.hh\"\n#include \"GcsJsonParser.hh\"\n#include \"GcsUtil.hh\"\n#include \"Logger.hh\"\n#include \"MdsRest.hh\"\n#include \"MdsConst.hh\"\n#include \"OpensslCert.hh\"\n#include \"OpensslCertStore.hh\"\n#include \"Trace.hh\"\n\n\nusing namespace mdsd;\nusing namespace web::http;\nusing namespace web::http::client;\n\nstatic inline void\nThrowIfEmpty(\n    const std::string & apiName,\n    const std::string & argName,\n    const std::string & argVal\n    )\n{\n    if (argVal.empty()) {\n        throw std::invalid_argument(apiName + \": unexpected empty string for \" + argName);\n    }\n}\n\nMdsRestInterface::MdsRestInterface(\n    const std::string & endPoint,\n    const std::string & gcsEnvironment,\n    const std::string & thumbPrint,\n    const std::string & certFile,\n    const std::string & keyFile,\n    const std::string & sslDigest\n    ) :\n    m_endPoint(endPoint),\n    m_gcsEnv(gcsEnvironment),\n    m_thumbPrint(thumbPrint),\n    m_certFile(certFile),\n    m_keyFile(keyFile),\n    m_sslDigest(sslDigest)\n{\n    ThrowIfEmpty(\"MdsRestInterface\", \"gcsEnvironment\", gcsEnvironment);\n    ThrowIfEmpty(\"MdsRestInterface\", \"thumbPrint\", thumbPrint);\n    ThrowIfEmpty(\"MdsRestInterface\", \"certFile\", certFile);\n    ThrowIfEmpty(\"MdsRestInterface\", \"keyFile\", keyFile);\n    ThrowIfEmpty(\"MdsRestInterface\", \"sslDigest\", sslDigest);\n}\n\nbool\nMdsRestInterface::Initialize()\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::Initialize\");\n\n    try {\n        if (m_endPoint.empty()) {\n            m_endPoint = GcsUtil::GetGcsEndpointFromEnvironment(m_gcsEnv);\n            if (m_endPoint.empty()) {\n                Logger::LogError(\"Error: unexpected empty value for GCS endpoint.\");\n                return false;\n            }\n        }\n\n        m_initialized = InitCert();\n    }\n    catch(const std::exception & ex) {\n        Logger::LogError(std::string(\"Error: MdsRestInterface::Initialize() exception: \") + ex.what());\n        m_initialized = false;\n    }\n\n    return m_initialized;\n}\n\nbool\nMdsRestInterface::InitCert()\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::InitCert\");\n    bool retVal = true;\n    try {\n        OpensslCertStore certStore(m_certFile, m_keyFile, m_sslDigest);\n        m_cert = certStore.LoadCertificate(m_thumbPrint);\n        if (!m_cert->IsValid()) {\n            Logger::LogError(\"Error: initializing certificate failed: certificate is invalid\");\n            retVal = false;\n            m_cert = nullptr;\n        }\n    }\n    catch(const std::exception& ex) {\n        Logger::LogError(std::string(\"Error: initializing certificate failed: \") + ex.what());\n        retVal = false;\n    }\n\n    return retVal;\n}\n\nvoid\nMdsRestInterface::ResetClient()\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::ResetClient\");\n\n    if (m_client) {\n        TRACEINFO(trace, \"Http client will be reset due to previous failure.\");\n        m_client.reset();\n        m_resetHttpClient = false;\n    }\n\n    http_client_config httpClientConfig;\n    httpClientConfig.set_validate_certificates(true);\n    httpClientConfig.set_timeout(utility::seconds(gcs::c_HttpTimeInSeconds));\n\n    
httpClientConfig.set_nativehandle_options([this](web::http::client::native_handle handle)->void\n    {\n        SetNativeHandleOptions(handle);\n    });\n\n    auto fullEndpoint = \"https://\" + m_endPoint;\n    m_client.reset(new http_client(fullEndpoint.c_str(), httpClientConfig));\n}\n\npplx::task<bool>\nMdsRestInterface::QueryGcsAccountInfo(\n    const std::string & mdsAccount,\n    const std::string & mdsNamespace,\n    const std::string & configVersion,\n    const std::string & region,\n    const std::string & agentIdentity,\n    const std::string & tagId\n    )\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::QueryGcsAccountInfo\");\n\n    ThrowIfEmpty(\"GcsAccountInfo\", \"mdsAccount\", mdsAccount);\n    ThrowIfEmpty(\"GcsAccountInfo\", \"mdsNamespace\", mdsNamespace);\n    ThrowIfEmpty(\"GcsAccountInfo\", \"configVersion\", configVersion);\n    ThrowIfEmpty(\"GcsAccountInfo\", \"region\", region);\n    ThrowIfEmpty(\"GcsAccountInfo\", \"agentIdentity\", agentIdentity);\n\n    if (!m_initialized) {\n        if (!Initialize()) {\n            return pplx::task_from_result(false);\n        }\n    }\n\n    try {\n        auto apicall = BuildGcsApiCall(mdsAccount);\n        auto args = BuildGcsAccountArgs(mdsNamespace, configVersion, region, agentIdentity, tagId);\n        return ExecuteGcsGetCall(apicall, args);\n    }\n    catch(const std::exception & ex) {\n        Logger::LogError(std::string(\"Error: QueryGcsAccountInfo() exception: \") + ex.what());\n    }\n    return pplx::task_from_result(false);\n}\n\nstd::string\nMdsRestInterface::BuildGcsApiCall(\n    const std::string & mdsAccount\n    )\n{\n    std::ostringstream apicall;\n    apicall << gcs::c_GcsServiceName\n            << m_gcsEnv << \"/\"\n            << mdsAccount << \"/\"\n            << gcs::c_GcsMonitoringStorageKeysApiName << \"/\";\n    return apicall.str();\n}\n\nstd::string\nMdsRestInterface::BuildGcsAccountArgs(\n    const std::string & mdsNamespace,\n    const std::string & configVersion,\n    const std::string & region,\n    const std::string & agentIdentity,\n    const std::string & tagId\n    )\n{\n    // Encode agentIdentity so that no special character like '/' is used in the URI.\n    std::vector<unsigned char> vec(agentIdentity.begin(), agentIdentity.end());\n    auto encodedAgentId = utility::conversions::to_base64(vec);\n\n    std::ostringstream args;\n    args << \"Namespace=\" << mdsNamespace\n         << \"&ConfigMajorVersion=\" << configVersion\n         << \"&Region=\" << region\n         << \"&Identity=\" << encodedAgentId;\n\n    if (!tagId.empty()) {\n        args << \"&TagId=\" << tagId;\n    }\n    return args.str();\n}\n\n\npplx::task<bool>\nMdsRestInterface::ExecuteGcsGetCall(\n    const std::string & contractApi,\n    const std::string & arguments\n    )\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::ExecuteGcsGetCall\");\n    TRACEINFO(trace, \"contractApi='\" << contractApi << \"'; arguments='\" << arguments << \"'\");\n\n    ThrowIfEmpty(\"ExecuteGcsGetCall\", \"contractApi\", contractApi);\n    ThrowIfEmpty(\"ExecuteGcsGetCall\", \"arguments\", arguments);\n\n    if (!m_client || m_resetHttpClient) {\n        ResetClient();\n    }\n\n    web::http::uri_builder request_uri;\n    request_uri.append_path(contractApi, false);\n    request_uri.append_query(arguments, true);\n\n    http_request request;\n    auto requestId = utility::uuid_to_string(utility::new_uuid());\n    request.headers().add(_XPLATSTR(\"x-ms-client-request-id\"), requestId.c_str());\n    request.set_request_uri(request_uri.to_uri());\n    request.set_method(methods::GET);\n\n    auto shThis = shared_from_this();\n    TRACEINFO(trace, \"Start to send request {\" << requestId << \"} to GCS: \" << request.absolute_uri().to_string());\n\n    return m_client->request(request)\n    .then([shThis](pplx::task<web::http::http_response> task)\n    {\n        return shThis->HandleServerResponse(task);\n    });\n}\n\nvoid\nMdsRestInterface::SetNativeHandleOptions(\n    web::http::client::native_handle handle\n    )\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::SetNativeHandleOptions\");\n\n    auto streamobj = static_cast<boost::asio::ssl::stream<boost::asio::ip::tcp::socket &>* >(handle);\n    if (!streamobj) {\n        throw std::runtime_error(\"SetNativeHandleOptions() failed: unexpected NULL tcp::socket handle\");\n    }\n    auto ssl = streamobj->native_handle();\n    if (!ssl) {\n        throw std::runtime_error(\"SetNativeHandleOptions() failed: unexpected NULL ssl handle\");\n    }\n\n    const int isOK = 1;\n    auto errorcode = ::SSL_use_certificate(ssl, m_cert->GetCert());\n    if (isOK != errorcode) {\n        throw std::runtime_error(\"SSL_use_certificate() failed with error \" + std::to_string(errorcode));\n    }\n    errorcode = ::SSL_use_PrivateKey(ssl, m_cert->GetPrivateKey());\n    if (isOK != errorcode) {\n        throw std::runtime_error(\"SSL_use_PrivateKey() failed with error \" + std::to_string(errorcode));\n    }\n\n    // Disable weak SSL ciphers.\n    const std::string cipherList = \"HIGH:!DSS:!RC4:!aNULL@STRENGTH\";\n    errorcode = ::SSL_set_cipher_list(ssl, cipherList.c_str());\n    if (isOK != errorcode) {\n        throw std::runtime_error(\"SSL_set_cipher_list() failed with error \" + std::to_string(errorcode));\n    }\n}\n\nstd::string\nMdsRestInterface::GetRequestIdFromResponse(\n    const std::string & responseString\n    )\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::GetRequestIdFromResponse\");\n    if (responseString.empty()) {\n        TRACEINFO(trace, \"ResponseString is empty. No request id is found.\");\n        return std::string();\n    }\n\n    auto ptr = responseString.find(mdsd::gcs::c_RequestIdHeader);\n    if (ptr == std::string::npos) {\n        TRACEINFO(trace, \"No request id is found in the response string.\");\n        return std::string();\n    }\n\n    ptr += mdsd::gcs::c_RequestIdHeader.size();\n\n    // Skip any leading spaces, then copy the id. Guard the index so that a\n    // malformed response (e.g. header at the end of the string) cannot read out of bounds.\n    auto index = responseString.find_first_not_of(' ', ptr);\n    std::string requestId;\n    while (index < responseString.size() &&\n           (isalnum(static_cast<unsigned char>(responseString[index])) || responseString[index] == '-')) {\n        requestId.append(1, responseString[index]);\n        index++;\n    }\n\n    TRACEINFO(trace, \"RequestId from response: '\" << requestId << \"'\");\n    return requestId;\n}\n\nstatic inline bool\nIsHttpStatusOK(web::http::status_code statusCode)\n{\n    return (status_codes::OK == statusCode ||\n        status_codes::Created == statusCode);  // 201. According to MSDN, 201 means success.\n}\n\n\nbool\nMdsRestInterface::HandleServerResponse(\n    pplx::task<web::http::http_response> responseTask\n    )\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::HandleServerResponse\");\n\n    bool retVal = false;\n\n    try {\n        auto response = responseTask.get();\n        auto statusCode = response.status_code();\n        auto responseString = response.to_string();\n\n        if (trace.IsActive()) {\n            TRACEINFO(trace, \"Response Code: \" << statusCode << \"; Response: \" << responseString);\n        }\n\n        if (!IsHttpStatusOK(statusCode)) {\n            auto requestId = GetRequestIdFromResponse(responseString);\n            std::ostringstream ostr;\n            ostr << \"Error: request to Geneva failed with status code=\" << statusCode\n                 << \"; requestId=\" << requestId << \"; Response: \" << responseString;\n            Logger::LogError(ostr.str());\n\n            // Only reset the http client when the GCS service is unavailable and a reconnect is needed later.\n            if (status_codes::ServiceUnavailable == statusCode) {\n                m_resetHttpClient = true;\n            }\n        }\n        else {\n            m_responseJsonVal = response.extract_json().get();\n\n            // As long as the JSON value has the expected type, the http request is considered OK.\n            // Detailed data extraction and validation happen when this JSON object is parsed.\n            if (web::json::value::Object == m_responseJsonVal.type()) {\n                retVal = true;\n            }\n            else {\n                auto requestId = GetRequestIdFromResponse(responseString);\n                auto jsonType = m_responseJsonVal.type();\n                auto jsonTypeStr = mdsd::GcsUtil::GetJsonTypeStr(jsonType);\n                std::ostringstream ostr;\n                ostr << \"Error: received response, but an unexpected result was returned; \"\n                     << \"expected a JSON object, but received type \"\n                     << jsonType << \" \" << jsonTypeStr << \"; requestId=\" << requestId;\n                Logger::LogError(ostr.str());\n            }\n        }\n    }\n    catch(const std::exception & ex) {\n        Logger::LogError(std::string(\"Error: request failed with exception: \") + ex.what());\n        m_resetHttpClient = true;\n    }\n\n    TRACEINFO(trace, \"HandleServerResponse returned \" << (retVal ? \"true\" : \"false\"));\n    return retVal;\n}\n\nbool\nMdsRestInterface::GetGcsAccountData(GcsAccount & gcsAccount) const\n{\n    Trace trace(Trace::MdsCmd, \"MdsRestInterface::GetGcsAccountData()\");\n    if (m_responseJsonVal.is_null()) {\n        TRACEINFO(trace, \"GCS account JSON object is null.\");\n        return false;\n    }\n    else {\n        GcsJsonParser parser(m_responseJsonVal);\n        return parser.Parse(gcsAccount);\n    }\n}\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/MdsRest.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef __MDSREST_HH__\n#define __MDSREST_HH__\n\n#include <string>\n#include <memory>\n#include <cpprest/http_client.h>\n#include <pplx/pplxtasks.h>\n\nclass OpensslCert;\n\nnamespace mdsd {\n\nstruct GcsAccount;\n\n/// This class defines APIs to call Geneva Configuration Service (GCS) REST APIs.\n/// NOTE:\n/// - This class is not thread-safe.\nclass MdsRestInterface : public std::enable_shared_from_this<MdsRestInterface>\n{\npublic:\n    /// Construct a new MdsRestInterface.\n    /// <param name=\"endPoint\">GCS endpoint. If empty, search its value\n    /// using gcsEnvironment from pre-defined table. e.g. \"ppe.warmpath.msftcloudes.com\"</param>\n    /// <param name=\"gcsEnvironment\">Environment. e.g. \"Test\"</param>\n    /// <param name=\"thumbPrint\">Certificate thumb print</param>\n    /// <param name=\"certFile\">full path to public certificate file.</param>\n    /// <param name=\"keyFile\">full path to private key file.</param>\n    /// <param name=\"sslDigest\">certificate digest. e.g. \"sha1\"</param>\n    static std::shared_ptr<MdsRestInterface> Create(\n        const std::string & endPoint,\n        const std::string & gcsEnvironment,\n        const std::string & thumbPrint,\n        const std::string & certFile,\n        const std::string & keyFile,\n        const std::string & sslDigest\n        )\n    {\n        // Because the MdsRestInterface constructor is private, std::make_shared cannot be used.\n        // std::make_shared requires public constructor.\n        return std::shared_ptr<MdsRestInterface>(\n            new MdsRestInterface(endPoint, gcsEnvironment, thumbPrint, certFile, keyFile, sslDigest));\n    }\n\n    ~MdsRestInterface() = default;\n\n    /// Initialize MdsRestInterface.\n    /// Return true if success, false if any error.\n    bool Initialize();\n\n    /// Query GCS account information. If successful, the result will be stored to\n    /// json object m_responseJsonVal.\n    ///\n    /// <param name=\"mdsAccount\">MDS Account name</param>\n    /// <param name=\"mdsNamespace\">MDS namespace</param>\n    /// <param name=\"configVersion\">configuration version. e.g. \"Ver5v0\"</param>\n    /// <param name=\"region\">Region to get storage account credentials. e.g. \"westus\"</param>\n    /// <param name=\"agentIdentity\">An identification string, which is used for\n    /// http query hashing. It can be built from mdsd IdentityColumns.</param>\n    /// <param name=\"tagid\">GCS configuration tag id. GCS internally has a tag id, which\n    /// is a combination of service configuration file md5 hash + account moniker versions.\n    /// If the input tagId is equal to GCS's internal tag id, GCS will return null JSON objects.\n    /// If the input tagId is not equal to GCS's internal tag id, GCS will return full\n    /// account information. 
GCS account query will return its internal tagId in the returned JSON.\n    ///\n    /// Return true if success; return false if any error.\n    pplx::task<bool> QueryGcsAccountInfo(\n        const std::string & mdsAccount,\n        const std::string & mdsNamespace,\n        const std::string & configVersion,\n        const std::string & region,\n        const std::string & agentIdentity,\n        const std::string & tagId);\n\n    /// Get the account JSON object, which stores results from GCS account query.\n    web::json::value GetGcsAccountJson() const { return m_responseJsonVal; }\n\n    /// Parse GCS account JSON object and return the results in 'gcsAccount'.\n    /// Return true if JSON object is successfully parsed.\n    /// Return false if JSON object is null, or there is parsing error.\n    bool GetGcsAccountData(GcsAccount & gcsAccount) const;\n\nprivate:\n    /// Constructor.\n    MdsRestInterface(\n        const std::string & endPoint,\n        const std::string & gcsEnvironment,\n        const std::string & thumbPrint,\n        const std::string & certFile,\n        const std::string & keyFile,\n        const std::string & sslDigest);\n\n    /// Load certificates from files.\n    /// Return true if success, false if any error.\n    bool InitCert();\n\n    /// Reset http client if any. Then recreate it.\n    void ResetClient();\n\n    /// Build the api string to call GCS service.\n    std::string BuildGcsApiCall(const std::string & mdsAccount);\n\n    /// Build the args to call GCS account service.\n    std::string BuildGcsAcountArgs(\n        const std::string & mdsNamespace,\n        const std::string & configVersion,\n        const std::string & region,\n        const std::string & agentIdentity,\n        const std::string & tagId);\n\n    /// Execute GCS REST API call.\n    /// Return true if success, false if any error.\n    pplx::task<bool> ExecuteGcsGetCall(const std::string & contractApi, const std::string & arguments);\n\n    /// Set certificates on native openssl handle\n    void SetNativeHandleOptions(web::http::client::native_handle handle);\n\n    /// Get http request id from http response. This is for logging purpose.\n    std::string GetRequestIdFromResponse(const std::string & responseString);\n\n    /// Handle GCS http response. Extract desired data from the response.\n    /// Return true if success, false if any error.\n    bool HandleServerResponse(pplx::task<web::http::http_response> responseTask);\n\nprivate:\n    bool m_initialized = false;\n    std::string m_endPoint;\n    std::string m_gcsEnv;\n    std::string m_thumbPrint;\n    std::string m_certFile;\n    std::string m_keyFile;\n    std::string m_sslDigest;\n    std::shared_ptr<OpensslCert> m_cert;\n\n    std::unique_ptr<web::http::client::http_client> m_client;\n    bool m_resetHttpClient = false;\n    web::json::value m_responseJsonVal;\n};\n\n} // namespace mdsd\n\n#endif // __MDSREST_HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/mdsrest/MdsRestException.hh",
    "content": "// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT license.\n\n#pragma once\n\n#ifndef __MDSRESTEXCEPTION__HH__\n#define __MDSRESTEXCEPTION__HH__\n\n#include <string>\n#include <exception>\n\nnamespace mdsd\n{\n\nclass JsonParseException : public std::exception\n{\nprivate:\n    std::string m_msg;\n\npublic:\n    JsonParseException(std::string message) noexcept :\n        std::exception(),\n        m_msg(std::move(message))\n    {}\n\n    virtual const char * what() const noexcept\n    {\n        return m_msg.c_str();\n    }\n};\n\n}\n\n#endif // __MDSRESTEXCEPTION__HH__\n"
  },
  {
    "path": "Diagnostic/mdsd/parseglibc.py",
    "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n# This script is to parse glibc-based binary files and print out\n# symbols whose GLIBC versions are higher than given version.\n# Report error if any such symbol is found.\n\nimport argparse\nimport glob\nimport os\nimport sys\nimport time\n\ntotalErrors = 0\n\n\ndef LogError(msg):\n    global totalErrors\n    totalErrors = totalErrors + 1\n    msg2 = \"%s: Error: %s\" % (sys.argv[0], msg)\n    print msg2\n\n\ndef LogInfo(msg):\n    print msg\n\n\ndef ParseCmdLine():\n    parser = argparse.ArgumentParser(sys.argv[0])\n    parser.add_argument(\"-d\", \"--dir\", type=str, required=False,\n                        help=\"directory where all its files are parsed.\")\n    parser.add_argument(\"-f\", \"--filepath\", type=str, required=False, help=\"binary filepath.\")\n\n    parser.add_argument(\"-v\", \"--glibcver\", type=str, required=True, help=\"max GLIBC ver. ex: 2.14\")\n    args = parser.parse_args()\n\n    if not args.dir and not args.filepath:\n        LogError(\"either '-d' or '-f' is required.\")\n\n    return args\n\n\ndef GetFilesToParse(filepath, dirname):\n    files = []\n    if filepath:\n        if not os.path.isfile(filepath):\n            LogError(\"%s is not a regular file.\" % (filepath))\n        else:\n            files.append(filepath)\n\n    elif dirname:\n        if not os.path.isdir(dirname):\n            LogError(\"%s is not a directory.\" % (dirname))\n        else:\n            files = GetAllFiles(dirname)\n\n    return files\n\n\n# Get all files in a directory. This doesn't include subdirectories and symbolic links.\ndef GetAllFiles(dirname):\n    if not dirname:\n        return []\n\n    filepattern = dirname + \"/*\"\n    filedirs = glob.glob(filepattern)\n    files = []\n    for f in filedirs:\n        if os.path.isfile(f) and (not os.path.islink(f)):\n            files.append(f)\n    return files\n\n\n# Get symbol file by running 'nm'\ndef GetSymbols(filepath):\n    outputfile = \"testfile-\" + str(time.time()) + \".txt\"\n    cmdline = \"nm \" + filepath + \" 1>\" + outputfile + \" 2>&1\"\n    errCode = os.system(cmdline)\n    if errCode != 0:\n        LogError(\"cmd: '%s' failed with error %d\" % (cmdline, errCode))\n        return \"\"\n    return outputfile\n\n\n# Parse symbol file created by 'nm'\ndef ParseSymbols(symbolfile, glibcver):\n    with open(symbolfile, \"r\") as fh:\n        lines = fh.readlines()\n\n    for line in lines:\n        if \"@GLIBC_\" in line:\n            line = line.strip()\n            ParseLine(line, glibcver)\n\n        # libstdc++ should be statically linked starting from version 1.4\n        for unexpected_symbol in [\"GLIBCXX\", \"CXXABI\"]:\n            if unexpected_symbol in line:\n                LogError(\"Unexpected symbol {0}\".format(unexpected_symbol))\n\n\n# Parse one line to check for higher GLIBC version.\n# Report error if found\ndef ParseLine(line, glibcver):\n    global totalErrors\n    items = line.split(\"GLIBC_\")\n    if len(items) != 2:\n        LogError(\"unexpected symbol: %s\" % (line))\n    else:\n        if CompareVer(items[1], glibcver):\n            totalErrors = totalErrors + 1\n            LogInfo(line)\n\n\n# Return True if ver1 > ver2.\n# Return False otherwise.\ndef CompareVer(ver1, ver2):\n    v1list = ver1.split(\".\")\n    v2list = ver2.split(\".\")\n    n = min(len(v1list), len(v2list))\n    for i in range(n):\n        x = int(v1list[i])\n        y = int(v2list[i])\n        if x 
> y:\n            return True\n        elif x < y:\n            return False\n\n    if len(v1list) > len(v2list):\n        return True\n\n    return False\n\n\ndef RunTest(filepath, dirname, glibcver):\n    LogInfo(\"Parse GLIBC versions ...\")\n    files = GetFilesToParse(filepath, dirname)\n\n    if len(files) == 0:\n        LogError(\"no file to parse. Abort.\")\n        return\n\n    for binfile in files:\n        LogInfo(\"\\nStart to parse file '%s' ...\" % (binfile))\n        symbolfile = GetSymbols(binfile)\n        if symbolfile:\n            ParseSymbols(symbolfile, glibcver)\n            os.remove(symbolfile)\n\n    if totalErrors == 0:\n        LogInfo(\"\\nNo error is found. Test passed successfully.\")\n    else:\n        LogInfo(\"\\nTest failed. Total errors found: %d\" % (totalErrors))\n\n\nif __name__ == \"__main__\":\n    args = ParseCmdLine()\n    RunTest(args.filepath, args.dir, args.glibcver)\n    sys.exit(totalErrors)\n"
  },
  {
    "path": "Diagnostic/mocks/Readme.txt",
    "content": "These three modules contain minimal mocks to allow the waagent code to load up on a non-Unix (e.g. windows) platform.\nThey're just enough to allow the import statements to be executed; if you try to actually exercise the waagent\nfunctionality that relies upon them, you won't be happy.\n\nIn order to make these visible in the correct way, you'll need to add the full path of this directory to the\nPYTHONPATH environment variable. Obviously, you shouldn't do this on Unix systems (including Linux and FreeBSD); the\nreal modules are visible already, and you don't need these mocks.\n"
  },
  {
    "path": "Diagnostic/mocks/__init__.py",
    "content": ""
  },
  {
    "path": "Diagnostic/mocks/crypt.py",
    "content": "def crypt(password, salt):\n    pass"
  },
  {
    "path": "Diagnostic/mocks/fcntl.py",
    "content": "def ioctl(fileid, ioctl_num, arg):\n    pass\n"
  },
  {
    "path": "Diagnostic/mocks/pwd.py",
    "content": "def getpwnam(name):\n    pass\n"
  },
  {
    "path": "Diagnostic/run_unittests.sh",
    "content": "#!/bin/bash\n\nfor test in watchertests test_commonActions test_lad_logging_config test_lad_config_all test_LadDiagnosticUtil \\\n                test_builtin test_lad_ext_settings; do\n    python -m tests.$test\ndone\n"
  },
  {
    "path": "Diagnostic/services/mdsd-lde.service",
    "content": "[Unit]\nDescription=Azure Linux Diagnostic Extension\n\nAfter=network-online.target walinuxagent.service\nWants=network-online.target walinuxagent.service\n\nConditionFileIsExecutable={WORKDIR}/diagnostic.py\n\n[Service]\nType=simple\nWorkingDirectory={WORKDIR}/\nExecStart=/usr/bin/python2 {WORKDIR}/diagnostic.py -daemon\nRestart=on-failure\nTimeoutSec=60\nRestartSec=30\nStartLimitBurst=10\nStartLimitInterval=3600\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "Diagnostic/services/metrics-extension.service",
    "content": "[Unit]\nDescription=Metrics Extension service for Linux Agent metrics sourcing\nAfter=network.target\n\n[Service]\nExecStart=%ME_BIN% -TokenSource MSI -Input influxdb_udp -InfluxDbHost 127.0.0.1 -InfluxDbUdpPort %ME_INFLUX_PORT% -DataDirectory %ME_DATA_DIRECTORY% -LocalControlChannel -MonitoringAccount %ME_MONITORING_ACCOUNT% -LogLevel Error\nExecReload=/bin/kill -HUP $MAINPID\nRestart=on-failure\nRestartForceExitStatus=SIGPIPE\nKillMode=control-group\n\n\n[Install]\nWantedBy=multi-user.target\n\n\n"
  },
  {
    "path": "Diagnostic/services/metrics-sourcer.service",
    "content": "[Unit]\nDocumentation=https://github.com/influxdata/telegraf/blob/master/README.md\nDescription=Custom Modified Telegraf service for Linux Agent metrics sourcing\nAfter=network.target\n\n[Service]\nExecStart=%TELEGRAF_BIN% --config %TELEGRAF_AGENT_CONFIG% --config-directory %TELEGRAF_CONFIG_DIR%\nExecReload=/bin/kill -HUP $MAINPID\nRestart=on-failure\nRestartForceExitStatus=SIGPIPE\nKillMode=control-group\n\n\n[Install]\nWantedBy=multi-user.target\n\n\n"
  },
  {
    "path": "Diagnostic/shim.sh",
    "content": "#!/usr/bin/env bash\n\n# This is the main driver file for LAD extension. This file first checks if Python 2 is available on the VM and exits early if not\n# Control arguments passed to the shim are redirected to diagnostic.py without validation.\n\nCOMMAND=\"./diagnostic.py\"\nPYTHON=\"\"\nARG=\"$@\"\n\nfunction find_python() {\n    local python_exec_command=$1\n\n    if command -v python2 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python2\"\n    fi\n}\n\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ] # If python2 is not installed, we will fail the install with the following error, requiring cx to have python pre-installed\nthen\n    echo \"No Python 2 interpreter found, which is an LAD extension dependency. Please install Python 2 before retrying LAD extension deployment.\" >&2\n    exit 52 # Missing Dependency\nelse\n    ${PYTHON} --version 2>&1\nfi\n\n${PYTHON} ${COMMAND} ${ARG}\nexit $?"
  },
  {
    "path": "Diagnostic/tests/.gitignore",
    "content": "lad_2_3_metric_definitions_sample.json\n"
  },
  {
    "path": "Diagnostic/tests/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "Diagnostic/tests/lad_2_3_compatible_portal_pub_settings.json",
    "content": "{\n  \"StorageAccount\": \"__DIAGNOSTIC_STORAGE_ACCOUNT__\",\n  \"ladCfg\": {\n    \"diagnosticMonitorConfiguration\": {\n      \"eventVolume\": \"Medium\", \n      \"metrics\": {\n        \"metricAggregation\": [\n          {\n            \"scheduledTransferPeriod\": \"PT1H\"\n          }, \n          {\n            \"scheduledTransferPeriod\": \"PT1M\"\n          }\n        ], \n        \"resourceId\": \"__VM_RESOURCE_ID__\"\n      }, \n      \"performanceCounters\": {\n        \"performanceCounterConfiguration\": [\n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk read guest OS\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"readbytespersecond\", \n            \"counterSpecifier\": \"/builtin/disk/readbytespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"BytesPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk writes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"writespersecond\", \n            \"counterSpecifier\": \"/builtin/disk/writespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk transfer time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"averagetransfertime\", \n            \"counterSpecifier\": \"/builtin/disk/averagetransfertime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Seconds\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk transfers\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"transferspersecond\", \n            \"counterSpecifier\": \"/builtin/disk/transferspersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk write guest OS\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"writebytespersecond\", \n            \"counterSpecifier\": \"/builtin/disk/writebytespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"BytesPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk read time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"averagereadtime\", \n            \"counterSpecifier\": \"/builtin/disk/averagereadtime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Seconds\"\n          }, \n          {\n            \"annotation\": [\n              {\n 
               \"displayName\": \"Disk write time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"averagewritetime\", \n            \"counterSpecifier\": \"/builtin/disk/averagewritetime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Seconds\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk total bytes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"bytespersecond\", \n            \"counterSpecifier\": \"/builtin/disk/bytespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"BytesPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk reads\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"readspersecond\", \n            \"counterSpecifier\": \"/builtin/disk/readspersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Disk queue length\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"disk\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"averagediskqueuelength\", \n            \"counterSpecifier\": \"/builtin/disk/averagediskqueuelength\", \n            \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Network in guest OS\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"bytesreceived\", \n            \"counterSpecifier\": \"/builtin/network/bytesreceived\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Network total bytes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"bytestotal\", \n            \"counterSpecifier\": \"/builtin/network/bytestotal\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Network out guest OS\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"bytestransmitted\", \n            \"counterSpecifier\": \"/builtin/network/bytestransmitted\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Network collisions\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"totalcollisions\", \n            \"counterSpecifier\": \"/builtin/network/totalcollisions\", \n        
    \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Packets received errors\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"totalrxerrors\", \n            \"counterSpecifier\": \"/builtin/network/totalrxerrors\", \n            \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Packets sent\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"packetstransmitted\", \n            \"counterSpecifier\": \"/builtin/network/packetstransmitted\", \n            \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Packets received\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"packetsreceived\", \n            \"counterSpecifier\": \"/builtin/network/packetsreceived\", \n            \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Packets sent errors\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"network\", \n            \"counter\": \"totaltxerrors\", \n            \"counterSpecifier\": \"/builtin/network/totaltxerrors\", \n            \"type\": \"builtin\", \n            \"unit\": \"Count\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem transfers/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"transferspersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/transferspersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem % free space\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentfreespace\", \n            \"counterSpecifier\": \"/builtin/filesystem/percentfreespace\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem % used space\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentusedspace\", \n            \"counterSpecifier\": \"/builtin/filesystem/percentusedspace\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem used space\", \n                \"locale\": \"en-us\"\n              }\n            ], \n          
  \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"usedspace\", \n            \"counterSpecifier\": \"/builtin/filesystem/usedspace\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem read bytes/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"bytesreadpersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/bytesreadpersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem free space\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"freespace\", \n            \"counterSpecifier\": \"/builtin/filesystem/freespace\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem % free inodes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentfreeinodes\", \n            \"counterSpecifier\": \"/builtin/filesystem/percentfreeinodes\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem bytes/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"bytespersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/bytespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"BytesPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem reads/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"readspersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/readspersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem write bytes/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"byteswrittenpersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/byteswrittenpersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem writes/sec\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": 
\"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"writespersecond\", \n            \"counterSpecifier\": \"/builtin/filesystem/writespersecond\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Filesystem % used inodes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"filesystem\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentusedinodes\", \n            \"counterSpecifier\": \"/builtin/filesystem/percentusedinodes\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU IO wait time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentiowaittime\", \n            \"counterSpecifier\": \"/builtin/processor/percentiowaittime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU user time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentusertime\", \n            \"counterSpecifier\": \"/builtin/processor/percentusertime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU nice time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentnicetime\", \n            \"counterSpecifier\": \"/builtin/processor/percentnicetime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU percentage guest OS\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentprocessortime\", \n            \"counterSpecifier\": \"/builtin/processor/percentprocessortime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU interrupt time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentinterrupttime\", \n            \"counterSpecifier\": \"/builtin/processor/percentinterrupttime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU idle time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": 
\"IsAggregate=TRUE\", \n            \"counter\": \"percentidletime\", \n            \"counterSpecifier\": \"/builtin/processor/percentidletime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"CPU privileged time\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"processor\", \n            \"condition\": \"IsAggregate=TRUE\", \n            \"counter\": \"percentprivilegedtime\", \n            \"counterSpecifier\": \"/builtin/processor/percentprivilegedtime\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Memory available\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"availablememory\", \n            \"counterSpecifier\": \"/builtin/memory/availablememory\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Swap percent used\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"percentusedswap\", \n            \"counterSpecifier\": \"/builtin/memory/percentusedswap\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Memory used\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"usedmemory\", \n            \"counterSpecifier\": \"/builtin/memory/usedmemory\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Page reads\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"pagesreadpersec\", \n            \"counterSpecifier\": \"/builtin/memory/pagesreadpersec\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Swap available\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"availableswap\", \n            \"counterSpecifier\": \"/builtin/memory/availableswap\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Swap percent available\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"percentavailableswap\", \n            \"counterSpecifier\": \"/builtin/memory/percentavailableswap\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Mem. 
percent available\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"percentavailablememory\", \n            \"counterSpecifier\": \"/builtin/memory/percentavailablememory\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Pages\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"pagespersec\", \n            \"counterSpecifier\": \"/builtin/memory/pagespersec\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Swap used\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"usedswap\", \n            \"counterSpecifier\": \"/builtin/memory/usedswap\", \n            \"type\": \"builtin\", \n            \"unit\": \"Bytes\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Memory percentage\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"percentusedmemory\", \n            \"counterSpecifier\": \"/builtin/memory/percentusedmemory\", \n            \"type\": \"builtin\", \n            \"unit\": \"Percent\"\n          }, \n          {\n            \"annotation\": [\n              {\n                \"displayName\": \"Page writes\", \n                \"locale\": \"en-us\"\n              }\n            ], \n            \"class\": \"memory\", \n            \"counter\": \"pageswrittenpersec\", \n            \"counterSpecifier\": \"/builtin/memory/pageswrittenpersec\", \n            \"type\": \"builtin\", \n            \"unit\": \"CountPerSecond\"\n          }\n        ]\n      }, \n      \"syslogEvents\": {\n        \"syslogEventConfiguration\": {\n          \"LOG_AUTH\": \"LOG_DEBUG\", \n          \"LOG_AUTHPRIV\": \"LOG_DEBUG\", \n          \"LOG_CRON\": \"LOG_DEBUG\", \n          \"LOG_DAEMON\": \"LOG_DEBUG\", \n          \"LOG_FTP\": \"LOG_DEBUG\", \n          \"LOG_KERN\": \"LOG_DEBUG\", \n          \"LOG_LOCAL0\": \"LOG_DEBUG\", \n          \"LOG_LOCAL1\": \"LOG_DEBUG\", \n          \"LOG_LOCAL2\": \"LOG_DEBUG\", \n          \"LOG_LOCAL3\": \"LOG_DEBUG\", \n          \"LOG_LOCAL4\": \"LOG_DEBUG\", \n          \"LOG_LOCAL5\": \"LOG_DEBUG\", \n          \"LOG_LOCAL6\": \"LOG_DEBUG\", \n          \"LOG_LOCAL7\": \"LOG_DEBUG\", \n          \"LOG_LPR\": \"LOG_DEBUG\", \n          \"LOG_MAIL\": \"LOG_DEBUG\", \n          \"LOG_NEWS\": \"LOG_DEBUG\", \n          \"LOG_SYSLOG\": \"LOG_DEBUG\", \n          \"LOG_USER\": \"LOG_DEBUG\", \n          \"LOG_UUCP\": \"LOG_DEBUG\"\n        }\n      }\n    }, \n    \"sampleRateInSeconds\": 15\n  }\n}\n"
  },
  {
    "path": "Diagnostic/tests/test_LadDiagnosticUtil.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport unittest\nimport Utils.LadDiagnosticUtil as LadUtil\n\n\nclass TestGetDiagnosticsMonitorConfigurationElement(unittest.TestCase):\n    def setUp(self):\n        self.empty_config = {}\n        self.bogus_config = {\"foo\": \"bar\"}\n        self.missing_from_config = {\"diagnosticMonitorConfiguration\": {\"foo\": \"bar\"}}\n        self.valid_config = \\\n            {\n                \"diagnosticMonitorConfiguration\":\n                    {\n                        \"foo\": \"bar\",\n                        \"eventVolume\": \"Large\",\n                        \"sinksConfig\": {\n                            \"Sink\": [\n                                {\n                                    \"name\": \"sink1\",\n                                    \"type\": \"EventHub\",\n                                    \"sasURL\": \"https://sbnamespace.servicebus.windows.net/raw?sr=https%3a%2f%2fsb\"\n                                              \"namespace.servicebus.windows.net%2fraw%2f&sig=SIGNATURE%3d\"\n                                              \"&se=1804371161&skn=writer\"\n                                }\n                            ]\n                        },\n                        \"metrics\": {\n                            \"resourceId\": \"/subscriptions/1111-2222-3333-4444/resourcegroups/RG1/compute/foo\",\n                            \"metricAggregation\": [\n                                {\"scheduledTransferPeriod\": \"PT5M\"},\n                                {\"scheduledTransferPeriod\": \"PT1H\"},\n                            ]\n                        },\n                        \"performanceCounters\": {\n                            \"sinks\": \"sink1\",\n                            \"performanceCounterConfiguration\": [\n                                {\n                                    \"type\": \"builtin\",\n                                    \"class\": \"Processor\",\n                                    \"counter\": \"PercentIdleTime\",\n                                    \"counterSpecifier\": \"/builtin/Processor/PercentIdleTime\",\n                                    \"condition\": \"IsAggregate=TRUE\",\n                                    \"sampleRate\": \"PT15S\",\n                                    \"unit\": \"Percent\",\n                                    
\"annotation\": [\n                                        {\n                                            \"displayName\": \"Aggregate CPU %idle time\",\n                                            \"locale\": \"en-us\"\n                                        }\n                                    ]\n                                }\n                            ]\n                        },\n                        \"syslogEvents\": {\n                            \"sinks\": \"sink2\",\n                            \"syslogEventConfiguration\": {\n                                \"LOG_LOCAL1\": \"LOG_INFO\",\n                                \"LOG_MAIL\": \"LOG_FATAL\"\n                            }\n                        }\n                    },\n                \"sampleRateInSeconds\": 60\n            }\n\n    def test_empty_config(self):\n        self.assertIsNone(LadUtil.getDiagnosticsMonitorConfigurationElement(self.empty_config, \"dummy\"))\n\n    def test_bogus_config(self):\n        self.assertIsNone(LadUtil.getDiagnosticsMonitorConfigurationElement(self.bogus_config, \"dummy\"))\n\n    def test_entry_not_present(self):\n        self.assertIsNone(LadUtil.getDiagnosticsMonitorConfigurationElement(self.missing_from_config, \"dummy\"))\n\n    def test_entry_is_present(self):\n        self.assertEqual(LadUtil.getDiagnosticsMonitorConfigurationElement(self.valid_config, \"foo\"), \"bar\")\n\n    def test_getDefaultSampleRateFromLadCfg(self):\n        self.assertEqual(LadUtil.getDefaultSampleRateFromLadCfg(self.valid_config), 60)\n\n    def test_getEventVolumeFromLadCfg(self):\n        self.assertEqual(LadUtil.getEventVolumeFromLadCfg(self.valid_config), \"Large\")\n\n    def test_getAggregationPeriodsFromLadCfg(self):\n        periods = LadUtil.getAggregationPeriodsFromLadCfg(self.valid_config)\n        self.assertEqual(len(periods), 2)\n        self.assertIn('PT5M', periods)\n        self.assertIn('PT1H', periods)\n\n    def test_getPerformanceCounterCfgFromLadCfg(self):\n        definitions = LadUtil.getPerformanceCounterCfgFromLadCfg(self.valid_config)\n        self.assertEqual(1, len(definitions))\n        metric = definitions[0]\n        self.assertIn('counterSpecifier', metric)\n        self.assertEqual('/builtin/Processor/PercentIdleTime', metric['counterSpecifier'])\n\n    def test_getResourceIdFromLadCfg(self):\n        self.assertIsNone(LadUtil.getResourceIdFromLadCfg(self.missing_from_config))\n        res_id = LadUtil.getResourceIdFromLadCfg(self.valid_config)\n        self.assertIsNotNone(res_id)\n        self.assertIn(\"1111-2222-3333-4444\", res_id)\n\n    def test_getFeatureWideSinksFromLadCfg(self):\n        self.assertEqual(LadUtil.getFeatureWideSinksFromLadCfg(self.valid_config, 'syslogEvents'), ['sink2'])\n        self.assertEqual(LadUtil.getFeatureWideSinksFromLadCfg(self.valid_config, 'performanceCounters'), ['sink1'])\n\n\nclass TestSinkConfiguration(unittest.TestCase):\n    def setUp(self):\n        self.config = \\\n            {\n                \"sink\": [\n                    {\n                        \"name\": \"sink1\",\n                        \"type\": \"EventHub\",\n                        \"sasURL\": \"https://sbnamespace.servicebus.windows.net/raw?sr=https%3a%2f%2fsb\"\n                                  \"namespace.servicebus.windows.net%2fraw%2f&sig=SIGNATURE%3d\"\n                                  \"&se=1804371161&skn=writer\"\n                    },\n                    {\n                        \"name\": \"sink2\",\n                        
\"type\": \"JsonBlob\"\n                    },\n                    {\n                        \"name\": \"sink3\",\n                        \"type\": \"EventHub\",\n                        \"sasURL\": \"https://sbnamespace2.servicebus.windows.net/raw?sr=https%3a%2f%2fsb\"\n                                  \"namespace.servicebus.windows.net%2fraw%2f&sig=SIGNATURE%3d\"\n                                  \"&se=99999999999&skn=writer\"\n                    }\n                ]\n            }\n        self.sink_config = LadUtil.SinkConfiguration()\n        self.sink_config.insert_from_config(self.config)\n\n    def test_insert_from_config(self):\n        json_config = {}\n        sinks = LadUtil.SinkConfiguration()\n        msgs = sinks.insert_from_config(json_config)\n        self.assertEqual(msgs, '')\n        json_config = {'sink': [{'Name': 'bad case'}]}\n        sinks = LadUtil.SinkConfiguration()\n        msgs = sinks.insert_from_config(json_config)\n        self.assertEqual(msgs, \"Ignoring invalid sink definition {'Name': 'bad case'}\")\n\n    def test_get_all_sink_names(self):\n        sinks = self.sink_config.get_all_sink_names()\n        self.assertEqual(len(sinks), len(self.config[\"sink\"]))\n        self.assertIn(\"sink1\", sinks)\n        for sink in self.config[\"sink\"]:\n            self.assertIn(sink[\"name\"], sinks)\n\n    def helper_get_sink_by_name(self, name, type, sasURL=False):\n        sink = self.sink_config.get_sink_by_name(name)\n        self.assertIsNotNone(sink)\n        self.assertEqual(sink['name'], name)\n        self.assertEqual(sink['type'], type)\n        if sasURL:\n            self.assertIn('sasURL', sink)\n\n    def test_get_sink_by_name(self):\n        self.assertIsNone(self.sink_config.get_sink_by_name(\"BogusSink\"))\n        self.helper_get_sink_by_name('sink1', 'EventHub', True)\n        self.helper_get_sink_by_name('sink2', 'JsonBlob')\n        self.helper_get_sink_by_name('sink3', 'EventHub', True)\n\n    def helper_get_sinks_by_type(self, type, names):\n        sink_list = self.sink_config.get_sinks_by_type(type)\n        self.assertEqual(len(sink_list), len(names))\n        # Ugly nested loops... Please suggest any better Pythonic code\n        names_from_sink_list = [sink['name'] for sink in sink_list]\n        for name in names:\n            self.assertIn(name, names_from_sink_list)\n\n    def test_get_sinks_by_type(self):\n        sink_list = self.sink_config.get_sinks_by_type(\"Bogus\")\n        self.assertEqual(len(sink_list), 0)\n        self.helper_get_sinks_by_type('EventHub', ['sink1', 'sink3'])\n        self.helper_get_sinks_by_type('JsonBlob', ['sink2'])\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/test_builtin.py",
    "content": "import unittest\nimport Providers.Builtin as BProvider\nimport Utils.ProviderUtil as ProvUtil\nfrom Utils.mdsd_xml_templates import entire_xml_cfg_tmpl\nimport xml.etree.ElementTree as ET\nimport json\nimport re\n\n\nclass TestBuiltinMetric(unittest.TestCase):\n    def setUp(self):\n        self.basic_valid = {\n            \"type\": \"builtin\",\n            \"class\": \"Processor\",\n            \"counter\": \"PercentIdleTime\",\n            \"counterSpecifier\": \"/builtin/Processor/PercentIdleTime\",\n            \"condition\": 'IsAggregate=TRUE',\n            \"sampleRate\": \"PT30S\",\n            \"unit\": \"Percent\",\n            \"annotation\": [\n                {\n                    \"displayName\": \"Aggregate CPU %idle time\",\n                    \"locale\": \"en-us\"\n                }\n            ]\n        }\n        self.mapped = {\n            \"type\": \"builtin\",\n            \"class\": \"filesystem\",\n            \"counter\": \"Freespace\",\n            \"counterSpecifier\": \"/builtin/Filesystem/Freespace(/)\",\n            \"condition\": 'Name=\"/\"',\n            \"unit\": \"Bytes\",\n            \"annotation\": [\n                {\n                    \"displayName\": \"Free space on /\",\n                    \"locale\": \"en-us\"\n                }\n            ]\n        }\n\n    def test_IsType(self):\n        try:\n            item = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertTrue(item.is_type('builtin'))\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n\n    def test_Class(self):\n        dupe = self.basic_valid.copy()\n        del dupe['class']\n        self.assertRaises(ProvUtil.InvalidCounterSpecification, BProvider.BuiltinMetric, dupe)\n        try:\n            metric = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertEqual(metric.class_name(), 'processor')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n\n    def test_Counter(self):\n        dupe = self.basic_valid.copy()\n        del dupe['counter']\n        self.assertRaises(ProvUtil.InvalidCounterSpecification, BProvider.BuiltinMetric, dupe)\n        try:\n            metric = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertEqual(metric.counter_name(), 'PercentIdleTime')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n        try:\n            metric = BProvider.BuiltinMetric(self.mapped)\n            self.assertEqual(metric.counter_name(), 'FreeMegabytes')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n\n    def test_condition(self):\n        dupe = self.basic_valid.copy()\n        del dupe['condition']\n        try:\n            metric = BProvider.BuiltinMetric(dupe)\n            self.assertIsNone(metric.condition())\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor (dupe) raised exception: {0}\".format(ex))\n        try:\n            metric = BProvider.BuiltinMetric(self.mapped)\n            self.assertEqual(metric.condition(), 'Name=\"/\"')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor (self.mapped) raised exception: {0}\".format(ex))\n        try:\n            metric = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertEqual(metric.condition(), 
'IsAggregate=TRUE')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor (self.basic_valid) raised exception: {0}\".format(ex))\n\n    def test_label(self):\n        dupe = self.basic_valid.copy()\n        del dupe['counterSpecifier']\n        self.assertRaises(ProvUtil.InvalidCounterSpecification, BProvider.BuiltinMetric, dupe)\n        try:\n            metric = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertEqual(metric.label(), '/builtin/Processor/PercentIdleTime')\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n\n    def test_sample_rate(self):\n        try:\n            metric = BProvider.BuiltinMetric(self.basic_valid)\n            self.assertEqual(metric.sample_rate(), 30)\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n        dupe = self.basic_valid.copy()\n        del dupe['sampleRate']\n        try:\n            metric = BProvider.BuiltinMetric(dupe)\n            self.assertEqual(metric.sample_rate(), 15)\n        except Exception as ex:\n            self.fail(\"BuiltinMetric Constructor raised exception: {0}\".format(ex))\n\n\nclass TestMakeXML(unittest.TestCase):\n    def setUp(self):\n        self.base_xml = entire_xml_cfg_tmpl\n\n    def test_two_and_two(self):\n        specs = [\n            {\n                \"type\": \"builtin\",\n                \"class\": \"Processor\",\n                \"counter\": \"PercentIdleTime\",\n                \"counterSpecifier\": \"/builtin/Processor/PercentIdleTime\",\n                \"condition\": \"IsAggregate=TRUE\",\n                \"sampleRate\": \"PT30S\",\n            },\n            {\n                \"type\": \"builtin\",\n                \"class\": \"filesystem\",\n                \"counter\": \"Freespace\",\n                \"counterSpecifier\": \"/builtin/Filesystem/Freespace(/)\",\n                \"condition\": \"Name='/'\",\n            },\n            {\n                \"type\": \"builtin\",\n                \"class\": \"Processor\",\n                \"counter\": \"PercentProcessorTime\",\n                \"counterSpecifier\": \"/builtin/Processor/PercentProcessorTime\",\n                \"condition\": \"IsAggregate=TRUE\",\n                \"sampleRate\": \"PT30S\",\n            },\n            {\n                \"type\": \"builtin\",\n                \"class\": \"filesystem\",\n                \"counter\": \"Freespace\",\n                \"counterSpecifier\": \"/builtin/Filesystem/Freespace(/mnt)\",\n                \"condition\": \"Name=\\\"/mnt\\\"\",\n            },\n        ]\n\n        sink_names = set()\n        for spec in specs:\n            try:\n                sink = BProvider.AddMetric(spec)\n                self.assertIsNotNone(sink)\n                sink_names.add(sink)\n            except Exception as ex:\n                self.fail(\"AddMetric({0}) raised exception: {1}\".format(spec, ex))\n        self.assertEqual(len(sink_names), 3)\n\n        doc = ET.ElementTree(ET.fromstring(self.base_xml))\n        BProvider.UpdateXML(doc)\n        # xml_string = ET.tostring(doc.getroot())\n        # print xml_string\n\n\nclass Lad2_3CompatiblePortalPublicSettingsGenerator(unittest.TestCase):\n\n    @unittest.skip(\"Lad2_3Compat test needs redesign to be useful outside of internal development environment\")\n    def test_lad_2_3_compatible_portal_public_settings(self):\n        \"\"\"\n        This is rather a utility 
function that attempts to generate a standard LAD 3.0 public settings JSON string\n        for the Azure Portal charts experience. Unit, displayName, and condition are inferred/auto-filled from\n        a sample Azure Insights metric definitions JSON pulled from ACIS.\n        \"\"\"\n        pub_settings = {\n            \"StorageAccount\": \"__DIAGNOSTIC_STORAGE_ACCOUNT__\",\n            \"ladCfg\": {\n                \"sampleRateInSeconds\": 15,\n                \"diagnosticMonitorConfiguration\": {\n                    \"eventVolume\": \"Medium\",\n                    \"metrics\": {\n                        \"metricAggregation\": [\n                            {\n                                \"scheduledTransferPeriod\": \"PT1H\"\n                            },\n                            {\n                                \"scheduledTransferPeriod\": \"PT1M\"\n                            }\n                        ],\n                        \"resourceId\": \"__VM_RESOURCE_ID__\"\n                    },\n                    \"performanceCounters\": {\n                        \"performanceCounterConfiguration\": []\n                    },\n                    \"syslogEvents\": {\n                        \"syslogEventConfiguration\": {\n                            'LOG_AUTH': 'LOG_DEBUG',\n                            'LOG_AUTHPRIV': 'LOG_DEBUG',\n                            'LOG_CRON': 'LOG_DEBUG',\n                            'LOG_DAEMON': 'LOG_DEBUG',\n                            'LOG_FTP': 'LOG_DEBUG',\n                            'LOG_KERN': 'LOG_DEBUG',\n                            'LOG_LOCAL0': 'LOG_DEBUG',\n                            'LOG_LOCAL1': 'LOG_DEBUG',\n                            'LOG_LOCAL2': 'LOG_DEBUG',\n                            'LOG_LOCAL3': 'LOG_DEBUG',\n                            'LOG_LOCAL4': 'LOG_DEBUG',\n                            'LOG_LOCAL5': 'LOG_DEBUG',\n                            'LOG_LOCAL6': 'LOG_DEBUG',\n                            'LOG_LOCAL7': 'LOG_DEBUG',\n                            'LOG_LPR': 'LOG_DEBUG',\n                            'LOG_MAIL': 'LOG_DEBUG',\n                            'LOG_NEWS': 'LOG_DEBUG',\n                            'LOG_SYSLOG': 'LOG_DEBUG',\n                            'LOG_USER': 'LOG_DEBUG',\n                            'LOG_UUCP': 'LOG_DEBUG'\n                        }\n                    }\n                }\n            }\n        }\n        each_perf_counter_cfg_template = {\n            \"unit\": \"__TO_BE_FILLED__\",\n            \"type\": \"builtin\",\n            \"class\": \"__TO_BE_REPLACED_BY_CODE\",\n            \"counter\": \"__TO_BE_REPLACED_BY_CODE__\",\n            \"counterSpecifier\": \"__TO_BE_REPLACED_BY_CODE__\",\n            \"annotation\": \"__TO_BE_FILLED__\",  # Needs to be assigned a new instance to avoid shallow copy\n            # [\n            #     {\n            #         \"locale\": \"en-us\",\n            #         \"displayName\": \"__TO_BE_FILLED__\"\n            #     }\n            # ],\n            \"condition\": \"__TO_BE_FILLED__\"\n        }\n\n        perf_counter_cfg_list = pub_settings['ladCfg']['diagnosticMonitorConfiguration']['performanceCounters']['performanceCounterConfiguration']\n        units_and_names = self.extract_perf_counter_units_and_names_from_metrics_def_sample()\n\n        for class_name in BProvider._builtIns:\n            for lad_counter_name, scx_counter_name in BProvider._builtIns[class_name].iteritems():\n                perf_counter_cfg = 
dict(each_perf_counter_cfg_template)\n                perf_counter_cfg['class'] = class_name\n                perf_counter_cfg['counter'] = lad_counter_name\n                counter_specifier = '/builtin/{0}/{1}'.format(class_name, lad_counter_name)\n                perf_counter_cfg['counterSpecifier'] = counter_specifier\n                perf_counter_cfg['condition'] = BProvider.default_condition(class_name)\n                if not perf_counter_cfg['condition']:\n                    del perf_counter_cfg['condition']\n                counter_specifier_with_scx_name = '/builtin/{0}/{1}'.format(class_name.title(), scx_counter_name)\n                if counter_specifier_with_scx_name in units_and_names:\n                    perf_counter_cfg['unit'] = units_and_names[counter_specifier_with_scx_name]['unit']\n                    perf_counter_cfg['annotation'] = [{\n                        'displayName': units_and_names[counter_specifier_with_scx_name]['displayName'],\n                        'locale': 'en-us'\n                    }]\n                else:\n                    # Use some ad hoc logic to auto-fill missing values (all from FileSystem class)\n                    perf_counter_cfg['unit'] = self.inferred_unit_name_from_counter_name(scx_counter_name)\n                    perf_counter_cfg['annotation'] = [{\n                        'displayName': self.inferred_display_name_from_class_counter_names(class_name, scx_counter_name),\n                        'locale': 'en-us'\n                    }]\n                perf_counter_cfg_list.append(perf_counter_cfg)\n\n        actual = json.dumps(pub_settings, sort_keys=True, indent=2)\n        print actual\n        # Uncomment the following 2 lines when generating expected JSON file (of course after validating the actual)\n        #with open('lad_2_3_compatible_portal_pub_settings.json', 'w') as f:\n        #    f.write(actual)\n        with open('lad_2_3_compatible_portal_pub_settings.json') as f:\n            expected = f.read()\n        self.assertEqual(json.dumps(json.loads(expected), sort_keys=True),\n                         json.dumps(json.loads(actual), sort_keys=True))\n        to_be_filled = re.findall(r'\"__.*?__\"', actual)\n        self.assertEqual(2, len(to_be_filled))\n        self.assertIn('\"__DIAGNOSTIC_STORAGE_ACCOUNT__\"', to_be_filled)\n        self.assertIn('\"__VM_RESOURCE_ID__\"', to_be_filled)\n\n    def inferred_unit_name_from_counter_name(self, scx_counter_name):\n        if 'Percent' in scx_counter_name:\n            return 'Percent'\n        if re.match(r'Bytes.*PerSecond', scx_counter_name):\n            return 'BytesPerSecond'  # According to the ACIS-pulled metric definitions sample...\n        if 'PerSecond' in scx_counter_name:\n            return 'CountPerSecond'  # Again according to the ACIS-pulled metric defs sample...\n        if scx_counter_name in BProvider._scaling['memory'] or scx_counter_name in BProvider._scaling['filesystem']:\n            return 'Bytes'  # Scaled MiB to Bytes counters, so use Bytes as unit\n        raise Exception(\"Can't infer unit name from scx counter name ({0})\".format(scx_counter_name))\n\n    def inferred_display_name_from_class_counter_names(self, class_name, scx_counter_name):\n        desc = scx_counter_name\n        desc = desc.replace('PerSecond', '/sec')\n        desc = ' '.join([word.lower() for word in re.findall('[A-Z]+[^A-Z]*', desc)])\n        desc = desc.replace('percent', '%').replace('megabytes', 'space')\n        return '{0} {1}'.format(class_name.title(), 
desc)\n\n    def extract_perf_counter_units_and_names_from_metrics_def_sample(self):\n        \"\"\"\n        Another utility function that extracts perf counter units and display names from an Azure metrics\n        definition sample file (not included in the repo). Again this is to be used only manually under\n        the desired environment when needed.\n        :return: Dictionary of counter specifier to unit/displayName map.\n        \"\"\"\n        results = {}\n        metric_definitions = {}\n        with open('lad_2_3_metric_definitions_sample.json') as f:\n            metric_definitions = json.load(f)\n        for dict_item in metric_definitions['value']:  # This is a list of dictionaries for all metrics\n            # E.g., '\\\\Memory\\\\AvailableMemory' to '/builtin/Memory/AvailableMemory'\n            # Also, Azure Insights uses 'PhysicalDisk' and 'NetworkInterface' instead of 'Disk' and 'Network',\n            # so replace them as well.\n            counter_specifier = '/builtin{0}'.format(dict_item['name']['value'].replace('\\\\', '/')\n                                                     .replace('PhysicalDisk', 'Disk')\n                                                     .replace('NetworkInterface', 'Network'))\n            display_name = dict_item['name']['localizedValue']  # E.g., 'Memory available'\n            unit = dict_item['unit']  # E.g., 'Bytes'\n            results[counter_specifier] = { 'unit': unit, 'displayName': display_name }\n        return results\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/test_commonActions.py",
    "content": "import unittest\nimport os\nimport errno\nimport platform\nimport time\nimport string\nimport random\nimport DistroSpecific\nfrom Utils.WAAgentUtil import waagent\n\n\nclass TestCommonActions(unittest.TestCase):\n    _pid = os.getpid()\n    _sequence = 0\n    _messages = []\n    _distro = None\n\n    def make_temp_filename(self):\n        self._sequence += 1\n        return '/tmp/TestCommonActions_{0}_{1}_{2}'.format(self._pid, time.time(), self._sequence)\n\n    def log(self, message):\n        self._messages.append(message)\n\n    @staticmethod\n    def random_string(size, charset=string.ascii_uppercase + string.digits):\n        return ''.join(random.SystemRandom().choice(charset) for _ in range(size))\n\n    def setUp(self):\n        dist = platform.dist()\n        self._messages = []\n        self._distro = DistroSpecific.get_distro_actions(dist[0], dist[1], self.log)\n\n    def tearDown(self):\n        pass\n\n    def test_log_run_get_output_silent_success(self):\n        (error, results) = self._distro.log_run_get_output('/bin/true')\n        self.assertEqual(error, 0)\n        self.assertEqual(results, '')\n\n    def test_log_run_get_output_success(self):\n        expected = TestCommonActions.random_string(50) + '\\n'\n        filename = self.make_temp_filename()\n        with open(filename, 'w') as f:\n            f.write(expected)\n        (error, results) = self._distro.log_run_get_output('cat {0}'.format(filename))\n        os.remove(filename)\n        self.assertEqual(results, expected)\n        self.assertEqual(error, 0)\n\n    def test_log_run_get_output_failure(self):\n        bad_file= '/bin/ThIsDoEsNoTeXiSt'\n        (error, results) = self._distro.log_run_get_output(bad_file)\n        self.assertEqual(127, error)\n        self.assertIn(bad_file, results)    # Should be an error message talking about the non-existent file\n\n    def test_log_run_ignore_output(self):\n        filename = self.make_temp_filename()\n        try:\n            os.remove(filename)\n        except OSError as e:\n            if e.errno != errno.ENOENT:\n                self.fail(\"Pre-test os.delete({0}) returned {1}\".format(filename, errno.errorcode[e.errno]))\n        error = self._distro.log_run_ignore_output(\"touch {0}\".format(filename))\n        self.assertEqual(error, 0)\n        try:\n            os.remove(filename)\n        except IOError as e:\n            if e.errno == errno.ENOENT:\n                self.fail(\"Test command did not properly execute\")\n            else:\n                self.fail(\"Post-test os.delete({0}) returned {1}\".format(filename, errno.errorcode[e.errno]))\n\n    def test_log_run_with_timeout_force_timeout(self):\n        (status, output) = self._distro.log_run_with_timeout(\"sleep 10; echo sleep done\", timeout=5)\n        self.assertEqual(output, 'Process timeout\\n')\n        self.assertEqual(status, 1)\n\n    def test_log_run_with_timeout_without_timeout(self):\n        (status, output) = self._distro.log_run_with_timeout(\"echo success; exit 2\", timeout=5)\n        self.assertEqual(output, 'success\\n')\n        self.assertEqual(status, 2)\n\n    def test_log_run_multiple_cmds(self):\n        expected = 'foo\\nbar\\n'\n        cmds = ('echo foo', 'echo bar')\n        error, output = self._distro.log_run_multiple_cmds(cmds, False)\n        self.assertEqual(error, 0)\n        self.assertEqual(output, expected)\n\n    def test_log_run_multiple_cmds_no_timeout(self):\n        expected = 'foo\\nbar\\n'\n        cmds = ('echo foo', 'echo 
bar')\n        error, output = self._distro.log_run_multiple_cmds(cmds, True)\n        self.assertEqual(error, 0)\n        self.assertEqual(output, expected)\n\n    def test_log_run_multiple_cmds_partial_timeout(self):\n        expected = 'Process timeout\\nbar\\n'\n        cmds = ('sleep 30; echo foo', 'echo bar')\n        error, output = self._distro.log_run_multiple_cmds(cmds, True, 5)\n        self.assertEqual(error, 1)\n        self.assertEqual(output, expected)\n\n\nif __name__ == '__main__':\n    waagent.LoggerInit('waagent.verbose.log', None, True)\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/test_lad_config_all.py",
    "content": "# Make LadConfigAll class unittest-able here.\n# To achieve that, the following were done:\n# - Mock VM's cert/prv key files (w/ thumbprint) that's used for decrypting the extensions's protected settings\n#   and for encrypting storage key/SAS token in mdsd XML file\n# - Mock a complete LAD extension's handler setting (that includes protected settings and public settings).\n# - Mock RunGetOutput for external command executions.\n# - Mock any other things that are necessary!\n# It'd be easiest to create a test VM w/ LAD enabled and copy out necessary files to here to be used for this test.\n# The test VM was destroyed immediately. A test storage account was used and deleted immediately.\n# TODO Try to generate priv key/cert/storage shared key dynamically here.\n\nimport binascii\nimport json\nimport os\nimport unittest\nfrom xml.etree import ElementTree as ET\n# This test suite uses xmlunittest package. Install it by running 'pip install xmlunittest'.\n# Documentation at http://python-xmlunittest.readthedocs.io/en/latest/\nfrom xmlunittest import XmlTestMixin\n\nfrom Utils.lad_ext_settings import *\n# The following line will work on an Azure Linux VM (where waagent is installed), but fail on a non-Azure Linux VM\n# (because of no waagent). It's because lad_config_all.py will import misc_helpers.py, which will try to import\n# waagent from WAAgentUtil.py.\n# To work around this on a non-Azure Linux VM, define PYTHONPATH env var\n# with \"azure-linux-extensions/Common/WALinuxAgent-2.0.16\" included in it.\n# E.g., run 'export PYTHONPATH=<gitroot>/azure-linux-extensions/Common/WALinuxAgent-2.0.16' before running this test.\n#\n# Also, if you're trying to execute this test on a Windows system rather than under Linux, the waagent code relies on\n# three Linux-only modules you'll need to mock out: crypt(crypt()), pwd(getpwnam()), and fcntl(ioctl()).\nfrom lad_config_all import *\n\n# Mocked waagent/LAD dir/files\ntest_waagent_dir = os.path.join(os.path.dirname(__file__), 'var_lib_waagent')\ntest_lad_dir = os.path.join(test_waagent_dir, 'lad_dir')\ntest_lad_settings_logging_json_file = os.path.join(test_lad_dir, 'config', 'lad_settings_logging.json')\ntest_lad_settings_metric_json_file = os.path.join(test_lad_dir, 'config', 'lad_settings_metric.json')\n\n\n# Mocked functions\n\n# We're not really interested in testing the ability to decrypt the private settings; that's tested elsewhere.\n# Instead, we assume the test handlerSettings object contains the decrypted Private settings already, since we just\n# need to test our ability to read and manipulate those settings.\ndef decrypt_protected_settings(handlerSettings):\n    pass\n\n\ndef print_content_with_header(header_text, content):\n    header = '>>>>> ' + header_text + ' >>>>>'\n    print header\n    print content\n    print '<' * len(header)\n    print\n\n\ndef mock_fetch_uuid():\n    return \"DEADBEEF-0000-1111-2222-77DEADBEEF77\"\n\n\ndef mock_encrypt_secret(cert, secret):\n    # Encode secret w/ binascii.hex() to avoid invalid chars in XML.\n    # The actual/real return value of the non-mocked encrypt_secret() is in that form.\n    # We still keep the \"ENCRYPTED(...)\" part here to show that clearly in our test outputs.\n    secret = binascii.b2a_hex(secret).upper()\n    return \"ENCRYPTED({0},{1})\".format(cert, secret)\n\n\ndef mock_log_info(msg):\n    print 'LOG:', msg\n\n\ndef mock_log_error(msg):\n    print 'ERROR:', msg\n\n\ndef load_test_config(filename):\n    \"\"\"\n    Load a test configuration into a LadConfigAll 
object\n    :param filename: Name of config file\n    :rtype: LadConfigAll\n    :return: Loaded configuration\n    \"\"\"\n    with open(filename) as f:\n        handler_settings = json.loads(f.read())['runtimeSettings'][0]['handlerSettings']\n    decrypt_protected_settings(handler_settings)\n    lad_settings = LadExtSettings(handler_settings)\n\n    return LadConfigAll(lad_settings, test_lad_dir, '', 'test_lad_deployment_id', mock_fetch_uuid,\n                        mock_encrypt_secret, mock_log_info, mock_log_error)\n\n\nclass LadConfigAllTest(unittest.TestCase, XmlTestMixin):\n    def test_lad_config_all_logging_only(self):\n        \"\"\"\n        Perform basic LadConfigAll object tests with logging-only configs,\n        like generating various configs and validating them.\n        \"\"\"\n        lad_cfg = load_test_config(test_lad_settings_logging_json_file)\n        result, msg = lad_cfg.generate_all_configs()\n        self.assertTrue(result, 'Config generation failed: ' + msg)\n\n        with open(os.path.join(test_lad_dir, 'xmlCfg.xml')) as f:\n            mdsd_xml_cfg = f.read()\n        print_content_with_header('Generated mdsd XML cfg for logging-only LAD settings', mdsd_xml_cfg)\n        self.assertTrue(mdsd_xml_cfg, 'Empty mdsd XML config is invalid!')\n\n        rsyslog_cfg = lad_cfg.get_rsyslog_config()\n        print_content_with_header('Generated rsyslog cfg', rsyslog_cfg)\n        self.assertTrue(rsyslog_cfg, 'Empty rsyslog cfg is invalid')\n\n        syslog_ng_cfg = lad_cfg.get_syslog_ng_config()\n        print_content_with_header('Generated syslog-ng cfg', syslog_ng_cfg)\n        self.assertTrue(syslog_ng_cfg, 'Empty syslog-ng cfg is invalid')\n\n        fluentd_out_mdsd_cfg = lad_cfg.get_fluentd_out_mdsd_config()\n        print_content_with_header('Generated fluentd out_mdsd cfg', fluentd_out_mdsd_cfg)\n        self.assertTrue(fluentd_out_mdsd_cfg, 'Empty fluentd out_mdsd cfg is invalid')\n\n        fluentd_syslog_src_cfg = lad_cfg.get_fluentd_syslog_src_config()\n        print_content_with_header('Generated fluentd syslog src cfg', fluentd_syslog_src_cfg)\n        self.assertTrue(fluentd_syslog_src_cfg, 'Empty fluentd syslog src cfg is invalid')\n\n        fluentd_tail_src_cfg = lad_cfg.get_fluentd_tail_src_config()\n        print_content_with_header('Generated fluentd tail src cfg', fluentd_tail_src_cfg)\n        self.assertTrue(fluentd_tail_src_cfg, 'Empty fluentd tail src cfg is invalid')\n\n    def test_lad_config_all_metric_only(self):\n        \"\"\"\n        Perform basic LadConfigAll object tests with metric-only configs,\n        like generating various configs and validating them.\n        \"\"\"\n        lad_cfg = load_test_config(test_lad_settings_metric_json_file)\n        result, msg = lad_cfg.generate_all_configs()\n        self.assertTrue(result, 'Config generation failed: ' + msg)\n\n        with open(os.path.join(test_lad_dir, 'xmlCfg.xml')) as f:\n            mdsd_xml_cfg = f.read()\n        print_content_with_header('Generated mdsd XML cfg for metric-only LAD settings', mdsd_xml_cfg)\n        self.assertTrue(mdsd_xml_cfg, 'Empty mdsd XML config is invalid!')\n\n        # Verify using xmlunittests\n        root = self.assertXmlDocument(mdsd_xml_cfg)\n        expected_xml_str = \"\"\"\n<MonitoringManagement eventVersion=\"2\" namespace=\"\" timestamp=\"2017-03-27T19:45:00.000\" version=\"1.0\">\n  <Accounts>\n    <SharedAccessSignature account=\"ladunittestfakeaccount\" decryptKeyPath=\"B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.prv\" 
isDefault=\"true\" key=\"ENCRYPTED(B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.crt,4E4F545F415F5245414C5F544F4B454E)\" moniker=\"moniker\" tableEndpoint=\"https://ladunittestfakeaccount.table.core.windows.net/\" blobEndpoint=\"https://ladunittestfakeaccount.blob.core.windows.net/\" />\n  </Accounts>\n\n  <Management defaultRetentionInDays=\"90\" eventVolume=\"Large\">\n    <Identity>\n      <IdentityComponent name=\"DeploymentId\">test_lad_deployment_id</IdentityComponent>\n      <IdentityComponent name=\"Host\" useComputerName=\"true\" />\n    </Identity>\n    <AgentResourceUsage diskQuotaInMB=\"50000\" />\n  <OboDirectPartitionField name=\"resourceId\" value=\"ladtest_resource_id\" />\n  <OboDirectPartitionField name=\"agentIdentityHash\" value=\"DEADBEEF-0000-1111-2222-77DEADBEEF77\" />\n  </Management>\n\n  <Schemas>\n  </Schemas>\n\n  <Sources>\n  </Sources>\n\n  <Events>\n    <MdsdEvents>\n    </MdsdEvents>\n\n    <OMI>\n    <OMIQuery cqlQuery=\"SELECT FreeMegabytes FROM SCX_FileSystemStatisticalInformation WHERE Name='/'\" eventName=\"builtin000003\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n  <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"FreeMegabytes\">\n    <MapName name=\"FreeMegabytes\" scaleUp=\"1048576\">/builtin/filesystem/freespace(/mnt)</MapName>\n  </Unpivot>\n</OMIQuery><OMIQuery cqlQuery=\"SELECT UsedMegabytes FROM SCX_FileSystemStatisticalInformation WHERE Name=&quot;/&quot;\" eventName=\"builtin000002\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n  <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"UsedMegabytes\">\n    <MapName name=\"UsedMegabytes\" scaleUp=\"1048576\">/builtin/filesystem/usedspace</MapName>\n  </Unpivot>\n</OMIQuery><OMIQuery cqlQuery=\"SELECT PercentProcessorTime FROM SCX_ProcessorStatisticalInformation WHERE IsAggregate=TRUE\" eventName=\"builtin000001\" omiNamespace=\"root/scx\" sampleRateInSeconds=\"15\" storeType=\"local\">\n  <Unpivot columnName=\"CounterName\" columnValue=\"Value\" columns=\"PercentProcessorTime\">\n    <MapName name=\"PercentProcessorTime\">/builtin/processor/PercentProcessorTime</MapName>\n  </Unpivot>\n</OMIQuery>\n  <OMIQuery cqlQuery=\"SELECT PercentAvailableMemory, PercentUsedSwap FROM SCX_MemoryStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"LinuxMemory\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"300\" />\n  <OMIQuery cqlQuery=\"SELECT PercentAvailableMemory, PercentUsedSwap FROM SCX_MemoryStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"LinuxMemoryEventHub\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"300\" storeType=\"local\" />\n  <OMIQuery cqlQuery=\"SELECT PercentProcessorTime FROM SCX_ProcessorStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"ProcessorInfoJsonBlob\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"60\" storeType=\"JsonBlob\" />\n  <OMIQuery cqlQuery=\"SELECT PercentProcessorTime FROM SCX_ProcessorStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"ProcessorInfoEventHub\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"60\" storeType=\"local\" />\n  <OMIQuery cqlQuery=\"SELECT FreeMegabytes FROM SCX_FileSystemStatisticalInformation\" dontUsePerNDayTable=\"true\" eventName=\"LinuxFileSystem\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"300\" />\n  <OMIQuery cqlQuery=\"SELECT FreeMegabytes FROM SCX_FileSystemStatisticalInformation\" 
dontUsePerNDayTable=\"true\" eventName=\"FileSystemJsonBlob\" omiNamespace=\"root/scx\" priority=\"High\" sampleRateInSeconds=\"300\" storeType=\"JsonBlob\" />\n  </OMI>\n\n    <DerivedEvents>\n    <DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"builtin000001\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent><DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"builtin000001\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent><DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"builtin000002\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent><DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"builtin000002\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent><DerivedEvent duration=\"PT1H\" eventName=\"WADMetricsPT1HP10DV2S\" isFullName=\"true\" source=\"builtin000003\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent><DerivedEvent duration=\"PT1M\" eventName=\"WADMetricsPT1MP10DV2S\" isFullName=\"true\" source=\"builtin000003\" storeType=\"Central\">\n<LADQuery columnName=\"CounterName\" columnValue=\"Value\" instanceID=\"\" partitionKey=\"ladtest:005Fresource:005Fid\" />\n</DerivedEvent></DerivedEvents>\n  </Events>\n\n  <EventStreamingAnnotations>\n  <EventStreamingAnnotation name=\"LinuxMemoryEventHub\">\n       <EventPublisher>\n         <Key decryptKeyPath=\"B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.prv\">ENCRYPTED(B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.crt,68747470733A2F2F66616B65267361732575726C3B31)</Key>\n       </EventPublisher>\n    </EventStreamingAnnotation><EventStreamingAnnotation name=\"ProcessorInfoEventHub\">\n       <EventPublisher>\n         <Key decryptKeyPath=\"B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.prv\">ENCRYPTED(B175B535DFE9F93659E5AFD893BF99BBF9DF28A5.crt,68747470733A2F2F66616B65267361732575726C3B32)</Key>\n       </EventPublisher>\n    </EventStreamingAnnotation></EventStreamingAnnotations>\n\n</MonitoringManagement>\n\n\"\"\"\n        # The following is at least insensitive to whitespaces... 
Also it's way more complicated\n        # to create XPaths for this, so just use the following API.\n        self.assertXmlEquivalentOutputs(mdsd_xml_cfg, expected_xml_str)\n\n    def test_update_metric_collection_settings(self):\n        test_config = \\\n            {\n                \"diagnosticMonitorConfiguration\":\n                    {\n                        \"foo\": \"bar\",\n                        \"eventVolume\": \"Large\",\n                        \"sinksConfig\": {\n                            \"sink\": [\n                                {\n                                    \"name\": \"sink1\",\n                                    \"type\": \"EventHub\",\n                                    \"sasURL\": \"https://sbnamespace.servicebus.windows.net/raw?sr=https%3a%2f%2fsb\"\n                                              \"namespace.servicebus.windows.net%2fraw%2f&sig=SIGNATURE%3d\"\n                                              \"&se=1804371161&skn=writer\"\n                                }\n                            ]\n                        },\n                        \"metrics\": {\n                            \"resourceId\": \"/subscriptions/1111-2222-3333-4444/resourcegroups/RG1/compute/foo\",\n                            \"metricAggregation\": [\n                                {\"scheduledTransferPeriod\": \"PT5M\"},\n                                {\"scheduledTransferPeriod\": \"PT1H\"},\n                            ]\n                        },\n                        \"performanceCounters\": {\n                            \"sinks\": \"sink1\",\n                            \"performanceCounterConfiguration\": [\n                                {\n                                    \"type\": \"builtin\",\n                                    \"class\": \"Processor\",\n                                    \"counter\": \"PercentIdleTime\",\n                                    \"counterSpecifier\": \"/builtin/Processor/PercentIdleTime\",\n                                    \"condition\": \"IsAggregate=TRUE\",\n                                    \"sampleRate\": \"PT15S\",\n                                    \"unit\": \"Percent\",\n                                    \"annotation\": [\n                                        {\n                                            \"displayName\": \"Aggregate CPU %idle time\",\n                                            \"locale\": \"en-us\"\n                                        }\n                                    ]\n                                }\n                            ]\n                        },\n                        \"syslogEvents\": {\n                            \"syslogEventConfiguration\": {\n                                \"LOG_LOCAL1\": \"LOG_INFO\",\n                                \"LOG_MAIL\": \"LOG_FATAL\"\n                            }\n                        }\n                    },\n                \"sampleRateInSeconds\": 60\n            }\n\n        test_sinks_config = \\\n            {\n                \"sink\": [\n                    {\n                        \"name\": \"sink1\",\n                        \"type\": \"EventHub\",\n                        \"sasURL\": \"https://sbnamespace.servicebus.windows.net/raw?sr=https%3a%2f%2fsb\"\n                                  \"namespace.servicebus.windows.net%2fraw%2f&sig=SIGNATURE%3d\"\n                                  \"&se=1804371161&skn=writer\"\n                    }\n                ]\n            }\n\n        configurator = 
load_test_config(test_lad_settings_logging_json_file)\n        configurator._sink_configs.insert_from_config(test_sinks_config)\n        configurator._update_metric_collection_settings(test_config)\n        print ET.tostring(configurator._mdsd_config_xml_tree.getroot())\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/test_lad_ext_settings.py",
    "content": "import json\nimport unittest\n\nfrom Utils.lad_ext_settings import *\n\nclass LadExtSettingsTest(unittest.TestCase):\n\n    def setUp(self):\n        handler_settings_sample_in_str = \"\"\"\n{\n  \"protectedSettings\": {\n    \"storageAccountName\": \"mystgacct\",\n    \"storageAccountSasToken\": \"SECRET\",\n    \"sinksConfig\": {\n      \"sink\": [\n        {\n          \"type\": \"JsonBlob\",\n          \"name\": \"JsonBlobSink1\"\n        },\n        {\n          \"type\": \"JsonBlob\",\n          \"name\": \"JsonBlobSink2\"\n        },\n        {\n          \"type\": \"EventHub\",\n          \"name\": \"EventHubSink1\",\n          \"sasURL\": \"SECRET\"\n        },\n        {\n          \"type\": \"EventHub\",\n          \"name\": \"EventHubSink2\",\n          \"sasURL\": \"SECRET\"\n        }\n      ]\n    }\n  },\n  \"publicSettings\": {\n    \"StorageAccount\": \"mystgacct\",\n    \"sampleRateInSeconds\": 15,\n    \"fileLogs\": [\n      {\n        \"sinks\": \"EventHubSink1\",\n        \"file\": \"/var/log/myladtestlog\"\n      }\n    ]\n  }\n}\n\"\"\"\n        self._lad_settings = LadExtSettings(json.loads(handler_settings_sample_in_str))\n\n    def test_redacted_handler_settings(self):\n        expected = \"\"\"\n{\n  \"protectedSettings\": {\n    \"sinksConfig\": {\n      \"sink\": [\n        {\n          \"name\": \"JsonBlobSink1\",\n          \"type\": \"JsonBlob\"\n        },\n        {\n          \"name\": \"JsonBlobSink2\",\n          \"type\": \"JsonBlob\"\n        },\n        {\n          \"name\": \"EventHubSink1\",\n          \"sasURL\": \"REDACTED_SECRET\",\n          \"type\": \"EventHub\"\n        },\n        {\n          \"name\": \"EventHubSink2\",\n          \"sasURL\": \"REDACTED_SECRET\",\n          \"type\": \"EventHub\"\n        }\n      ]\n    },\n    \"storageAccountName\": \"mystgacct\",\n    \"storageAccountSasToken\": \"REDACTED_SECRET\"\n  },\n  \"publicSettings\": {\n    \"StorageAccount\": \"mystgacct\",\n    \"fileLogs\": [\n      {\n        \"file\": \"/var/log/myladtestlog\",\n        \"sinks\": \"EventHubSink1\"\n      }\n    ],\n    \"sampleRateInSeconds\": 15\n  }\n}\n\"\"\"\n        actual_json = json.loads(self._lad_settings.redacted_handler_settings())\n        print json.dumps(actual_json, sort_keys=True, indent=2)\n        self.assertEqual(json.dumps(json.loads(expected), sort_keys=True),\n                         json.dumps(actual_json, sort_keys=True))\n        # Validate that the original wasn't modified (that is, redaction should be on a deep copy)\n        print \"===== Original handler setting (shouldn't be redacted, must be different from the deep copy) =====\"\n        print json.dumps(self._lad_settings.get_handler_settings(), sort_keys=True, indent=2)\n        self.assertNotEqual(json.dumps(self._lad_settings.get_handler_settings(), sort_keys=True),\n                            json.dumps(actual_json, sort_keys=True))\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/test_lad_logging_config.py",
    "content": "import unittest\nimport json\nfrom xml.etree import ElementTree as ET\n# This test suite uses xmlunittest package. Install it by running 'pip install xmlunittest'.\n# Documentation at http://python-xmlunittest.readthedocs.io/en/latest/\nfrom xmlunittest import XmlTestMixin\n\nfrom Utils.lad_logging_config import *\nfrom Utils.omsagent_util import get_syslog_ng_src_name\nfrom Utils.mdsd_xml_templates import entire_xml_cfg_tmpl\nimport Utils.LadDiagnosticUtil as LadUtil\nfrom tests.test_lad_config_all import mock_encrypt_secret\n\n\nclass LadLoggingConfigTest(unittest.TestCase, XmlTestMixin):\n\n    def setUp(self):\n        \"\"\"\n        Create LadLoggingConfig objects for use by test cases\n        \"\"\"\n        # \"syslogEvents\" LAD config example\n        syslogEvents_json_ext_settings = \"\"\"\n            {\n                \"sinks\": \"SyslogJsonBlob,SyslogEventHub\",\n                \"syslogEventConfiguration\": {\n                    \"LOG_LOCAL0\": \"LOG_CRIT\",\n                    \"LOG_USER\": \"LOG_ERR\"\n                }\n            }\n            \"\"\"\n        # \"fileLogs\" LAD config example\n        fileLogs_json_ext_settings = \"\"\"\n            [\n                {\n                    \"file\": \"/var/log/mydaemonlog1\",\n                    \"table\": \"MyDaemon1Events\",\n                    \"sinks\": \"Filelog1JsonBlob,FilelogEventHub\"\n                },\n                {\n                    \"file\": \"/var/log/mydaemonlog2\",\n                    \"table\": \"MyDaemon2Events\",\n                    \"sinks\": \"Filelog2JsonBlob\"\n                }\n            ]\n            \"\"\"\n        # \"sinksConfig\" LAD config example\n        sinksConfig_json_ext_settings = \"\"\"\n            {\n                \"sink\": [\n                    {\n                        \"name\": \"SyslogEventHub\",\n                        \"type\": \"EventHub\",\n                        \"sasURL\": \"https://fake&sas%url;for_syslog_eh\"\n                    },\n                    {\n                        \"name\": \"SyslogJsonBlob\",\n                        \"type\": \"JsonBlob\"\n                    },\n                    {\n                        \"name\": \"FilelogEventHub\",\n                        \"type\": \"EventHub\",\n                        \"sasURL\": \"https://fake&sas%url;for_filelog_eh\"\n                    },\n                    {\n                        \"name\": \"Filelog1JsonBlob\",\n                        \"type\": \"JsonBlob\"\n                    },\n                    {\n                        \"name\": \"Filelog2JsonBlob\",\n                        \"type\": \"JsonBlob\"\n                    }\n                ]\n            }\n            \"\"\"\n\n        sinksConfig = LadUtil.SinkConfiguration()\n        sinksConfig.insert_from_config(json.loads(sinksConfig_json_ext_settings))\n        syslogEvents = json.loads(syslogEvents_json_ext_settings)\n        mock_pkey_path = \"/waagent/dir/mock_pkey.prv\"\n        mock_cert_path = \"/waagent/dir/mock_cert.crt\"\n        self.cfg_syslog = LadLoggingConfig(syslogEvents, None, sinksConfig, mock_pkey_path, mock_cert_path, mock_encrypt_secret)\n        fileLogs = json.loads(fileLogs_json_ext_settings)\n        self.cfg_filelog = LadLoggingConfig(None, fileLogs, sinksConfig, mock_pkey_path, mock_cert_path, mock_encrypt_secret)\n        self.cfg_none = LadLoggingConfig(None, None, sinksConfig, mock_pkey_path, mock_cert_path, mock_encrypt_secret)\n\n        # XPaths 
representations of expected XML outputs, for use with the xmlunittest package\n        self.oms_syslog_expected_xpaths = ('./Sources/Source[@name=\"mdsd.syslog\" and @dynamic_schema=\"true\"]',\n                                           './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.syslog\"]',\n                                           './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.syslog\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"LinuxSyslog\" and @priority=\"High\"]',\n                                           './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.syslog\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"SyslogJsonBlob\" and @priority=\"High\" and @storeType=\"JsonBlob\"]',\n                                           './EventStreamingAnnotations/EventStreamingAnnotation[@name=\"mdsd.syslog\"]/EventPublisher/Key',  # TODO Perform CDATA validation\n                                          )\n        self.oms_filelog_expected_xpaths = ('./Sources/Source[@name=\"mdsd.filelog.var.log.mydaemonlog1\" and @dynamic_schema=\"true\"]',\n                                            './Sources/Source[@name=\"mdsd.filelog.var.log.mydaemonlog2\" and @dynamic_schema=\"true\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog1\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog1\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"MyDaemon1Events\" and @priority=\"High\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog1\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"Filelog1JsonBlob\" and @priority=\"High\" and @storeType=\"JsonBlob\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog2\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog2\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"MyDaemon2Events\" and @priority=\"High\"]',\n                                            './Events/MdsdEvents/MdsdEventSource[@source=\"mdsd.filelog.var.log.mydaemonlog2\"]/RouteEvent[@dontUsePerNDayTable=\"true\" and @eventName=\"Filelog2JsonBlob\" and @priority=\"High\" and @storeType=\"JsonBlob\"]',\n                                            './EventStreamingAnnotations/EventStreamingAnnotation[@name=\"mdsd.filelog.var.log.mydaemonlog1\"]/EventPublisher/Key',  # TODO Perform CDATA validation\n                                           )\n\n    def test_oms_syslog_mdsd_configs(self):\n        \"\"\"\n        Test whether syslog/syslog-ng config (for use with omsagent) is correctly generated for both 'syslogEvents'\n        and 'syslogCfg' settings. 
Also test whether the corresponding mdsd XML config is correctly generated.\n        \"\"\"\n        # Basic config (single dest table)\n        self.__helper_test_oms_syslog_mdsd_configs(self.cfg_syslog, self.oms_syslog_expected_xpaths)\n\n        # No syslog config case\n        self.assertFalse(self.cfg_none.get_rsyslog_config())\n        self.assertFalse(self.cfg_none.get_syslog_ng_config())\n        self.assertFalse(self.cfg_none.get_mdsd_syslog_config())\n\n    def __helper_test_oms_syslog_mdsd_configs(self, cfg, expected_xpaths):\n        \"\"\"\n        Helper for test_oms_syslog_mdsd_configs().\n        :param cfg: SyslogMdsdConfig object containing syslog config\n        \"\"\"\n        print '=== Actual oms rsyslog config output ==='\n        oms_rsyslog_config = cfg.get_rsyslog_config()\n        print oms_rsyslog_config\n        print '========================================'\n        lines = oms_rsyslog_config.strip().split('\\n')\n        # Item (line) count should match\n        self.assertEqual(len(cfg._fac_sev_map), len(lines))\n        # Each line should be correctly formatted\n        for l in lines:\n            self.assertRegexpMatches(l, r\"\\w+\\.\\w+\\s+@127\\.0\\.0\\.1:%SYSLOG_PORT%\")\n        # For each facility-severity, there should be a corresponding line.\n        for fac, sev in cfg._fac_sev_map.iteritems():\n            index = oms_rsyslog_config.find('{0}.{1}'.format(syslog_name_to_rsyslog_name(fac),\n                                                             syslog_name_to_rsyslog_name(sev)))\n            self.assertGreaterEqual(index, 0)\n        print \"*** Actual output verified ***\\n\"\n\n        print '=== Actual oms syslog-ng config output ==='\n        oms_syslog_ng_config = cfg.get_syslog_ng_config()\n        print oms_syslog_ng_config\n        print '=========================================='\n        lines = oms_syslog_ng_config.strip().split('\\n')\n        # Item (line) count should match\n        self.assertGreaterEqual(len(lines), len(cfg._fac_sev_map))\n        # Each line should be correctly formatted\n        for l in lines:\n            self.assertRegexpMatches(l, r'log \\{{ source\\({0}\\); filter\\(f_LAD_oms_f_\\w+\\); '\n                                        r'filter\\(f_LAD_oms_ml_\\w+\\); destination\\(d_LAD_oms\\); \\}}'\n                                        .format(get_syslog_ng_src_name()))\n        # For each facility-severity, there should be a corresponding line.\n        for fac, sev in cfg._fac_sev_map.iteritems():\n            index = oms_syslog_ng_config.find('log {{ source({0}); filter(f_LAD_oms_f_{1}); filter(f_LAD_oms_ml_{2}); '\n                                              'destination(d_LAD_oms); }}'.format(get_syslog_ng_src_name(),\n                                                                                  syslog_name_to_rsyslog_name(fac),\n                                                                                  syslog_name_to_rsyslog_name(sev)))\n            self.assertGreaterEqual(index, 0)\n        print \"*** Actual output verified ***\\n\"\n\n        print '=== Actual oms syslog mdsd XML output ==='\n        xml = cfg.get_mdsd_syslog_config()\n        print xml\n        print '========================================='\n        root = self.assertXmlDocument(xml)\n        self.assertXpathsOnlyOne(root, expected_xpaths)\n        print \"*** Actual output verified ***\\n\"\n\n    def test_oms_filelog_mdsd_config(self):\n        \"\"\"\n        Test whether mdsd XML config for LAD fileLog settings is 
correctly generated.\n        \"\"\"\n        print '=== Actual oms filelog mdsd XML config output ==='\n        xml = self.cfg_filelog.get_mdsd_filelog_config()\n        print xml\n        print '================================================='\n        root = self.assertXmlDocument(xml)\n\n        self.assertXpathsOnlyOne(root, self.oms_filelog_expected_xpaths)\n        print \"*** Actual output verified ***\\n\"\n\n        # Other configs should be all ''\n        self.assertFalse(self.cfg_syslog.get_mdsd_filelog_config())\n        self.assertFalse(self.cfg_none.get_mdsd_filelog_config())\n\n    def __helper_test_oms_fluentd_config(self, header_text, expected, actual):\n        header = \"=== Actual output of {0} ===\".format(header_text)\n        print header\n        print actual\n        print '=' * len(header)\n        # TODO BADBAD exact string matching...\n        self.assertEqual(expected, actual)\n        pass\n\n    def test_oms_fluentd_configs(self):\n        \"\"\"\n        Test whether fluentd syslog/tail source configs & out_mdsd config are correctly generated.\n        \"\"\"\n        actual = self.cfg_syslog.get_fluentd_syslog_src_config()\n        expected = \"\"\"\n<source>\n  type syslog\n  port %SYSLOG_PORT%\n  bind 127.0.0.1\n  protocol_type udp\n  include_source_host true\n  tag mdsd.syslog\n</source>\n\n# Generate fields expected for existing mdsd syslog collection schema.\n<filter mdsd.syslog.**>\n  type record_transformer\n  enable_ruby\n  <record>\n    # Fields for backward compatibility with Azure Shoebox V1 (Table storage)\n    Ignore \"syslog\"\n    Facility ${tag_parts[2]}\n    Severity ${tag_parts[3]}\n    EventTime ${time.strftime('%Y-%m-%dT%H:%M:%S%z')}\n    SendingHost ${record[\"source_host\"]}\n    Msg ${record[\"message\"]}\n    # Rename 'host' key, as mdsd will add 'Host' for Azure Table and it'll be confusing\n    hostname ${record[\"host\"]}\n  </record>\n  remove_keys host,message,source_host  # Renamed (duplicated) fields, so just remove\n</filter>\n\"\"\"\n        self.__helper_test_oms_fluentd_config('fluentd basic syslog src config', expected, actual)\n\n        actual = self.cfg_filelog.get_fluentd_syslog_src_config()\n        expected = ''\n        self.__helper_test_oms_fluentd_config('fluentd syslog src config for no syslog', expected, actual)\n\n        actual = self.cfg_syslog.get_fluentd_out_mdsd_config()\n        expected_out_mdsd_cfg_template = r\"\"\"\n# Output to mdsd\n<match mdsd.**>\n    type mdsd\n    log_level warn\n    djsonsocket /var/run/mdsd/lad_mdsd_djson.socket  # Full path to mdsd dynamic json socket file\n    acktimeoutms 5000  # max time in milli-seconds to wait for mdsd acknowledge response. 
If 0, no wait.\n{optional_lines}    num_threads 1\n    buffer_chunk_limit 1000k\n    buffer_type file\n    buffer_path /var/opt/microsoft/omsagent/LAD/state/out_mdsd*.buffer\n    buffer_queue_limit 128\n    flush_interval 10s\n    retry_limit 3\n    retry_wait 10s\n</match>\n\"\"\"\n        out_mdsd_optional_config_lines = r\"\"\"    mdsd_tag_regex_patterns [ \"^mdsd\\\\.syslog\" ] # fluentd tag patterns whose match will be used as mdsd source name\n\"\"\"\n        self.__helper_test_oms_fluentd_config('fluentd out_mdsd config for basic syslog cfg',\n                                              expected_out_mdsd_cfg_template.format(\n                                                  optional_lines=out_mdsd_optional_config_lines), actual)\n\n        actual = self.cfg_filelog.get_fluentd_filelog_src_config()\n        expected = \"\"\"\n# For all monitored files\n<source>\n  @type tail\n  path /var/log/mydaemonlog1,/var/log/mydaemonlog2\n  pos_file /var/opt/microsoft/omsagent/LAD/tmp/filelogs.pos\n  tag mdsd.filelog.*\n  format none\n  message_key Msg  # LAD uses \"Msg\" as the field name\n</source>\n\n# Add FileTag field (existing LAD behavior)\n<filter mdsd.filelog.**>\n  @type record_transformer\n  <record>\n    FileTag ${tag_suffix[2]}\n  </record>\n</filter>\n\"\"\"\n        self.__helper_test_oms_fluentd_config('fluentd tail src config for fileLogs', expected, actual)\n\n        actual = self.cfg_filelog.get_fluentd_out_mdsd_config()\n        self.__helper_test_oms_fluentd_config('fluentd out_mdsd config for filelog only (no syslog) cfg',\n                                              expected_out_mdsd_cfg_template.format(optional_lines=''), actual)\n\n        actual = self.cfg_none.get_fluentd_out_mdsd_config()\n        self.__helper_test_oms_fluentd_config('fluentd out_mdsd config for blank cfg (syslog disabled)',\n                                              expected_out_mdsd_cfg_template.format(optional_lines=''), actual)\n\n    def test_copy_schema_source_mdsdevent_eh_url_elems(self):\n        \"\"\"\n        Tests whether copy_source_mdsdevent_eh_url_elems() works fine.\n        Uses oms_syslog_expected_xpaths and oms_filelog_expected_xpaths XPath lists\n        to test the operation.\n        \"\"\"\n        xml_string_srcs = [ self.cfg_syslog.get_mdsd_syslog_config(),\n                            self.cfg_filelog.get_mdsd_filelog_config()\n                          ]\n        dst_xml_tree = ET.ElementTree(ET.fromstring(entire_xml_cfg_tmpl))\n        map(lambda x: copy_source_mdsdevent_eh_url_elems(dst_xml_tree, x), xml_string_srcs)\n        print '=== mdsd config XML after combining syslog/filelogs XML configs ==='\n        xml = ET.tostring(dst_xml_tree.getroot())\n        print xml\n        print '==================================================================='\n        # Verify using xmlunittest\n        root = self.assertXmlDocument(xml)\n        self.assertXpathsOnlyOne(root, self.oms_syslog_expected_xpaths)\n        self.assertXpathsOnlyOne(root, self.oms_filelog_expected_xpaths)\n        print \"*** Actual output verified ***\\n\"\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/tests/var_lib_waagent/lad_dir/config/lad_settings_logging.json",
    "content": "{\n  \"runtimeSettings\": [\n    {\n      \"handlerSettings\": {\n        \"publicSettings\": {\n          \"StorageAccount\": \"ladunittestdiag487\",\n          \"ladCfg\": {\n            \"diagnosticMonitorConfiguration\": {\n              \"syslogEvents\": {\n                \"sinks\": \"SyslogJsonBlob,SyslogEventHub\",\n                \"syslogEventConfiguration\": {\n                  \"LOG_USER\": \"LOG_ERR\",\n                  \"LOG_LOCAL0\": \"LOG_CRIT\"\n                }\n              }\n            }\n          },\n          \"fileLogs\" : [\n            {\n              \"file\": \"/var/log/mydaemonlog1\",\n              \"table\": \"MyDaemon1Events\",\n              \"sinks\": \"Filelog1JsonBlob,FilelogEventHub\"\n            },\n            {\n              \"file\": \"/var/log/mydaemonlog2\",\n              \"sinks\": \"Filelog2JsonBlob\"\n            }\n          ],\n          \"perfCfg\":  [\n            {\"query\": \"SELECT PercentAvailableMemory, AvailableMemory, UsedMemory, PercentUsedSwap FROM SCX_MemoryStatisticalInformation\",\n             \"table\": \"LinuxMemory\"},\n            {\"query\": \"SELECT PercentProcessorTime, PercentIOWaitTime, PercentIdleTime FROM SCX_ProcessorStatisticalInformation WHERE Name='_TOTAL'\",\n             \"table\": \"LinuxCpu\"},\n            {\"query\": \"SELECT AverageWriteTime,AverageReadTime,ReadBytesPerSecond,WriteBytesPerSecond FROM  SCX_DiskDriveStatisticalInformation WHERE Name='_TOTAL'\",\n             \"table\": \"LinuxDisk\"}\n          ]\n        },\n        \"protectedSettingsCertThumbprint\": \"B175B535DFE9F93659E5AFD893BF99BBF9DF28A5\",\n        \"protectedSettings\": {\n          \"storageAccountName\":\"ladunittestfakeaccount\",\n          \"storageAccountSasToken\":\"NOT_A_REAL_TOKEN\",\n          \"storageAccountEndPoint\":\"https://core.windows.net/\",\n          \"sinksConfig\": {\n            \"sink\": [\n              {\n                \"sasURL\": \"https://fake_sas_url_1\",\n                \"type\": \"EventHub\",\n                \"name\": \"SyslogEventHub\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"SyslogJsonBlob\"\n              },\n              {\n                \"sasURL\": \"https://fake_sas_url_2\",\n                \"type\": \"EventHub\",\n                \"name\": \"FilelogEventHub\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"Filelog1JsonBlob\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"Filelog2JsonBlob\"\n              }\n            ]\n          }\n        }\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "Diagnostic/tests/var_lib_waagent/lad_dir/config/lad_settings_metric.json",
    "content": "{\n  \"runtimeSettings\": [\n    {\n      \"handlerSettings\": {\n        \"protectedSettings\": {\n          \"storageAccountEndPoint\": \"https://core.windows.net/\",\n          \"storageAccountSasToken\": \"?NOT_A_REAL_TOKEN\",\n          \"storageAccountName\": \"ladunittestfakeaccount\",\n          \"sinksConfig\": {\n            \"sink\": [\n              {\n                \"sasURL\": \"https://fake&sas%url;1\",\n                \"type\": \"EventHub\",\n                \"name\": \"LinuxMemoryEventHub\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"SyslogJsonBlob\"\n              },\n              {\n                \"sasURL\": \"https://fake&sas%url;2\",\n                \"type\": \"EventHub\",\n                \"name\": \"ProcessorInfoEventHub\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"ProcessorInfoJsonBlob\"\n              },\n              {\n                \"type\": \"JsonBlob\",\n                \"name\": \"FileSystemJsonBlob\"\n              }\n            ]\n          }\n        },\n        \"protectedSettingsCertThumbprint\": \"B175B535DFE9F93659E5AFD893BF99BBF9DF28A5\",\n        \"publicSettings\": {\n          \"ladCfg\": {\n            \"diagnosticMonitorConfiguration\": {\n              \"eventVolume\": \"Large\",\n              \"metrics\": {\n                \"resourceId\": \"ladtest_resource_id\",\n                \"metricAggregation\": [\n                  {\n                    \"scheduledTransferPeriod\": \"PT1H\"\n                  },\n                  {\n                    \"scheduledTransferPeriod\": \"PT1M\"\n                  }\n                ]\n              },\n              \"performanceCounters\": {\n                \"performanceCounterConfiguration\": [\n                  {\n                    \"class\": \"Processor\",\n                    \"condition\": \"IsAggregate=TRUE\",\n                    \"annotation\": [\n                      {\n                        \"displayName\": \"Aggregate CPU %utilization\",\n                        \"locale\": \"en-us\"\n                      }\n                    ],\n                    \"counterSpecifier\": \"/builtin/processor/PercentProcessorTime\",\n                    \"counter\": \"percentprocessorTime\",\n                    \"type\": \"builtin\",\n                    \"unit\": \"Percent\"\n                  },\n                  {\n                    \"class\": \"Filesystem\",\n                    \"condition\": \"Name=\\\"/\\\"\",\n                    \"annotation\": [\n                      {\n                        \"displayName\": \"Used disk space on /\",\n                        \"locale\": \"en-us\"\n                      }\n                    ],\n                    \"counterSpecifier\": \"/builtin/filesystem/usedspace\",\n                    \"counter\": \"UsedSpace\",\n                    \"type\": \"builtin\",\n                    \"unit\": \"Bytes\"\n                  },\n                  {\n                    \"class\": \"Filesystem\",\n                    \"condition\": \"Name='/'\",\n                    \"annotation\": [\n                      {\n                        \"displayName\": \"Free disk space on /mnt\",\n                        \"locale\": \"en-us\"\n                      }\n                    ],\n                    \"counterSpecifier\": \"/builtin/filesystem/freespace(/mnt)\",\n                    \"counter\": \"FreeSpace\",\n      
              \"type\": \"builtin\",\n                    \"unit\": \"Bytes\"\n                  }\n\n                ]\n              }\n            }\n          },\n          \"perfCfg\": [\n            {\n              \"query\": \"SELECT PercentAvailableMemory, PercentUsedSwap FROM SCX_MemoryStatisticalInformation\",\n              \"table\": \"LinuxMemory\",\n              \"sinks\": \"LinuxMemoryEventHub\"\n            },\n            {\n              \"query\": \"SELECT PercentProcessorTime FROM SCX_ProcessorStatisticalInformation\",\n              \"sinks\": \"ProcessorInfoJsonBlob,ProcessorInfoEventHub\",\n              \"frequency\": 60\n            },\n            {\n              \"query\": \"SELECT FreeMegabytes FROM SCX_FileSystemStatisticalInformation\",\n              \"table\": \"LinuxFileSystem\",\n              \"sinks\": \"FileSystemJsonBlob\"\n            }\n          ],\n          \"sampleRateInSeconds\": 15,\n          \"StorageAccount\": \"ladtest\"\n        }\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "Diagnostic/tests/watchertests.py",
    "content": "import unittest\nimport diagnostic\nimport sys\nimport subprocess\nimport os\nimport errno\nimport watcherutil\n\n\nclass FStabUnitTests(unittest.TestCase):\n    _watcher = None\n    _datapath = os.path.join(os.getcwd(), 'utdata')\n\n    def setUp(self):\n        # watcherutil.Watcher expects logging callables (e.g., hutil.error/hutil.log), not streams\n        self._watcher = watcherutil.Watcher(sys.stderr.write, sys.stdout.write)\n\n        try:\n            os.mkdir(self._datapath)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise\n\n        # mount an overlay so that we can make changes to /etc/fstab\n        subprocess.call(['sudo',\n            'mount', '-t', 'overlayfs', 'overlayfs',\n            '-olowerdir=/etc,upperdir=' + self._datapath,\n            '/etc'])\n\n    def tearDown(self):\n        subprocess.call(['sudo', 'umount', '/etc'])\n        try:\n            os.rmdir(self._datapath)\n        except OSError:\n            pass\n\n    def test_fstab_basic(self):\n        self.assertEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n    def test_fstab_touch(self):\n        subprocess.call(['sudo', 'touch', '/etc/fstab'])\n        self.assertEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n    def addFstabEntry(self, fstabentry):\n        # The written file lands in the overlay upperdir, shadowing the real /etc/fstab\n        with open(self._datapath + '/fstab', 'w') as f:\n            f.write(fstabentry)\n            f.write('\\n')\n\n    @unittest.skip('Skipping because mount -f fails to detect error')\n    def test_fstab_baduuid(self):\n        self.addFstabEntry('UUID=1111111-1111-1111-1111-111111111111 /test ext4 defaults 0 0')\n        self.assertNotEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n    @unittest.skip('Skipping because mount -f fails to detect error')\n    def test_fstab_baddevicename(self):\n        self.addFstabEntry('/dev/foobar /test ext4 defaults 0 0')\n        self.assertNotEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n    @unittest.skip('Skipping because mount -f fails to detect error')\n    def test_fstab_malformedentry(self):\n        self.addFstabEntry('/test /dev/foobar ext4 defaults 0 0')\n        self.assertNotEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n    def test_fstab_goodentry(self):\n        self.addFstabEntry('/dev/sdb1 /test ext4 defaults 0 0')\n        self.assertEqual(self._watcher.handle_fstab(ignore_time=True), 0)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Diagnostic/virtual-machines-linux-diagnostic-extension-v3.md",
    "content": "---\ntitle: Use Linux Diagnostic Extension v3 to monitor metrics and logs | Microsoft Docs\ndescription: How to configure the Linux Diagnostic Extension (LAD) 3.0 to collect metrics and log events from Linux VMs running in Azure\nservices: virtual-machines-linux\ndocumentationcenter: dev-center-name\nauthor: jasonzio\nmanager: anandram\n\n\nms.service: virtual-machines-linux\nms.devlang: may be required\nms.topic: article\nms.tgt_pltfrm: vm-linux\nms.workload: required\nms.date: 04/21/2017\nms.author: jasonzio@microsoft.com\n\n---\n\n# Use Linux Diagnostic Extension v3 to monitor metrics and logs\n\n## Introduction\n\nThe Linux Diagnostic Extension helps a user monitor the health of a Linux VM running on Microsoft Azure. It has the following capabilities:\n\n* Collects system performance metrics from the VM and stores them in a specific table in a designated storage account (usually the account in which the VM's boot vhd is stored).\n* Retrieves log events from syslog and stores them in a specific table in the designated storage account.\n* Enables users to customize the data metrics that will be collected and uploaded.\n* Enables users to customize the syslog facilities and severity levels of events that will be collected and uploaded.\n* Enables users to upload specified log files to a designated storage table.\n* Supports sending the above data to arbitrary EventHub endpoints and JSON-formatted blobs in the designated storage account.\n\nThis extension works with both the classic and Resource Manager deployment models.\n\n### Migration from previous versions of the extension\n\nThe latest version of the extension is **3.0**. **Any old versions (2.x) will be deprecated and may be unpublished on or after 2018-07-31**.\n\nThis extension introduces breaking changes to the configuration of the extension. One such change was made to improve the security of the extension; as a result, backwards compatibility with 2.x could not be maintained. Also, the Extension Publisher for this extension is different from the publisher for the 2.x versions.\n\nIn order to migrate from 2.x to this new version of the extension, you must uninstall the old extension (under the old publisher name) and then install the new extension.\n\nWe strongly recommend you install the extension with automatic minor version upgrade enabled. On classic (ASM) VMs, you can achieve this by specifying '3.*' as the version if you are installing the extension through Azure XPLAT CLI or PowerShell. On ARM VMs, you can achieve this by including '\"autoUpgradeMinorVersion\": true' in the VM deployment template.\n\n## Enable the extension\n\nYou can enable this extension by using the [Azure portal](https://portal.azure.com/#), Azure PowerShell, or Azure CLI scripts.\n\nYou can view the collected performance data directly in the Azure portal:\n\n![image](./media/virtual-machines-linux-diagnostic-extension-v3/graph_metrics.png)\n\nThis article focuses on how to enable and configure the extension by using Azure CLI commands. Only a subset of the extension's features can be configured via the Azure portal, which will ignore (and leave unchanged) the parts of the configuration it does not address.\n\n## Prerequisites\n\n* **Azure Linux Agent version 2.2.0 or later**.\n  Note that most Azure VM Linux gallery images include version 2.2.7 or later. You can run **/usr/sbin/waagent -version** to confirm which version is installed on the VM. 
If the VM is running an older version of the guest agent, you can follow [these instructions on GitHub](https://github.com/Azure/WALinuxAgent \"instructions\") to update it.\n* **Azure CLI**. Follow [this guidance for installing CLI](../xplat-cli-install.md) to set up the Azure CLI environment on your machine. After Azure CLI is installed, you can use the **azure** command from your command-line interface (Bash, Terminal, or command prompt) to access the Azure CLI commands. For example:\n  * Run **azure vm extension set --help** for detailed help information.\n  * Run **azure login** to sign in to Azure.\n  * Run **azure vm list** to list all the virtual machines that you have on Azure.\n* A storage account to store the data. You will need the name of a previously created storage account and an account SAS token to upload the data to it.\n\n## Protected Settings\n\nThis set of configuration information contains sensitive information that should be protected from public view, such as storage credentials. These settings are transmitted to and stored by the extension in encrypted form.\n\n```json\n{\n    \"storageAccountName\" : \"the storage account to receive data\",\n    \"storageAccountEndPoint\": \"the URL prefix for the cloud for this account\",\n    \"storageAccountSasToken\": \"SAS access token\",\n    \"mdsdHttpProxy\": \"HTTP proxy settings\",\n    \"sinksConfig\": { ... }\n}\n```\n\nName | Value\n---- | -----\nstorageAccountName | The name of the storage account in which data will be written by the extension\nstorageAccountEndPoint | (optional) The endpoint identifying the cloud in which the storage account exists. For the Azure public cloud (which is the default when this setting is not given), this would be [https://core.windows.net](https://core.windows.net); set this appropriately for a storage account in a national cloud.\nstorageAccountSasToken | An [Account SAS token](https://azure.microsoft.com/en-us/blog/sas-update-account-sas-now-supports-all-storage-services/) for Blob and Table services (ss='bt'), containers and objects (srt='co'), which grants add, create, list, update, and write permissions (sp='acluw')\nmdsdHttpProxy | (optional) HTTP proxy information needed to enable the extension to connect to the specified storage account and endpoint.\nsinksConfig | (optional) Details of alternative destinations to which metrics and events can be delivered. The specific details of the various data sinks supported by the extension are covered below.\n\nYou can easily construct the required SAS token through the Azure portal. Select the general-purpose storage account to which you want the extension to write, then select \"Shared access signature\" from the Settings part of the left menu. Make the appropriate choices as described above and click the \"Generate SAS\" button.\n\n![image](./media/virtual-machines-linux-diagnostic-extension-v3/makeSAS.png)\n\nCopy the generated SAS into the storageAccountSasToken field; remove the leading question-mark (\"?\").\n\n### sinksConfig\n\n```json\n\"sinksConfig\": {\n    \"sink\": [\n        {\n            \"name\": \"sinkname\",\n            \"type\": \"sinktype\",\n            ...\n        },\n        ...\n    ]\n},\n```\n\nThis section defines additional destinations to which the extension will deliver the information it collects. The \"sink\" array contains an object for each additional data sink. 
The object will contain additional attributes as determined by the \"type\" attribute.\n\nElement | Value\n------- | -----\nname | A string used to refer to this sink elsewhere in the extension configuration.\ntype | The type of sink being defined. Determines the other values (if any) in instances of this type.\n\nVersion 3.0 of the Linux Diagnostic Extension supports two sink types: EventHub and JsonBlob.\n\n#### The EventHub sink\n\n```json\n\"sink\": [\n    {\n        \"name\": \"sinkname\",\n        \"type\": \"EventHub\",\n        \"sasURL\": \"https SAS URL\"\n    },\n    ...\n]\n```\n\nThe \"sasURL\" entry contains the full URL, including SAS token, for the EventHub endpoint to which data should be published. The SAS URL should be built using the EventHub endpoint (policy-level) shared key, not the root-level shared key for the entire EventHub subscription. Event Hubs SAS tokens are different from Storage SAS tokens; details can be found [on this web page](https://docs.microsoft.com/en-us/rest/api/eventhub/generate-sas-token).\n\n#### The JsonBlob sink\n\n```json\n\"sink\": [\n    {\n        \"name\": \"sinkname\",\n        \"type\": \"JsonBlob\"\n    },\n    ...\n]\n```\n\nData directed to a JsonBlob sink will be stored in blobs in a container with the same name as the sink. The Azure storage rules for blob container names apply to the names of JsonBlob sinks: between 3 and 63 lower-case alphanumeric ASCII characters or dashes. Individual blobs will be created every hour for each instance of the extension writing to the container. The blobs will always contain a syntactically-valid JSON object; new entries are added atomically.\n\n## Public settings\n\nThis structure contains various blocks of settings that control the information collected by the extension.\n\n```json\n{\n    \"mdsdHttpProxy\": \"\",\n    \"ladCfg\": { ... },\n    \"perfCfg\": [ ... ],\n    \"fileLogs\": [ ... ]\n}\n```\n\nElement | Value\n------- | -----\nmdsdHttpProxy | (optional) Same as in the protected settings (see above). The public value is overridden by the private value, if set. If the proxy setting contains a secret (like a password), it shouldn't be specified here, but should be specified in the protected settings.\n\nThe remaining elements are described in detail, below.\n\n### ladCfg\n\n```json\n\"ladCfg\": {\n    \"diagnosticMonitorConfiguration\": {\n        \"eventVolume\": \"Medium\",\n        \"metrics\": { ... },\n        \"performanceCounters\": { ... },\n        \"syslogEvents\": { ... }\n    },\n    \"sampleRateInSeconds\": 15\n}\n```\n\nControls the gathering of metrics and logs for delivery to the Azure Metrics service and to other data destinations (\"sinks\"). All settings in this section, with the exception of eventVolume, can be controlled via the Azure portal as well as through PowerShell, CLI, or template.\n\nThe Azure Metrics service requires metrics to be stored in a very particular Azure storage table. Similarly, log events must be stored in a different, but also very particular, table. All instances of the diagnostic extension configured (via protected settings) to use the same storage account name and endpoint will add their metrics and logs to the same table. If too many VMs are writing to the same table partition, Azure can throttle writes to that partition. 
The eventVolume setting changes how partition keys are constructed so that, across all instances of the extension writing to the same table, entries are spread across 1, 10, or 100 different partitions.\n\nElement | Value\n------- | -----\neventVolume | Controls the number of partitions created within the storage table. Must be one of \"Large\", \"Medium\", or \"Small\".\nsampleRateInSeconds | The default interval between collection of raw (unaggregated) metrics. The smallest supported sample rate is 15 seconds.\n\n#### metrics\n\n```json\n\"metrics\": {\n    \"resourceId\": \"/subscriptions/...\",\n    \"metricAggregation\" : [\n        { \"scheduledTransferPeriod\" : \"PT1H\" },\n        { \"scheduledTransferPeriod\" : \"PT5M\" }\n    ]\n}\n```\n\nSamples of the metrics specified in the performanceCounters section are collected every 15 seconds, or at the sample rate explicitly defined for the counter. Those raw samples are aggregated to produce mean, minimum, maximum, and last-collected values, along with the count of raw samples used to compute the aggregate. If multiple scheduledTransferPeriod frequencies appear (as in the example), each aggregation is computed independently over the specified interval. The name of the storage table to which aggregated metrics are written (and from which Azure Metrics reads data) is based, in part, on the transfer period of the aggregated metrics stored within it.\n\nElement | Value\n------- | -----\nresourceId | The ARM resource ID of the VM or of the VM Scale Set to which the VM belongs. This setting must also be specified if any JsonBlob sink is used in the configuration.\nscheduledTransferPeriod | The frequency at which aggregate metrics are to be computed and transferred to Azure Metrics, expressed as an ISO 8601 time interval. The smallest transfer period is 60 seconds, i.e. PT60S or PT1M.\n\n#### performanceCounters\n\n```json\n\"performanceCounters\": {\n    \"sinks\": \"\",\n    \"performanceCounterConfiguration\": [\n        {\n            \"type\": \"builtin\",\n            \"class\": \"Processor\",\n            \"counter\": \"PercentIdleTime\",\n            \"counterSpecifier\": \"/builtin/Processor/PercentIdleTime\",\n            \"condition\": \"IsAggregate=TRUE\",\n            \"sampleRate\": \"PT15S\",\n            \"unit\": \"Percent\",\n            \"annotation\": [\n                {\n                    \"displayName\" : \"Aggregate CPU %idle time\",\n                    \"locale\" : \"en-us\"\n                }\n            ]\n        }\n    ]\n}\n```\n\nElement | Value\n------- | -----\nsinks | A comma-separated list of names of sinks (as defined in the sinksConfig section of the protected settings) to which aggregated metric results should be published. All aggregated metrics will be published to each listed sink. 
Example: \"EHsink1,myjsonsink\"\ntype | Identifies the actual provider of the metric.\nclass | Together with \"counter\", identifies the specific metric within the provider's namespace.\ncounter | Together with \"class\", identifies the specific metric within the provider's namespace.\ncounterSpecifier | Identifies the specific metric within the Azure Metrics namespace.\ncondition | Selects a specific instance of the object to which the metric applies or selects the aggregation across all instances of that object. See the metric definitions (below) for more information.\nsampleRate | ISO 8601 interval which sets the rate at which raw samples for this metric are collected. If not set, the collection interval is set by the value of sampleRateInSeconds (see \"ladCfg\"). The shortest supported sample rate is 15 seconds, i.e. PT15S.\nunit | Should be one of these strings: \"Count\", \"Bytes\", \"Seconds\", \"Percent\", \"CountPerSecond\", \"BytesPerSecond\", \"Millisecond\". Defines the unit for the metric. The consumer of the collected data will expect the data LAD collects to match this unit. LAD ignores this field.\ndisplayName | The label (in the language specified by the associated locale setting) to be attached to this data in Azure Metrics. LAD ignores this field.\n\n#### syslogEvents\n\n```json\n\"syslogEvents\": {\n    \"sinks\": \"\",\n    \"syslogEventConfiguration\": {\n        \"facilityName1\": \"minSeverity\",\n        \"facilityName2\": \"minSeverity\",\n        ...\n    }\n}\n```\n\nThe syslogEventConfiguration collection has one entry for each syslog facility of interest. Setting a minSeverity of \"NONE\" for a particular facility behaves exactly as if that facility did not appear in the element at all; no events from that facility are captured.\n\nElement | Value\n------- | -----\nsinks | A comma-separated list of names of sinks to which individual log events should be published. All log events matching the restrictions in syslogEventConfiguration will be published to each listed sink. Example: \"EHforsyslog\"\nfacilityName | A syslog facility name (e.g. \"LOG\\_USER\" or \"LOG\\_LOCAL0\"). See the \"facility\" section of the [syslog man page](http://man7.org/linux/man-pages/man3/syslog.3.html) for the full list.\nminSeverity | A syslog severity level (e.g. \"LOG\\_ERR\" or \"LOG\\_INFO\"). See the \"level\" section of the [syslog man page](http://man7.org/linux/man-pages/man3/syslog.3.html) for the full list. The extension will capture events sent to the facility at or above the specified level.
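\n\nFor example, the following configuration (a sketch, assuming sinks named SyslogJsonBlob and SyslogEventHub have been defined in sinksConfig) captures events of severity LOG_ERR or higher from the user facility and LOG_CRIT or higher from the local0 facility:\n\n```json\n\"syslogEvents\": {\n    \"sinks\": \"SyslogJsonBlob,SyslogEventHub\",\n    \"syslogEventConfiguration\": {\n        \"LOG_USER\": \"LOG_ERR\",\n        \"LOG_LOCAL0\": \"LOG_CRIT\"\n    }\n}\n```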
\n\n### perfCfg\n\nControls execution of arbitrary [OMI](https://github.com/Microsoft/omi) queries.\n\n```json\n\"perfCfg\": [\n    {\n        \"namespace\": \"root/scx\",\n        \"query\": \"SELECT PercentAvailableMemory, PercentUsedSwap FROM SCX_MemoryStatisticalInformation\",\n        \"table\": \"LinuxOldMemory\",\n        \"frequency\": 300,\n        \"sinks\": \"\"\n    }\n]\n```\n\nElement | Value\n------- | -----\nnamespace | (optional) The OMI namespace within which the query should be executed. If unspecified, the default value is \"root/scx\", implemented by the [System Center Cross-platform Providers](http://scx.codeplex.com/wikipage?title=xplatproviders&referringTitle=Documentation).\nquery | The OMI query to be executed.\ntable | (optional) The Azure storage table, in the designated storage account (see above) into which the results of the query will be placed.\nfrequency | (optional) The number of seconds between execution of the query. Default value is 300 (5 minutes); minimum value is 15 seconds.\nsinks | (optional) A comma-separated list of names of additional sinks to which raw sample metric results should be published. No aggregation of these raw samples is computed by the extension or by Azure Metrics.\n\nEither \"table\" or \"sinks\", or both, must be specified.\n\n### fileLogs\n\nControls the capture of log files. As new text lines are written to the file, the extension captures them and writes them as table rows and/or to the specified sinks (JsonBlob or EventHub).\n\n```json\n\"fileLogs\": [\n    {\n        \"file\": \"/var/log/mydaemonlog\",\n        \"table\": \"MyDaemonEvents\",\n        \"sinks\": \"\"\n    }\n]\n```\n\nElement | Value\n------- | -----\nfile | The full pathname of the log file to be watched and captured. The pathname must name a single file; it cannot name a directory or contain wildcards.\ntable | (optional) The Azure storage table, in the designated storage account (see above), into which new lines from the \"tail\" of the file will be placed.\nsinks | (optional) A comma-separated list of names of additional sinks to which log lines should be published.\n\nEither \"table\" or \"sinks\", or both, must be specified.\n\n## Metrics supported by \"builtin\"\n\nThe \"builtin\" metric provider is a source of metrics most interesting to a broad set of users. These metrics fall into five broad classes:\n\n* Processor\n* Memory\n* Network\n* Filesystem\n* Disk\n\nThe available metrics are described in greater detail in the following sections.\n\n### Builtin metrics for the Processor class\n\nThe Processor class of metrics provides information about processor usage in the VM. When aggregating percentages, the result is the average across all CPUs. For example, given a VM with two cores, if one core was 100% busy for a given aggregation window and the other core was 100% idle, the reported PercentIdleTime would be 50; if each core was 50% busy for the same period, the reported result would also be 50. In a four-core system, with one core 100% busy and the others completely idle, the reported PercentIdleTime would be 75.\n\ncounter | Meaning\n------- | -------\nPercentIdleTime | Percentage of time during the aggregation window that processors were executing the kernel idle loop\nPercentProcessorTime | Percentage of time executing a non-idle thread\nPercentIOWaitTime | Percentage of time waiting for IO operations to complete\nPercentInterruptTime | Percentage of time executing hardware/software interrupts and DPCs (deferred procedure calls)\nPercentUserTime | Of non-idle time during the aggregation window, the percentage of time spent in user mode at normal priority\nPercentNiceTime | Of non-idle time, the percentage spent at lowered (nice) priority\nPercentPrivilegedTime | Of non-idle time, the percentage spent in privileged (kernel) mode\n\nThe first four counters should sum to 100%. The last three counters also sum to 100%; they subdivide the sum of PercentProcessorTime, PercentIOWaitTime, and PercentInterruptTime.\n\nTo obtain a single metric aggregated across all processors, set \"condition\" to \"IsAggregate=TRUE\". To obtain a metric for a specific processor, set \"condition\" to \"Name=\\\\\"*nn*\\\\\"\" where *nn* is the logical processor number as known to the operating system, typically in the range 0..*n-1*.
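\n\nAs an illustration, a hypothetical performanceCounterConfiguration entry that samples busy time on logical processor 0 only might look like the following sketch (the counterSpecifier and displayName values shown are assumptions for illustration):\n\n```json\n{\n    \"type\": \"builtin\",\n    \"class\": \"Processor\",\n    \"counter\": \"PercentProcessorTime\",\n    \"counterSpecifier\": \"/builtin/Processor/PercentProcessorTime(0)\",\n    \"condition\": \"Name=\\\"0\\\"\",\n    \"sampleRate\": \"PT15S\",\n    \"unit\": \"Percent\",\n    \"annotation\": [\n        {\n            \"displayName\": \"CPU 0 %busy time\",\n            \"locale\": \"en-us\"\n        }\n    ]\n}\n```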
\n\n### Builtin metrics for the Memory class\n\nThe Memory class of metrics provides information about memory utilization, paging, and swapping.\n\ncounter | Meaning\n------- | -------\nAvailableMemory | Available physical memory in MiB\nPercentAvailableMemory | Available physical memory as a percent of total memory\nUsedMemory | In-use physical memory (MiB)\nPercentUsedMemory | In-use physical memory as a percent of total memory\nPagesPerSec | Total paging (read/write)\nPagesReadPerSec | Pages read from backing store (pagefile, program file, mapped file, etc)\nPagesWrittenPerSec | Pages written to backing store (pagefile, mapped file, etc)\nAvailableSwap | Unused swap space (MiB)\nPercentAvailableSwap | Unused swap space as a percentage of total swap\nUsedSwap | In-use swap space (MiB)\nPercentUsedSwap | In-use swap space as a percentage of total swap\n\nThis family of metrics has only a single instance; the \"condition\" attribute has no useful settings and should be omitted.\n\n### Builtin metrics for the Network class\n\nThe Network class of metrics provides information about network activity, aggregated across all network devices (eth0, eth1, etc.) since boot. Bandwidth information is not directly available; it is best retrieved from host metrics rather than from within the guest.\n\ncounter | Meaning\n------- | -------\nBytesTransmitted | Total bytes sent since boot\nBytesReceived | Total bytes received since boot\nBytesTotal | Total bytes sent or received since boot\nPacketsTransmitted | Total packets sent since boot\nPacketsReceived | Total packets received since boot\nTotalRxErrors | Number of receive errors since boot\nTotalTxErrors | Number of transmit errors since boot\nTotalCollisions | Number of collisions reported by the network ports since boot\n\nThis family of metrics has only a single instance; the \"condition\" attribute has no useful settings and should be omitted.\n\n### Builtin metrics for the Filesystem class\n\nThe Filesystem class of metrics provides information about filesystem usage. Absolute and percentage values are reported as they'd be displayed to an ordinary user (not root).\n\ncounter | Meaning\n------- | -------\nFreeSpace | Available disk space in bytes\nUsedSpace | Used disk space in bytes\nPercentFreeSpace | Percentage free space\nPercentUsedSpace | Percentage used space\nPercentFreeInodes | Percentage of unused inodes\nPercentUsedInodes | Percentage of allocated (in use) inodes summed across all filesystems\nBytesReadPerSecond | Bytes read per second\nBytesWrittenPerSecond | Bytes written per second\nBytesPerSecond | Bytes read or written per second\nReadsPerSecond | Read operations per second\nWritesPerSecond | Write operations per second\nTransfersPerSecond | Read or write operations per second\n\nAggregated values across all file systems can be obtained by setting \"condition\" to \"IsAggregate=True\". Values for a specific mounted file system can be obtained by setting \"condition\" to 'Name=\"*mountpoint*\"' where *mountpoint* is the path at which the filesystem was mounted (\"/\", \"/mnt\", etc.).
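\n\nFor instance, a hypothetical entry tracking free space on /mnt might look like this sketch (again, the counterSpecifier and displayName values are illustrative assumptions):\n\n```json\n{\n    \"type\": \"builtin\",\n    \"class\": \"Filesystem\",\n    \"counter\": \"FreeSpace\",\n    \"counterSpecifier\": \"/builtin/Filesystem/FreeSpace(/mnt)\",\n    \"condition\": \"Name=\\\"/mnt\\\"\",\n    \"unit\": \"Bytes\",\n    \"annotation\": [\n        {\n            \"displayName\": \"Free disk space on /mnt\",\n            \"locale\": \"en-us\"\n        }\n    ]\n}\n```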
\n\n### Builtin metrics for the Disk class\n\nThe Disk class of metrics provides information about disk device usage. These statistics apply to the drive itself without regard to the number of file systems that may exist on the device; if there are multiple file systems on a device, the counters for that device are, effectively, aggregated across all of them.\n\ncounter | Meaning\n------- | -------\nReadsPerSecond | Read operations per second\nWritesPerSecond | Write operations per second\nTransfersPerSecond | Total operations per second\nAverageReadTime | Average seconds per read operation\nAverageWriteTime | Average seconds per write operation\nAverageTransferTime | Average seconds per operation\nAverageDiskQueueLength | Average number of queued disk operations\nReadBytesPerSecond | Number of bytes read per second\nWriteBytesPerSecond | Number of bytes written per second\nBytesPerSecond | Number of bytes read or written per second\n\nAggregated values across all disks can be obtained by setting \"condition\" to \"IsAggregate=True\". Values for a specific disk device can be obtained by setting \"condition\" to \"Name=\\\\\"*devicename*\\\\\"\" where *devicename* is the path of the device file for the disk (\"/dev/sda1\", \"/dev/sdb1\", etc.).\n\n## Installing and configuring LAD 3.0 via CLI\n\nAssuming your protected settings are in the file PrivateConfig.json and your public configuration information is in PublicConfig.json, run this command:\n\n> azure vm extension set *resource_group_name* *vm_name* LinuxDiagnostic Microsoft.Azure.Diagnostics '3.*' --private-config-path PrivateConfig.json --public-config-path PublicConfig.json\n\nPlease note that the above command assumes you are in the Azure Resource Manager mode (arm) of the Azure CLI and applies only to ARM VMs, not to classic Azure VMs. For classic (or ASM, Azure Service Management) VMs, you'll need to set the CLI mode to \"asm\" (run `azure config mode asm`) before running the above command, and you should also omit the resource group name in the command (there is no notion of resource groups in ASM). For more information on the different modes of the Azure CLI and how to use them, please refer to related documentation like [this](https://docs.microsoft.com/en-us/azure/xplat-cli-connect).\n\n## An example LAD 3.0 configuration\n\nBased on the above definitions, here's a sample LAD 3.0 extension configuration with some explanation. Please note that in order to apply this sample to your case, you should use your own storage account name, account SAS token, and EventHubs SAS tokens. 
First, the following private settings (that should be saved in a file as PrivateConfig.json, if you want to use the above Azure CLI command to enable the extension) will configure a storage account, its account SAS token, and various sinks (JsonBlob or EventHubs with SAS tokens):\n\n```json\n{\n  \"storageAccountName\": \"yourdiagstgacct\",\n  \"storageAccountSasToken\": \"sv=xxxx-xx-xx&ss=bt&srt=co&sp=wlacu&st=yyyy-yy-yyT21%3A22%3A00Z&se=zzzz-zz-zzT21%3A22%3A00Z&sig=fake_signature\",\n  \"sinksConfig\": {\n    \"sink\": [\n      {\n        \"name\": \"SyslogJsonBlob\",\n        \"type\": \"JsonBlob\"\n      },\n      {\n        \"name\": \"FilelogJsonBlob\",\n        \"type\": \"JsonBlob\"\n      },\n      {\n        \"name\": \"LinuxCpuJsonBlob\",\n        \"type\": \"JsonBlob\"\n      },\n      {\n        \"name\": \"WADMetricJsonBlob\",\n        \"type\": \"JsonBlob\"\n      },\n      {\n        \"name\": \"LinuxCpuEventHub\",\n        \"type\": \"EventHub\",\n        \"sasURL\": \"https://youreventhubnamespace.servicebus.windows.net/youreventhubpublisher?sr=https%3a%2f%2fyoureventhubnamespace.servicebus.windows.net%2fyoureventhubpublisher%2f&sig=fake_signature&se=1808096361&skn=yourehpolicy\"\n      },\n      {\n        \"name\": \"WADMetricEventHub\",\n        \"type\": \"EventHub\",\n        \"sasURL\": \"https://youreventhubnamespace.servicebus.windows.net/youreventhubpublisher?sr=https%3a%2f%2fyoureventhubnamespace.servicebus.windows.net%2fyoureventhubpublisher%2f&sig=yourehpolicy&skn=yourehpolicy\"\n      },\n      {\n        \"name\": \"LoggingEventHub\",\n        \"type\": \"EventHub\",\n        \"sasURL\": \"https://youreventhubnamespace.servicebus.windows.net/youreventhubpublisher?sr=https%3a%2f%2fyoureventhubnamespace.servicebus.windows.net%2fyoureventhubpublisher%2f&sig=yourehpolicy&se=1808096361&skn=yourehpolicy\"\n      }\n    ]\n  }\n}\n```\n\nThen the following public settings (that should be saved in a file as PublicConfig.json for the Azure CLI command above) will do the following:\n\n* Uploads percent-processor-time and used-disk-space to the Azure Metrics service table (this will allow you to view these metrics in the Azure Portal), and your EventHub (as specified in your sink `WADMetricEventHub`) and your Azure Blob storage (container name is `wadmetricjsonblob`).\n* Uploads messages from syslog facility \"user\" and severity \"info\" or above to your Azure Table storage (always on by default, and the Azure Table name is `LinuxSyslog*`), your Azure Blob storage (container name is `syslogjsonblob*`), and your EventHubs publisher (as specified in your sink name `LoggingEventHub`).\n* Uploads raw OMI query results (PercentProcessorTime and PercentIdleTime) to your Azure Table storage (table name is `LinuxCpu*`), your Azure Blob storage (container name is `linuxcpujsonblob*`) and your EventHubs publisher (as specified in your sink name `LinuxCpuEventHub`).\n* Uploads appended lines in file `/var/log/myladtestlog` to your Azure Table storage (table name is MyLadTestLog\*), your Azure Blob storage (container name is `filelogjsonblob*`), and to your EventHubs publisher (as specified in your sink name `LoggingEventHub`).\n\n```json\n{\n  \"StorageAccount\": \"yourdiagstgacct\",\n  \"sampleRateInSeconds\": 15,\n  \"ladCfg\": {\n    \"diagnosticMonitorConfiguration\": {\n      \"performanceCounters\": {\n        \"sinks\": \"WADMetricEventHub,WADMetricJsonBlob\",\n        \"performanceCounterConfiguration\": [\n          {\n            \"unit\": \"Percent\",\n            \"type\": \"builtin\",\n            \"counter\": \"PercentProcessorTime\",\n            \"counterSpecifier\": \"/builtin/Processor/PercentProcessorTime\",\n            \"annotation\": [\n              {\n                \"locale\": \"en-us\",\n                \"displayName\": \"Aggregate CPU %utilization\"\n              }\n            ],\n            \"condition\": \"IsAggregate=TRUE\",\n            \"class\": \"Processor\"\n          },\n          {\n            \"unit\": \"Bytes\",\n            \"type\": \"builtin\",\n            \"counter\": \"UsedSpace\",\n            \"counterSpecifier\": \"/builtin/FileSystem/UsedSpace\",\n            \"annotation\": [\n              {\n                \"locale\": \"en-us\",\n                \"displayName\": \"Used disk space on /\"\n              }\n            ],\n            \"condition\": \"Name=\\\"/\\\"\",\n            \"class\": \"Filesystem\"\n          }\n        ]\n      },\n      \"metrics\": {\n        \"metricAggregation\": [\n          {\n            \"scheduledTransferPeriod\": \"PT1H\"\n          },\n          {\n            \"scheduledTransferPeriod\": \"PT1M\"\n          }\n        ],\n        \"resourceId\": \"/subscriptions/your_azure_subscription_id/resourceGroups/your_resource_group_name/providers/Microsoft.Compute/virtualMachines/your_vm_name\"\n      },\n      \"eventVolume\": \"Large\",\n      \"syslogEvents\": {\n        \"sinks\": \"SyslogJsonBlob,LoggingEventHub\",\n        \"syslogEventConfiguration\": {\n          \"LOG_USER\": \"LOG_INFO\"\n        }\n      }\n    }\n  },\n  \"perfCfg\": [\n    {\n      \"query\": \"SELECT PercentProcessorTime, PercentIdleTime FROM SCX_ProcessorStatisticalInformation WHERE Name='_TOTAL'\",\n      \"table\": \"LinuxCpu\",\n      \"frequency\": 60,\n      \"sinks\": \"LinuxCpuJsonBlob,LinuxCpuEventHub\"\n    }\n  ],\n  \"fileLogs\": [\n    {\n      \"file\": \"/var/log/myladtestlog\",\n      \"table\": \"MyLadTestLog\",\n      \"sinks\": \"FilelogJsonBlob,LoggingEventHub\"\n    }\n  ]\n}\n```\n\nPlease note that you must provide the correct `resourceId` in order for the Azure Metrics service to display your `performanceCounters` data correctly in the Azure Portal charts. The resource ID is also used by JsonBlob sinks when forming the names of blobs.\n\n## Configuring and enabling the extension for Azure Portal metrics charting experiences\n\nHere's a sample configuration (provided via the `wget` URL below), along with installation instructions, that configures LAD 3.0 to capture and store exactly the same metrics as LAD 2.3 provided for the Azure Portal VM metrics charting experience (file system metrics are newly added in LAD 3.0), plus the default syslog collection that LAD 2.3 enabled. You should consider this just an example; you'll want to modify the metrics to suit your own needs.\n\nIf you'd like to proceed, please execute the following commands on your Azure CLI terminal after [installing Azure CLI 2.0](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) and wget (run `sudo apt-get install wget` on a Debian-based Linux distro or `sudo yum install wget` on a Red Hat-based Linux distro). 
Also make sure to provide correct values for your Azure VM diagnostic parameters in the first 3 lines.\n\n```bash\n# Set your Azure VM diagnostic parameters correctly below\nmy_resource_group=<your_azure_resource_group_name_containing_your_azure_linux_vm>\nmy_linux_vm=<your_azure_linux_vm_name>\nmy_diagnostic_storage_account=<your_azure_storage_account_for_storing_vm_diagnostic_data>\n\n# Log in to Azure first before anything else\naz login\n\n# Get VM resource ID as well, and replace storage account name and resource ID in the public settings.\nmy_vm_resource_id=$(az vm show -g $my_resource_group -n $my_linux_vm --query \"id\" -o tsv)\nwget https://raw.githubusercontent.com/Azure/azure-linux-extensions/master/Diagnostic/tests/lad_2_3_compatible_portal_pub_settings.json -O portal_public_settings.json\nsed -i \"s#__DIAGNOSTIC_STORAGE_ACCOUNT__#$my_diagnostic_storage_account#g\" portal_public_settings.json\nsed -i \"s#__VM_RESOURCE_ID__#$my_vm_resource_id#g\" portal_public_settings.json\n\n# Set protected settings (storage account SAS token)\nmy_diagnostic_storage_account_sastoken=$(az storage account generate-sas --account-name $my_diagnostic_storage_account --expiry 9999-12-31T23:59Z --permissions wlacu --resource-types co --services bt -o tsv)\nmy_lad_protected_settings=\"{'storageAccountName': '$my_diagnostic_storage_account', 'storageAccountSasToken': '$my_diagnostic_storage_account_sastoken'}\"\n\n# Finally enable (set) the extension for the Portal metrics charts experience\naz vm extension set --publisher Microsoft.Azure.Diagnostics --name LinuxDiagnostic --version 3.0 --resource-group $my_resource_group --vm-name $my_linux_vm --protected-settings \"${my_lad_protected_settings}\" --settings portal_public_settings.json\n\n# Done\n```\n\nThe URL and its contents are subject to change. You should download a copy of the portal settings JSON file and customize it for your needs; any templates or automation you construct should use your own copy, rather than downloading that URL each time.\n\n### Important notes on customizing the downloaded `portal_public_settings.json`\n\nAfter experimenting with the downloaded `portal_public_settings.json` configuration as is, you may want to customize it to fit your own needs. For example, you may want to remove the entire `syslogEvents` section if you don't need to collect syslog events at all. You can also remove unneeded entries in the `performanceCounterConfiguration` section if you are not interested in some metrics. However, you should not modify other settings without fully understanding what they are and how they work. The only recommended customizations at this point are removing unwanted metrics or syslog events, and possibly changing the `displayName` values for the metrics you're interested in.\n\n### Important notes on upgrading to LAD 3.0 from LAD 2.3\n\n**Please use a new/different storage account for LAD 3.0** if you are upgrading from LAD 2.3. As mentioned earlier, you should uninstall LAD 2.3 first in order to upgrade to LAD 3.0, and if you specify the same storage account for LAD 3.0 as was used with LAD 2.3, syslog event collection with the new LAD 3.0 may not work because of a small change in LAD 3.0's syslog Azure Table name. Therefore, you should use a new storage account for LAD 3.0 if you still want to collect syslog events.\n\n## Review your data\n\nThe performance and diagnostic data are stored in an Azure Storage table by default.
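\n\nAs a quick sanity check from the CLI, you can list the tables that the extension has created (a sketch, reusing the storage account and SAS token variables from the script above):\n\n```bash\naz storage table list --account-name $my_diagnostic_storage_account --sas-token \"$my_diagnostic_storage_account_sastoken\" -o table\n```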
Review [How to use Azure Table Storage from Ruby](../storage/storage-ruby-how-to-use-table-storage.md) to learn how to access the data in the storage table using the Azure Table Storage Ruby API. Note that Azure Storage APIs are available in many other languages and platforms.\n\nIf you specified JsonBlob sinks for your LAD extension configuration, then the same storage account's blob containers will hold your performance and/or diagnostic data. You can consume the blob data using any Azure Blob Storage APIs.\n\nIn addition, you can use the following UI tools to access the data in Azure Storage:\n\n1. [Microsoft Azure Storage Explorer](http://storageexplorer.com/)\n1. Visual Studio Server Explorer\n1. [Azure Storage Explorer](https://azurestorageexplorer.codeplex.com/ \"Azure Storage Explorer\")\n\nThe following is a snapshot of a Microsoft Azure Storage Explorer session showing the Azure Storage tables and containers generated by a correctly configured LAD 3.0 extension on a test VM. Note that the snapshot doesn't exactly match the sample LAD 3.0 configuration provided above.\n\n![image](./media/virtual-machines-linux-diagnostic-extension-v3/stg_explorer.png)\n\nIf you specified EventHubs sinks for your LAD extension configuration, then you'll want to consume the published Event Hubs messages by following the related Event Hubs documentation; [this overview](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-what-is-event-hubs) is a good starting point.\n"
  },
  {
    "path": "Diagnostic/watcherutil.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport subprocess\nimport os\nimport datetime\nimport time\nimport string\nimport traceback\n\n\nclass Watcher:\n    \"\"\"\n    A class that handles periodic monitoring activities that are requested for LAD to perform.\n    The first such activity is to watch /etc/fstab and report (log to console) if there's anything\n    wrong with that. There might be other such monitoring activities that will be added later.\n    \"\"\"\n\n    def __init__(self, hutil_error, hutil_log, log_to_console=False):\n        \"\"\"\n        Constructor.\n        :param hutil_error: Error logging function (e.g., hutil.error). This is not a stream.\n        :param hutil_log: Normal logging function (e.g., hutil.log). This is not a stream.\n        :param log_to_console: Indicates whether to log any issues to /dev/console or not.\n        \"\"\"\n        # This is only for the /etc/fstab watcher feature.\n        self._fstab_last_mod_time = os.path.getmtime('/etc/fstab')\n\n        self._hutil_error = hutil_error\n        self._hutil_log = hutil_log\n        self._log_to_console = log_to_console\n\n        self._imds_logger = None\n\n    def _do_log_to_console_if_enabled(self, message):\n        \"\"\"\n        Write 'message' to console. Stolen from waagent LogToCon().\n        \"\"\"\n        if self._log_to_console:\n            try:\n                with open('/dev/console', 'w') as console:\n                    message = filter(lambda x: x in string.printable, message)\n                    console.write(message.encode('ascii', 'ignore') + '\\n')\n            except IOError as e:\n                self._hutil_error('Error writing to console. Exception={0}'.format(e))\n\n    def handle_fstab(self, ignore_time=False):\n        \"\"\"\n        Watches if /etc/fstab is modified and verifies if it's OK. 
Otherwise, reports the problem in the logs or to /dev/console.\n        :param ignore_time: Disable the default logic of delaying /etc/fstab verification by 1 minute.\n                            This allows test code to avoid waiting 1 minute unnecessarily.\n        :return: None\n        \"\"\"\n        try_mount = False\n        if ignore_time:\n            try_mount = True\n        else:\n            current_mod_time = os.path.getmtime('/etc/fstab')\n            current_mod_date_time = datetime.datetime.fromtimestamp(current_mod_time)\n\n            # Only validate if it's been at least 1 minute since the change to fstab\n            # was made, to avoid repeatedly logging errors while the file is still being edited\n            if (current_mod_time != self._fstab_last_mod_time and\n                datetime.datetime.now() > current_mod_date_time +\n                    datetime.timedelta(minutes=1)):\n                try_mount = True\n                self._fstab_last_mod_time = current_mod_time\n\n        ret = 0\n        if try_mount:\n            # 'mount -a -vf' fake-mounts (-f) every fstab entry verbosely, so problems are\n            # reported without actually mounting anything\n            ret = subprocess.call(['sudo', 'mount', '-a', '-vf'])\n            if ret != 0:\n                # There was an error running mount, so log\n                error_msg = 'fstab modification failed mount validation. Please correct before reboot.'\n                self._hutil_error(error_msg)\n                self._do_log_to_console_if_enabled(error_msg)\n            else:\n                # No errors\n                self._hutil_log('fstab modification passed mount validation')\n        return ret\n\n    def set_imds_logger(self, imds_logger):\n        self._imds_logger = imds_logger\n\n    def watch(self):\n        \"\"\"\n        Main loop performing various monitoring activities periodically.\n        Currently iterates every 5 minutes, and other periodic activities might be\n        added in the loop later.\n        :return: None\n        \"\"\"\n        while True:\n            # /etc/fstab watcher\n            self.handle_fstab()\n\n            # IMDS probe (only sporadically, inside the function)\n            if self._imds_logger:\n                try:\n                    self._imds_logger.log_imds_data_if_right_time()\n                except Exception as e:\n                    self._hutil_error('ImdsLogger exception: {0}\\nStacktrace: {1}'.format(e, traceback.format_exc()))\n\n            # Sleep 5 minutes\n            time.sleep(60 * 5)\n"
  },
  {
    "path": "LAD-AMA-Common/metrics_ext_utils/__init__.py",
    "content": "# Metrics Extension helper script for LAD/AMA"
  },
  {
    "path": "LAD-AMA-Common/metrics_ext_utils/metrics_common_utils.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\n\ndef is_systemd():\n    \"\"\"\n    Check if the system is using systemd\n    \"\"\"\n    return os.path.isdir(\"/run/systemd/system\")\n\n\ndef is_arc_installed():\n    \"\"\"\n    Check if the system is an on prem machine running Arc\n    \"\"\"\n    # Using systemctl to check this since Arc only supports VM that have systemd\n    check_arc = os.system(\"systemctl status himdsd 1>/dev/null 2>&1\")\n    return check_arc == 0\n\n\ndef get_arc_endpoint():\n    \"\"\"\n    Find the endpoint for arc Hybrid IMDS\n    \"\"\"\n    endpoint_filepath = \"/lib/systemd/system.conf.d/azcmagent.conf\"\n    with open(endpoint_filepath, \"r\") as f:\n        data = f.read()\n    endpoint = data.split(\"\\\"IMDS_ENDPOINT=\")[1].split(\"\\\"\\n\")[0]\n\n    return endpoint"
  },
  {
    "path": "LAD-AMA-Common/metrics_ext_utils/metrics_constants.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n# This File contains constants used for Platform Metrics feature in LAD and Azure Monitor Extension\n\nmetrics_extension_namespace = \"Azure.VM.Linux.GuestMetrics\"\n\n#AMA Constants\nama_metrics_extension_bin = \"/opt/microsoft/azuremonitoragent/bin/MetricsExtension\"\nmetrics_extension_service_name = \"metrics-extension\"\nmetrics_extension_service_path = \"/lib/systemd/system/metrics-extension.service\"\nmetrics_extension_service_path_usr_lib = \"/usr/lib/systemd/system/metrics-extension.service\"\nmetrics_extension_service_path_etc = \"/etc/systemd/system/metrics-extension.service\"\n\nama_telegraf_bin = \"/opt/microsoft/azuremonitoragent/bin/telegraf\"\ntelegraf_service_name = \"metrics-sourcer\"\ntelegraf_service_path = \"/lib/systemd/system/metrics-sourcer.service\"\ntelegraf_service_path_usr_lib = \"/usr/lib/systemd/system/metrics-sourcer.service\"\ntelegraf_service_path_etc = \"/etc/systemd/system/metrics-sourcer.service\"\n\nama_metrics_extension_udp_port = \"17659\"\n\n#LAD Constants\nlad_metrics_extension_bin = \"/usr/local/lad/bin/MetricsExtension\"\nlad_metrics_extension_service_name = \"metrics-extension-lad\"\nlad_metrics_extension_service_path = \"/lib/systemd/system/metrics-extension-lad.service\"\nlad_metrics_extension_service_path_usr_lib = \"/usr/lib/systemd/system/metrics-extension-lad.service\"\n\nlad_telegraf_bin = \"/usr/local/lad/bin/telegraf\"\nlad_telegraf_service_name = \"metrics-sourcer-lad\"\nlad_telegraf_service_path = \"/lib/systemd/system/metrics-sourcer-lad.service\"\nlad_telegraf_service_path_usr_lib = \"/usr/lib/systemd/system/metrics-sourcer-lad.service\"\n\nlad_metrics_extension_udp_port = \"13459\"\nlad_metrics_extension_influx_udp_url = \"udp://127.0.0.1:\" + lad_metrics_extension_udp_port\ntelegraf_influx_url = \"unix:///var/run/mdsd/lad_mdsd_influx.socket\""
  },
  {
    "path": "LAD-AMA-Common/metrics_ext_utils/metrics_ext_handler.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport platform\nimport sys\nimport json\nimport os\nfrom shutil import copyfile, rmtree\nimport stat\nimport grp\nimport pwd\nimport filecmp\nimport metrics_ext_utils.metrics_constants as metrics_constants\nimport subprocess\nimport time\nimport signal\nimport metrics_ext_utils.metrics_common_utils as metrics_utils\n\ntry:\n    import urllib.request as urllib # Python 3+\nexcept ImportError:\n    import urllib2 as urllib # Python 2\n\ntry:\n    import urllib.error as urlerror # Python 3+\nexcept ImportError:\n    import urllib2 as urlerror # Python 2\n\ntry:\n    from urllib.parse import urlparse # Python 3+\nexcept ImportError:\n    from urlparse import urlparse # Python 2\n\n# Cloud Environments\nPublicCloudName     = \"azurepubliccloud\"\nFairfaxCloudName    = \"azureusgovernmentcloud\"\nMooncakeCloudName   = \"azurechinacloud\"\nUSNatCloudName      = \"usnat\" # EX\nUSSecCloudName      = \"ussec\" # RX\nArcACloudName       = \"azurestackcloud\"\nDefaultCloudName    = PublicCloudName # Fallback\n\nARMDomainMap = {\n    PublicCloudName:    \"management.azure.com\",\n    FairfaxCloudName:   \"management.usgovcloudapi.net\",\n    MooncakeCloudName:  \"management.chinacloudapi.cn\",\n    USNatCloudName:     \"management.azure.eaglex.ic.gov\",\n    USSecCloudName:     \"management.azure.microsoft.scloud\",\n    ArcACloudName:      \"armmanagement.autonomous.cloud.private\"\n}\n\n\ndef is_running(is_lad):\n    \"\"\"\n    This method is used to check if metrics binary is currently running on the system or not.\n    In order to check whether it needs to be restarted from the watcher daemon\n    \"\"\"\n    if is_lad:\n        metrics_bin = metrics_constants.lad_metrics_extension_bin\n    else:\n        metrics_bin = metrics_constants.ama_metrics_extension_bin\n\n    proc = subprocess.Popen([\"ps  aux | grep MetricsExtension | grep -v grep\"], stdout=subprocess.PIPE, shell=True)\n    output = proc.communicate()[0]\n    if metrics_bin in output.decode('utf-8', 'ignore'):\n        return True\n    else:\n        return False\n\n\ndef stop_metrics_service(is_lad):\n    \"\"\"\n    Stop the metrics service if VM is using is systemd, otherwise check if the pid_file exists,\n    and if the pid belongs to the MetricsExtension process, if yes, then kill the process\n    This method is called before 
remove_metrics_service by the main extension code\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    if is_lad:\n        metrics_ext_bin = metrics_constants.lad_metrics_extension_bin\n    else:\n        metrics_ext_bin = metrics_constants.ama_metrics_extension_bin\n\n    # If the VM has systemd, then we will use that to stop\n    if metrics_utils.is_systemd():\n        code = 1\n        metrics_service_path = get_metrics_extension_service_path(is_lad)\n        metrics_service_name = get_metrics_extension_service_name(is_lad)\n\n        if os.path.isfile(metrics_service_path):\n            code = os.system(\"systemctl stop {0}\".format(metrics_service_name))\n        else:\n            return False, \"Metrics Extension service file does not exist. Failed to stop ME service: {0}.service.\".format(metrics_service_name)\n\n        if code != 0:\n            return False, \"Unable to stop Metrics Extension service: {0}. Failed with code {1}\".format(metrics_service_name, code)\n    else:\n        # This VM does not have systemd, so we use the pid of the last-run metrics process and terminate it\n        _, configFolder = get_handler_vars()\n        metrics_conf_dir = configFolder + \"/metrics_configs/\"\n        metrics_pid_path = metrics_conf_dir + \"metrics_pid.txt\"\n\n        if os.path.isfile(metrics_pid_path):\n            pid = \"\"\n            with open(metrics_pid_path, \"r\") as f:\n                pid = f.read()\n            if pid != \"\":\n                # Check if the process running is indeed MetricsExtension, ignore if the process output doesn't contain MetricsExtension\n                proc = subprocess.Popen([\"ps -o cmd= {0}\".format(pid)], stdout=subprocess.PIPE, shell=True)\n                output = proc.communicate()[0]\n                if metrics_ext_bin in output.decode('utf-8', 'ignore'):\n                    os.kill(int(pid), signal.SIGKILL)\n                else:\n                    return False, \"Found a different process running with PID {0}. Failed to stop MetricsExtension.\".format(pid)\n            else:\n                return False, \"No pid found for a currently running Metrics Extension process in {0}. Failed to stop Metrics Extension.\".format(metrics_pid_path)\n        else:\n            return False, \"File containing the pid for the running Metrics Extension process at {0} does not exist. 
Failed to stop Metrics Extension\".format(metrics_pid_path)\n\n    return True, \"Successfully stopped metrics-extension service\"\n\ndef remove_metrics_service(is_lad):\n    \"\"\"\n    Remove the metrics service if the VM is using systemd as well as the MetricsExtension Binary\n    This method is called after stop_metrics_service by the main extension code during Extension uninstall\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    metrics_service_path = get_metrics_extension_service_path(is_lad)\n\n    if os.path.isfile(metrics_service_path):\n        code = os.remove(metrics_service_path)\n\n    if is_lad:\n        metrics_ext_bin = metrics_constants.lad_metrics_extension_bin\n    else:\n        metrics_ext_bin = metrics_constants.ama_metrics_extension_bin\n\n    # Checking To see if the files were successfully removed, since os.remove doesn't return an error code\n    if os.path.isfile(metrics_ext_bin):\n        remove_code = os.remove(metrics_ext_bin)\n\n    return True, \"Successfully removed metrics-extensions service and MetricsExtension binary.\"\n\ndef generate_Arc_MSI_token(resource = \"https://ingestion.monitor.azure.com/\"):\n    \"\"\"\n    This method is used to query the Hyrbid metdadata service of Arc to get the MSI Auth token for the VM and write it to the ME config location\n    This is called from the main extension code after config setup is complete\n    \"\"\"\n    _, configFolder = get_handler_vars()\n    me_config_dir = configFolder + \"/metrics_configs/\"\n    me_auth_file_path = me_config_dir + \"AuthToken-MSI.json\"\n    expiry_epoch_time = \"\"\n    log_messages = \"\"\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n\n    if not os.path.exists(me_config_dir):\n        log_messages += \"Metrics extension config directory - {0} does not exist. 
Failed to generate MSI auth token for ME.\\n\".format(me_config_dir)\n        return False, expiry_epoch_time, log_messages\n    try:\n        data = None\n        while retries <= max_retries:\n            arc_endpoint = metrics_utils.get_arc_endpoint()\n            try:\n                msiauthurl = arc_endpoint + \"/metadata/identity/oauth2/token?api-version=2019-11-01&resource=\" + resource\n                req = urllib.Request(msiauthurl, headers={'Metadata':'true'})\n                res = urllib.urlopen(req)\n            except:\n                # The above request is expected to fail and add a key to the path\n                authkey_dir = \"/var/opt/azcmagent/tokens/\"\n                if not os.path.exists(authkey_dir):\n                    log_messages += \"Unable to find the auth key file at {0} returned from the arc msi auth request.\".format(authkey_dir)\n                    return False, expiry_epoch_time, log_messages\n                keys_dir = []\n                for filename in os.listdir(authkey_dir):\n                    keys_dir.append(filename)\n\n                authkey_path = authkey_dir + keys_dir[-1]\n                auth = \"basic \"\n                with open(authkey_path, \"r\") as f:\n                    key = f.read()\n                auth += key\n                req = urllib.Request(msiauthurl, headers={'Metadata':'true', 'authorization':auth})\n                res = urllib.urlopen(req)\n            # Parse the response from whichever request succeeded (previously only the\n            # fallback response was parsed, so a successful first request was never consumed)\n            data = json.loads(res.read().decode('utf-8', 'ignore'))\n\n            if not data or \"access_token\" not in data:\n                retries += 1\n            else:\n                break\n\n            log_messages += \"Failed to fetch MSI auth token. Retrying in {2} seconds. Retry count - {0} out of max retries - {1}\\n\".format(retries, max_retries, sleep_time)\n            time.sleep(sleep_time)\n\n\n        if retries > max_retries:\n            log_messages += \"Unable to generate a valid MSI auth token at {0}.\\n\".format(me_auth_file_path)\n            return False, expiry_epoch_time, log_messages\n\n        with open(me_auth_file_path, \"w\") as f:\n            f.write(json.dumps(data))\n\n        if \"expires_on\" in data:\n            expiry_epoch_time = data[\"expires_on\"]\n        else:\n            log_messages += \"Error parsing the MSI token at {0} for the token expiry time. Failed to generate the correct token\\n\".format(me_auth_file_path)\n            return False, expiry_epoch_time, log_messages\n\n    except Exception as e:\n        log_messages += \"Failed to get MSI auth token. Please check if the VM's system assigned identity is enabled. Failed with error {0}\\n\".format(e)\n        return False, expiry_epoch_time, log_messages\n\n    return True, expiry_epoch_time, log_messages\n\n\ndef generate_MSI_token(identifier_name = '', identifier_value = '', is_lad = True):\n    \"\"\"\n    This method is used to query the metadata service to get the MSI auth token for the VM and write it to the ME config location\n    This is called from the main extension code after config setup is complete\n    \"\"\"\n\n    if metrics_utils.is_arc_installed():\n        _, _, _, az_environment, _ = get_imds_values(is_lad)\n        if az_environment.lower() == ArcACloudName:\n            return generate_Arc_MSI_token(\"https://monitoring.azs\")\n        return generate_Arc_MSI_token()\n    else:\n        _, configFolder = get_handler_vars()\n        me_config_dir = configFolder + \"/metrics_configs/\"\n        me_auth_file_path = me_config_dir + \"AuthToken-MSI.json\"\n        expiry_epoch_time = \"\"\n        log_messages = \"\"\n        retries = 1\n        max_retries = 3\n        sleep_time = 5\n\n        if not os.path.exists(me_config_dir):\n            log_messages += \"Metrics extension config directory - {0} does not exist. Failed to generate MSI auth token for ME.\\n\".format(me_config_dir)\n            return False, expiry_epoch_time, log_messages\n        try:\n            data = None\n            while retries <= max_retries:\n                msiauthurl = \"http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://ingestion.monitor.azure.com/\"\n\n                if identifier_name and identifier_value:\n                    msiauthurl += '&{0}={1}'.format(identifier_name, identifier_value)\n\n                req = urllib.Request(msiauthurl, headers={'Metadata':'true', 'Content-Type':'application/json'})\n                res = urllib.urlopen(req)\n                data = json.loads(res.read().decode('utf-8', 'ignore'))\n\n                if not data or \"access_token\" not in data:\n                    retries += 1\n                else:\n                    break\n\n                log_messages += \"Failed to fetch MSI auth token. Retrying in {2} seconds. Retry count - {0} out of max retries - {1}\\n\".format(retries, max_retries, sleep_time)\n                time.sleep(sleep_time)\n\n\n            if retries > max_retries:\n                log_messages += \"Unable to generate a valid MSI auth token at {0}.\\n\".format(me_auth_file_path)\n                return False, expiry_epoch_time, log_messages\n\n            with open(me_auth_file_path, \"w\") as f:\n                f.write(json.dumps(data))\n\n            if \"expires_on\" in data:\n                expiry_epoch_time = data[\"expires_on\"]\n            else:\n                log_messages += \"Error parsing the MSI token at {0} for the token expiry time. Failed to generate the correct token\\n\".format(me_auth_file_path)\n                return False, expiry_epoch_time, log_messages\n\n        except Exception as e:\n            log_messages += \"Failed to get MSI auth token. Please check if the VM's system assigned identity is enabled or the user assigned identity \"\n            log_messages += \"passed in the extension settings exists and is assigned to this VM. 
Failed with error {0}\\n\".format(e)\n            return False, expiry_epoch_time, log_messages\n\n        return True, expiry_epoch_time, log_messages\n\ndef get_ArcA_MSI_token(resource = \"https://monitoring.azs\"):\n    \"\"\"\n    This method is used to query the Hyrbid metdadata service of ArcA to get the MSI Auth token for the VM\n    \"\"\"\n    token_string = \"\"\n    log_messages = \"\"\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n\n    try:\n        data = None\n        while retries <= max_retries:\n            arc_endpoint = metrics_utils.get_arc_endpoint()\n            try:\n                msiauthurl = arc_endpoint + \"/metadata/identity/oauth2/token?api-version=2019-11-01&resource=\" + resource\n                req = urllib.Request(msiauthurl, headers={'Metadata':'true'})\n                res = urllib.urlopen(req)\n            except:\n                # The above request is expected to fail and add a key to the path\n                authkey_dir = \"/var/opt/azcmagent/tokens/\"\n                if not os.path.exists(authkey_dir):\n                    log_messages += \"Unable to find the auth key file at {0} returned from the arc msi auth request.\".format(authkey_dir)\n                    return False, token_string, log_messages\n                keys_dir = []\n                for filename in os.listdir(authkey_dir):\n                    keys_dir.append(filename)\n\n                authkey_path = authkey_dir + keys_dir[-1]\n                auth = \"basic \"\n                with open(authkey_path, \"r\") as f:\n                    key = f.read()\n                auth += key\n                req = urllib.Request(msiauthurl, headers={'Metadata':'true', 'authorization':auth})\n                res = urllib.urlopen(req)\n                data = json.loads(res.read().decode('utf-8', 'ignore'))\n\n            if not data or \"access_token\" not in data:\n                retries += 1\n            else:\n                break\n\n            log_messages += \"Failed to fetch MSI Auth url. Retrying in {2} seconds. Retry Count - {0} out of Mmax Retries - {1}\\n\".format(retries, max_retries, sleep_time)\n            time.sleep(sleep_time)\n\n\n        if retries > max_retries:\n            log_messages += \"Unable to fetch a valid MSI auth token for {0}.\\n\".format(resource)\n            return False, token_string, log_messages\n\n        token_string = data[\"access_token\"]\n\n    except Exception as e:\n        log_messages += \"Failed to get msi auth token. Please check if VM's system assigned Identity is enabled Failed with error {0}\\n\".format(e)\n        return False, token_string, log_messages\n\n    return True, token_string, log_messages\n\n\ndef setup_me_service(is_lad, configFolder, monitoringAccount, metrics_ext_bin, me_influx_port, managed_identity=\"sai\", HUtilObj=None):\n    \"\"\"\n    Setup the metrics service if VM is using systemd\n    :param configFolder: Path for the config folder for metrics extension\n    :param monitoringAccount: Monitoring Account name that ME will upload data to\n    :param metrics_ext_bin: Path for the binary for metrics extension\n    :param me_influx_port: Influxdb port that metrics extension will listen on\n    \"\"\"\n\n    me_service_path = get_metrics_extension_service_path(is_lad)\n    me_service_template_path = os.getcwd() + \"/services/metrics-extension.service\"\n    daemon_reload_status = 1\n\n    if not os.path.exists(configFolder):\n        raise Exception(\"Metrics extension config directory does not exist. 
Failed to set up ME service.\")\n\n    me_influx_socket_path = configFolder + \"/mdm_influxdb.socket\"\n\n    if os.path.isfile(me_service_template_path):\n        copyfile(me_service_template_path, me_service_path)\n\n        if os.path.isfile(me_service_path):\n            os.system(r\"sed -i 's+%ME_BIN%+{1}+' {0}\".format(me_service_path, metrics_ext_bin))\n            os.system(r\"sed -i 's+%ME_INFLUX_PORT%+{1}+' {0}\".format(me_service_path, me_influx_port))\n            os.system(r\"sed -i 's+%ME_DATA_DIRECTORY%+{1}+' {0}\".format(me_service_path, configFolder))\n            os.system(r\"sed -i 's+%ME_MONITORING_ACCOUNT%+{1}+' {0}\".format(me_service_path, monitoringAccount))\n            os.system(r\"sed -i 's+%ME_MANAGED_IDENTITY%+{1}+' {0}\".format(me_service_path, managed_identity))\n            os.system(r\"sed -i 's+%ME_INFLUX_SOCKET_FILE_PATH%+{1}+' {0}\".format(me_service_path, me_influx_socket_path))\n            daemon_reload_status = os.system(\"systemctl daemon-reload\")\n            if daemon_reload_status != 0:\n                message = \"Unable to reload systemd after ME service file change. Failed to set up ME service. Check system for hardening. Exit code:\" + str(daemon_reload_status)\n                if HUtilObj is not None:\n                    HUtilObj.log(message)\n                else:\n                    print('Info: {0}'.format(message))\n\n        else:\n            raise Exception(\"Unable to copy Metrics extension service file to {0}. Failed to set up ME service.\".format(me_service_path))\n    else:\n        raise Exception(\"Metrics extension service template file does not exist at {0}. Failed to set up ME service.\".format(me_service_template_path))\n    return True\n\n\ndef start_metrics_cmv2():\n    \"\"\"\n    Start the metrics service in CMv2 mode\n    \"\"\"\n\n    # Re using the code to grab the config directories and imds values because start will be called from Enable process outside this script\n    log_messages = \"\"\n\n    metrics_ext_bin = metrics_constants.ama_metrics_extension_bin\n    if not os.path.isfile(metrics_ext_bin):\n        log_messages += \"Metrics Extension binary does not exist. Failed to start ME service.\"\n        return False, log_messages\n\n    # If the VM has systemd, then we use that to start/stop\n    metrics_service_name = get_metrics_extension_service_name(False)\n    if metrics_utils.is_systemd():\n        service_restart_status = os.system(\"systemctl restart {0}\".format(metrics_service_name))\n        if service_restart_status != 0:\n            log_messages += \"Unable to start {0} using systemctl. Failed to start ME service. 
Check system for hardening.\".format(metrics_service_name)\n            return False, log_messages\n        else:\n            return True, log_messages\n\n    return False, log_messages\n\n\ndef start_metrics(is_lad, managed_identity=\"sai\"):\n    \"\"\"\n    Start the metrics service if VM is using is systemd, otherwise start the binary as a process and store the pid,\n    to a file in the MetricsExtension config directory,\n    This method is called after config setup is completed by the main extension code\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    # Re using the code to grab the config directories and imds values because start will be called from Enable process outside this script\n    log_messages = \"\"\n\n    if is_lad:\n        metrics_ext_bin = metrics_constants.lad_metrics_extension_bin\n    else:\n        metrics_ext_bin = metrics_constants.ama_metrics_extension_bin\n    if not os.path.isfile(metrics_ext_bin):\n        log_messages += \"Metrics Extension binary does not exist. Failed to start ME service.\"\n        return False, log_messages\n\n    if is_lad:\n        me_influx_port = metrics_constants.lad_metrics_extension_udp_port\n    else:\n        me_influx_port = metrics_constants.ama_metrics_extension_udp_port\n\n    # If the VM has systemd, then we use that to start/stop\n    metrics_service_name = get_metrics_extension_service_name(is_lad)\n    if metrics_utils.is_systemd():\n        service_restart_status = os.system(\"systemctl restart {0}\".format(metrics_service_name))\n        if service_restart_status != 0:\n            log_messages += \"Unable to start {0} using systemctl. Failed to start ME service. Check system for hardening.\".format(metrics_service_name)\n            return False, log_messages\n\n    #Else start ME as a process and save the pid to a file so that we can terminate it while disabling/uninstalling\n    else:\n        _, configFolder = get_handler_vars()\n        me_config_dir = configFolder + \"/metrics_configs/\"\n        #query imds to get the subscription id\n        az_resource_id, subscription_id, location, az_environment, data = get_imds_values(is_lad)\n\n        if is_lad:\n            monitoringAccount = \"CUSTOMMETRIC_\"+ subscription_id\n        else:\n            monitoringAccount = \"CUSTOMMETRIC_\"+ subscription_id + \"_\" + location\n\n        metrics_pid_path = me_config_dir + \"metrics_pid.txt\"\n\n        # If LAD, use ME startup arguments for LAD, otherwise use ME startup arguments for AMA\n        if is_lad:\n            binary_exec_command = \"{0} -TokenSource MSI -Input influxdb_udp -InfluxDbHost 127.0.0.1 -InfluxDbUdpPort {1} -DataDirectory {2} -LocalControlChannel -MonitoringAccount {3} -LogLevel Error\".format(metrics_ext_bin, me_influx_port, me_config_dir, monitoringAccount)\n        else:\n            log_messages += \"MetricsExtension will not be started.\"\n            return False, log_messages\n        \n        proc = subprocess.Popen(binary_exec_command.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        time.sleep(3) #sleeping for 3 seconds before checking if the process is still running, to give it ample time to relay crash info\n        p = proc.poll()\n\n        if p is None: #Process is running successfully\n            metrics_pid = proc.pid\n\n            #write this pid to a file for future use\n            with open(metrics_pid_path, \"w+\") as f:\n                f.write(str(metrics_pid))\n        else:\n            out, err = 
proc.communicate()\n            log_messages += \"Unable to run MetricsExtension binary as a process due to error - {0}. Failed to start MetricsExtension.\".format(err)\n            return False, log_messages\n    return True, log_messages\n\n\ndef create_metrics_extension_conf(az_resource_id, aad_url):\n    \"\"\"\n    Create the metrics extension config\n    :param az_resource_id: Azure Resource ID for the VM\n    :param aad_url: AAD auth url for the VM\n    \"\"\"\n    conf_json = '''{\n  \"timeToTerminateInMs\": 4000,\n  \"configurationExpirationPeriodInMinutes\": 1440,\n  \"configurationQueriesFrequencyInSec\": 900,\n  \"configurationQueriesTimeoutInSec\": 30,\n  \"maxAcceptedMetricAgeInSec\": 1200,\n  \"maxDataEtwDelayInSec\": 3,\n  \"maxPublicationAttemptsPerMinute\": 5,\n  \"maxPublicationBytesPerMinute\": 10000000,\n  \"maxPublicationMetricsPerMinute\": 500000,\n  \"maxPublicationPackageSizeInBytes\": 2500000,\n  \"maxRandomPublicationDelayInSec\": 25,\n  \"metricsSerializationVersion\": 4,\n  \"minGapBetweenPublicationAttemptsInSec\": 5,\n  \"publicationTimeoutInSec\": 30,\n  \"staleMonitoringAccountsPeriodInMinutes\": 20,\n  \"internalMetricPublicationTimeoutInMinutes\": 20,\n  \"dnsResolutionPeriodInSec\": 180,\n  \"maxAggregationQueueSize\": 500000,\n  \"initialAccountConfigurationLoadWaitPeriodInSec\": 20,\n  \"etwMinBuffersPerCore\": 2,\n  \"etwMaxBuffersPerCore\": 16,\n  \"etwBufferSizeInKb\": 1024,\n  \"internalQueueSizeManagementPeriodInSec\": 900,\n  \"etwLateHeartbeatAllowedCycleCount\": 24,\n  \"etwSampleRatio\": 0,\n  \"maxAcceptedMetricFutureAgeInSec\": 1200,\n  \"aggregatedMetricDiagnosticTracePeriod\": 900,\n  \"aggregatedMetricDiagnosticTraceMaxSize\": 100,\n  \"enableMetricMetadataPublication\": true,\n  \"enableDimensionTrimming\": true,\n  \"shutdownRequestedThreshold\": 5,\n  \"internalMetricProductionLevel\": 0,\n  \"maxPublicationWithoutResponseTimeoutInSec\": 300,\n  \"maxConfigQueryWithoutResponseTimeoutInSec\": 300,\n  \"maxThumbprintsPerAccountToLoad\": 100,\n  \"maxPacketsToCaptureLocally\": 0,\n  \"maxNumberOfRawEventsPerCycle\": 1000000,\n  \"publicationSimulated\": false,\n  \"maxAggregationTimeoutPerCycleInSec\": 20,\n  \"maxRawEventInputQueueSize\": 2000000,\n  \"publicationIntervalInSec\": 60,\n  \"interningSwapPeriodInMin\": 240,\n  \"interningClearPeriodInMin\": 5,\n  \"enableParallelization\": true,\n  \"enableDimensionSortingOnIngestion\": true,\n  \"rawEtwEventProcessingParallelizationFactor\": 1,\n  \"maxRandomConfigurationLoadingDelayInSec\": 120,\n  \"aggregationProcessingParallelizationFactor\": 1,\n  \"aggregationProcessingPerPartitionPeriodInSec\": 20,\n  \"aggregationProcessingParallelizationVolumeThreshold\": 500000,\n  \"useSharedHttpClients\": true,\n  \"loadFromConfigurationCache\": true,\n  \"restartByDateTimeUtc\": \"0001-01-01T00:00:00\",\n  \"restartStableIdTarget\": \"\",\n  \"enableIpV6\": false,\n  \"disableCustomMetricAgeSupport\": false,\n  \"globalPublicationCertificateThumbprint\": \"\",\n  \"maxHllSerializationVersion\": 2,\n  \"enableNodeOwnerMode\": false,\n  \"performAdditionalAzureHostIpV6Checks\": false,\n  \"compressMetricData\": false,\n  \"publishMinMaxByDefault\": true,\n  \"azureResourceId\": \"'''+ az_resource_id +'''\",\n  \"aadAuthority\": \"'''+ aad_url +'''\",\n  \"aadTokenEnvVariable\": \"MSIAuthToken\"\n} '''\n    return conf_json\n\ndef create_custom_metrics_conf(mds_gig_endpoint_region, gig_endpoint = \"\"):\n    \"\"\"\n    Create the metrics extension config\n    :param mds_gig_endpoint_region: mds 
gig endpoint region for the VM\n    \"\"\"\n    # Note: mds gig endpoint url is only for 3rd party customers. 1st party endpoint is different\n\n    if not gig_endpoint:\n        gig_hostname = mds_gig_endpoint_region + \".monitoring.azure.com\"\n        gig_ingestion_endpoint = \"https://\" + gig_hostname + \"/api/v1/ingestion/ingest\"\n    else:\n        gig_hostname = urlparse(gig_endpoint).netloc\n        gig_ingestion_endpoint = gig_endpoint + \"/api/v1/ingestion/ingest\"\n\n    conf_json = '''{\n        \"version\": 17,\n        \"maxMetricAgeInSeconds\": 0,\n        \"endpointsForClientForking\": [],\n        \"homeStampGslbHostname\": \"''' + gig_hostname + '''\",\n        \"endpointsForClientPublication\": [\n            \"''' + gig_ingestion_endpoint + '''\"\n        ]\n    } '''\n    return conf_json\n\ndef get_handler_vars():\n    \"\"\"\n    This method is taken from the waagent code. It is used to grab the log and config folder locations from the extension's HandlerEnvironment.json\n    \"\"\"\n\n    logFolder = \"\"\n    configFolder = \"\"\n    handler_env_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'HandlerEnvironment.json'))\n    if os.path.exists(handler_env_path):\n        with open(handler_env_path, 'r') as handler_env_file:\n            handler_env_txt = handler_env_file.read()\n        handler_env = json.loads(handler_env_txt)\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n        if \"handlerEnvironment\" in handler_env:\n            if \"logFolder\" in handler_env[\"handlerEnvironment\"]:\n                logFolder = handler_env[\"handlerEnvironment\"][\"logFolder\"]\n            if \"configFolder\" in handler_env[\"handlerEnvironment\"]:\n                configFolder = handler_env[\"handlerEnvironment\"][\"configFolder\"]\n\n    return logFolder, configFolder\n\n\ndef get_imds_values(is_lad, HUtilObj=None):\n    \"\"\"\n    Query IMDS to get the values required for the MetricsExtension config for this VM\n    \"\"\"\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n    imds_url = \"\"\n    is_arc = False\n\n    if is_lad:\n        imds_url = \"http://169.254.169.254/metadata/instance?api-version=2019-03-11\"\n    else:\n        if metrics_utils.is_arc_installed():\n            imds_url = metrics_utils.get_arc_endpoint()\n            imds_url += \"/metadata/instance?api-version=2019-11-01\"\n            is_arc = True\n        else:\n            imds_url = \"http://169.254.169.254/metadata/instance?api-version=2019-03-11\"\n\n    message = \"IMDS url to query: \" + imds_url\n    if HUtilObj is not None:\n        HUtilObj.log(message)\n    else:\n        print('Info: {0}'.format(message))\n\n    data = None\n    while retries <= max_retries:\n\n        try:\n            req = urllib.Request(imds_url, headers={'Metadata':'true'})\n            res = urllib.urlopen(req)\n            data = json.loads(res.read().decode('utf-8', 'ignore'))\n        except:\n            pass\n\n        # Guard against data being None when the request above failed\n        if not data or \"compute\" not in data:\n            retries += 1\n        else:\n            break\n\n        time.sleep(sleep_time)\n\n    if retries > max_retries:\n        raise Exception(\"Unable to find 'compute' key in imds query response. Reached max retry limit of - {0} times. Failed to set up ME.\".format(max_retries))\n\n    if \"resourceId\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'resourceId' key in imds query response. 
Failed to set up ME.\")\n\n    az_resource_id = data[\"compute\"][\"resourceId\"]\n\n    if \"subscriptionId\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'subscriptionId' key in imds query response. Failed to set up ME.\")\n\n    subscription_id = data[\"compute\"][\"subscriptionId\"]\n\n    if \"location\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'location' key in imds query response. Failed to set up ME.\")\n\n    location = data[\"compute\"][\"location\"]\n\n    if \"azEnvironment\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'azEnvironment' key in imds query response. Failed to set up ME.\")\n\n    az_environment = data[\"compute\"][\"azEnvironment\"]\n\n    return az_resource_id, subscription_id, location, az_environment, data\n\ndef get_arca_endpoints_from_himds():\n    \"\"\"\n    Query himds to get required arca endpoints for MetricsExtension config for this connected machine\n    \"\"\"\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n    imds_url = \"http://localhost:40342/metadata/endpoints?api-version=2019-11-01\"\n\n    if metrics_utils.is_arc_installed():\n        imds_url = metrics_utils.get_arc_endpoint()\n        imds_url += \"/metadata/endpoints?api-version=2019-11-01\"\n\n    data = None\n    while retries <= max_retries:\n\n        try:\n            req = urllib.Request(imds_url, headers={'Metadata':'true'})\n            res = urllib.urlopen(req)\n            data = json.loads(res.read().decode('utf-8', 'ignore'))\n        except:\n            pass\n\n        if \"dataplaneEndpoints\" not in data or \"resourceManager\" not in data:\n            retries += 1\n        else:\n            break\n\n        time.sleep(sleep_time)\n\n    if retries > max_retries:\n        raise Exception(\"Unable to find 'dataplaneEndpoints' key in imds query response. Reached max retry limit of - {0} times. Failed to set up ME.\".format(max_retries))\n\n    if \"arcMonitorControlServiceEndpoint\" not in data[\"dataplaneEndpoints\"]:\n        raise Exception(\"Unable to find 'arcMonitorControlServiceEndpoint' key in imds query response. 
Failed to set up ME.\")\n\n    mcs_endpoint = data[\"dataplaneEndpoints\"][\"arcMonitorControlServiceEndpoint\"]\n    arm_endpoint = data[\"resourceManager\"]\n\n    return arm_endpoint, mcs_endpoint\n\ndef get_arca_ingestion_endpoint_from_mcs():\n    \"\"\"\n    Query himds to get required arca endpoints for MetricsExtension config for this connected machine\n    \"\"\"\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n\n    _, mcs_endpoint = get_arca_endpoints_from_himds()\n    az_resource_id, _, _, _, _ = get_imds_values(False)\n    msi_token_fetched, mcs_token, log_messages = get_ArcA_MSI_token()\n    if not msi_token_fetched:\n        raise Exception(\"Unable to fetch MCS token, error message: \" + log_messages)\n    \n\n    mcs_config_query_url = mcs_endpoint + az_resource_id + \"/agentConfigurations?platform=linux&includeMeConfig=true&api-version=2022-06-02\"\n\n    if not mcs_token.lower().startswith(\"bearer \"):\n        mcs_token = \"Bearer \" + mcs_token\n\n    data = None\n    while retries <= max_retries:\n\n        # Query imds to get the required information\n        req = urllib.Request(mcs_config_query_url, headers={'Metadata':'true', 'Authorization':mcs_token})\n        res = urllib.urlopen(req)\n        data = json.loads(res.read().decode('utf-8', 'ignore'))\n\n        if \"configurations\" not in data:\n            retries += 1\n        else:\n            break\n\n        time.sleep(sleep_time)\n\n    if retries > max_retries:\n        raise Exception(\"Unable to find 'configurations' key in amcs query response. Reached max retry limit of - {0} times. Failed to set up ME.\".format(max_retries))\n\n    if \"content\" not in data[\"configurations\"][0]:\n        raise Exception(\"Unable to find 'content' key in amcs query response. Failed to set up ME.\")\n    \n    if \"channels\" not in data[\"configurations\"][0][\"content\"]:\n        raise Exception(\"Unable to find 'channels' key in amcs query response. Failed to set up ME.\")\n    \n    if \"endpoint\" not in data[\"configurations\"][0][\"content\"][\"channels\"][0]:\n        raise Exception(\"Unable to find 'endpoint' key in amcs query response. Failed to set up ME.\")\n\n    ingestion_endpoint = data[\"configurations\"][0][\"content\"][\"channels\"][0][\"endpoint\"]\n\n    # try:\n    #     gig_hostname = urllib.parse.urlparse(ingestion_endpoint).netloc\n\n    # except Exception as e:\n    #     raise Exception(\"Failed to retrieve ingestion host name with Exception='{0}'. \".format(e))\n\n    return ingestion_endpoint\n\ndef get_arm_domain(az_environment):\n    \"\"\"\n    Return the ARM domain to use based on the Azure environment\n    \"\"\"\n\n    try:\n        if az_environment.lower() == ArcACloudName:\n            arm_endpoint, _ = get_arca_endpoints_from_himds()\n            arm_endpoint_parsed = urlparse(arm_endpoint)\n            domain = arm_endpoint_parsed.netloc\n        else:\n            domain = ARMDomainMap[az_environment.lower()]\n\n    except KeyError:\n        raise Exception(\"Unknown cloud environment \\\"{0}\\\". 
Failed to set up ME.\".format(az_environment))\n\n    return domain\n\n\ndef get_metrics_extension_service_path(is_lad):\n    \"\"\"\n    Utility method to get the service path\n    \"\"\"\n    if(is_lad):\n        if os.path.exists(\"/lib/systemd/system/\"):\n            return metrics_constants.lad_metrics_extension_service_path\n        elif os.path.exists(\"/usr/lib/systemd/system/\"):\n            return metrics_constants.lad_metrics_extension_service_path_usr_lib\n        else:\n            raise Exception(\"Systemd unit files do not exist at /lib/systemd/system or /usr/lib/systemd/system/. Failed to setup Metrics Extension service.\")\n    else:\n        if os.path.exists(\"/etc/systemd/system\"):\n            return metrics_constants.metrics_extension_service_path_etc\n        if os.path.exists(\"/lib/systemd/system/\"):\n            return metrics_constants.metrics_extension_service_path\n        elif os.path.exists(\"/usr/lib/systemd/system/\"):\n            return metrics_constants.metrics_extension_service_path_usr_lib\n        else:\n            raise Exception(\"Systemd unit files do not exist at /etc/systemd/system, /lib/systemd/system or /usr/lib/systemd/system/. Failed to setup Metrics Extension service.\")\n\n\ndef get_metrics_extension_service_name(is_lad):\n    \"\"\"\n    Utility method to get the service name\n    \"\"\"\n    if(is_lad):    \n        return metrics_constants.lad_metrics_extension_service_name\n    else:\n        return metrics_constants.metrics_extension_service_name\n\n\ndef setup_me(is_lad, managed_identity=\"sai\", HUtilObj=None, is_local_control_channel=True, user=None, group=None):\n    \"\"\"\n    The main method for creating and writing MetricsExtension configuration as well as service setup\n    :param is_lad: Boolean value for whether the extension is Lad or not (AMA)\n    :param is_local_control_channel: Boolean value for whether MetricsExtension needs to be run in `-LocalControlChannel` mode (CMv1 only)\n    :param user: User that would own MetricsExtension process. If not specified, would default to the caller, in this case being root\n    :param group: Group that would own MetricsExtension process. 
If not specified, would default to the caller, in this case being root\n    \"\"\"\n    _, config_folder = get_handler_vars()\n    me_config_dir = config_folder + \"/metrics_configs/\"\n    create_empty_data_directory(me_config_dir)\n\n    if not is_local_control_channel:\n        # CMv2 and related modes\n        me_monitoring_account = \"\"\n        if user and group:\n            # Remove any previous user setup for MetricsExtension if it exists\n            remove_user(user, HUtilObj=HUtilObj)\n            # Create user/group for metrics-extension.service if it is requested\n            ensure_user_and_group(user, group, create_if_missing=True, HUtilObj=HUtilObj)\n            # For ARC, add user to himds group if it exists\n            ensure_user_and_group(user, \"himds\", create_if_missing=False, HUtilObj=HUtilObj)\n            # In CMv2 with user and group specified, create directory for MetricsExtension config caching\n            me_config_dir = \"/var/run/azuremetricsext\"\n            create_empty_data_directory(me_config_dir, user, group, HUtilObj=HUtilObj)\n    else:\n        # query imds to get the required information\n        az_resource_id, subscription_id, location, az_environment, data = get_imds_values(is_lad)\n        arm_domain = get_arm_domain(az_environment)\n\n        # get tenantID\n        # The url request will fail due to missing authentication header, but we get the auth url from the header of the request fail exception\n        aad_auth_url = \"\"\n        arm_url = \"https://{0}/subscriptions/{1}?api-version=2014-04-01\".format(arm_domain, subscription_id)\n        try:\n            req = urllib.Request(arm_url, headers={'Content-Type':'application/json'})\n\n            res = urllib.urlopen(req)\n\n        except urlerror.HTTPError as e:\n            err_res = e.headers[\"WWW-Authenticate\"]\n            for line in err_res.split(\",\"):\n                    if \"Bearer authorization_uri\" in line:\n                            data = line.split(\"=\")\n                            aad_auth_url = data[1][1:-1] # Removing the quotes from the front and back\n                            break\n\n        except Exception as e:\n            message = \"Failed to retrieve AAD Authentication URL from \" + arm_url + \" with Exception='{0}'. 
\".format(e)\n            message += \"Continuing with metrics setup without AAD auth url.\"\n            if HUtilObj is not None:\n                HUtilObj.log(message)\n            else:\n                print('Info: {0}'.format(message))\n\n        #create metrics conf\n        me_conf = create_metrics_extension_conf(az_resource_id, aad_auth_url)\n\n        #create custom metrics conf\n        if az_environment.lower() == ArcACloudName:\n            ingestion_endpoint = get_arca_ingestion_endpoint_from_mcs()\n            custom_conf = create_custom_metrics_conf(location, ingestion_endpoint)\n        else:\n            custom_conf = create_custom_metrics_conf(location)\n\n        #write configs to disk\n        me_conf_path = me_config_dir + \"MetricsExtensionV1_Configuration.json\"\n        with open(me_conf_path, \"w\") as f:\n            f.write(me_conf)\n\n        if is_lad:\n            me_monitoring_account = \"CUSTOMMETRIC_\"+ subscription_id\n        else:\n            me_monitoring_account = \"CUSTOMMETRIC_\"+ subscription_id + \"_\" +location\n\n        custom_conf_path = me_config_dir + me_monitoring_account.lower() +\"_MonitoringAccount_Configuration.json\"\n\n        with open(custom_conf_path, \"w\") as f:\n            f.write(custom_conf)\n\n    # Copy MetricsExtension Binary to the bin location\n    me_bin_local_path = os.getcwd() + \"/MetricsExtensionBin/MetricsExtension\"\n    if is_lad:\n        metrics_ext_bin = metrics_constants.lad_metrics_extension_bin\n    else:\n        metrics_ext_bin = metrics_constants.ama_metrics_extension_bin\n\n    if is_lad:\n        lad_bin_path = \"/usr/local/lad/bin/\"\n        # Checking if directory exists before copying ME bin over to /usr/local/lad/bin/\n        if not os.path.exists(lad_bin_path):\n            os.makedirs(lad_bin_path)\n\n    # Check if previous file exist at the location, compare the two binaries,\n    # If the files are not same, remove the older file, and copy the new one\n    # If they are the same, then we ignore it and don't copy\n    if os.path.isfile(me_bin_local_path):\n        if os.path.isfile(metrics_ext_bin):\n            if not filecmp.cmp(me_bin_local_path, metrics_ext_bin):\n                # Removing the file in case it is already being run in a process,\n                # in which case we can get an error \"text file busy\" while copying\n                os.remove(metrics_ext_bin)\n                copyfile(me_bin_local_path, metrics_ext_bin)\n                os.chmod(metrics_ext_bin, stat.S_IXGRP | stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXOTH | stat.S_IROTH)\n\n        else:\n            # No previous binary exist, simply copy it and make it executable\n            copyfile(me_bin_local_path, metrics_ext_bin)\n            os.chmod(metrics_ext_bin, stat.S_IXGRP | stat.S_IRGRP | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXOTH | stat.S_IROTH)\n    else:\n        raise Exception(\"Unable to copy MetricsExtension Binary, could not find file at the location {0} . 
Failed to set up ME.\".format(me_bin_local_path))\n\n    if is_lad:\n        me_influx_port = metrics_constants.lad_metrics_extension_udp_port\n    else:\n        me_influx_port = metrics_constants.ama_metrics_extension_udp_port\n\n    # setup metrics extension service\n    # If the VM has systemd, then we use that to start/stop\n    if metrics_utils.is_systemd():\n        setup_me_service(is_lad, me_config_dir, me_monitoring_account, metrics_ext_bin, me_influx_port, managed_identity, HUtilObj)\n\n    return True\n\n\ndef remove_user(user, HUtilObj=None):\n    \"\"\"\n    Removes existing user.\n    Note: This is important as the older MetricsExtension might have created the user which needs to be removed.\n    This mechanism can be removed in the future, if the user and group are maintained from MetricsExtension package.\n    :param user: linux user\n    :param HUtilObj: utility object for logging\n    \"\"\"\n    try:\n        pwd.getpwnam(user)\n    except KeyError:\n        if HUtilObj:\n            HUtilObj.log('User {0} does not exist.'.format(user))\n        return\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Error while checking user {0}: {1}'.format(user, e))\n        return\n\n    try:\n        process = subprocess.Popen(['userdel', \"-r\", user], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out, err = process.communicate()\n        if process.returncode != 0:\n            if HUtilObj:\n                HUtilObj.log('Failed to delete user {0}. stderr: {1}'.format(user, err))\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Error while deleting user {0}: {1}'.format(user, e))\n\n\ndef ensure_user_and_group(user, group, create_if_missing=False, HUtilObj=None):\n    \"\"\"\n    Ensures if the user and group exists, optionally creating them if it does not exist.\n    Group is checked, user is checked and then user is added to the group.\n    Returns True if all of them are available (or created), else returns False.\n    :param user: linux user\n    :param group: linux group\n    :param create_if_missing: boolean if true, create the requested user and group, where user belongs to the group\n    :param HUtilObj: utility object for logging\n    \"\"\"\n    # Check/Create group if missing\n    try:\n        grp.getgrnam(group)\n        if HUtilObj:\n            HUtilObj.log('Group {0} exists.'.format(group))\n    except KeyError:\n        if create_if_missing:\n            try:\n                process = subprocess.Popen(['groupadd', group], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n                out, err = process.communicate()\n                if process.returncode != 0:\n                    if HUtilObj:\n                        HUtilObj.log('Failed to create group {0}. 
stderr: {1}'.format(group, err))\n                    return False\n                if HUtilObj:\n                    HUtilObj.log('Group {0} created.'.format(group))\n            except Exception as e:\n                if HUtilObj:\n                    HUtilObj.log('Error while creating group {0}: {1}'.format(group, e))\n                return False\n        else:\n            if HUtilObj:\n                HUtilObj.log('Group {0} does not exist.'.format(group))\n            return False\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Error while checking group {0}: {1}'.format(group, e))\n        return False\n\n    # Check/Create user if missing\n    try:\n        pwd.getpwnam(user)\n        if HUtilObj:\n            HUtilObj.log('User {0} exists.'.format(user))\n    except KeyError:\n        if create_if_missing:\n            try:\n                process = subprocess.Popen([\n                    'useradd', '--no-create-home', '--system', '--shell', '/usr/sbin/nologin', user\n                ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n                out, err = process.communicate()\n                if process.returncode != 0:\n                    if HUtilObj:\n                        HUtilObj.log('Failed to create user {0}. stderr: {1}'.format(user, err))\n                    return False\n                if HUtilObj:\n                    HUtilObj.log('User {0} created.'.format(user))\n            except Exception as e:\n                if HUtilObj:\n                    HUtilObj.log('Error while creating user {0}: {1}'.format(user, e))\n                return False\n        else:\n            if HUtilObj:\n                HUtilObj.log('User {0} does not exist.'.format(user))\n            return False\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Error while checking user {0}: {1}'.format(user, e))\n        return False\n\n    # Add user to group\n    try:\n        process = subprocess.Popen(['usermod', '-aG', group, user], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out, err = process.communicate()\n        if process.returncode != 0:\n            if HUtilObj:\n                HUtilObj.log('Failed to add user {0} to group {1}. 
stderr: {2}'.format(user, group, err))\n            return False\n        if HUtilObj:\n            HUtilObj.log('User {0} added to group {1}.'.format(user, group))\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Error while adding user {0} to group {1}: {2}'.format(user, group, e))\n        return False\n\n    if HUtilObj:\n        HUtilObj.log('User {0} added to group {1} (or already a member).'.format(user, group))\n    return True\n\n\ndef create_empty_data_directory(me_config_dir, user=None, group=None, mode=0o755, HUtilObj=None):\n    '''\n    Creates an empty data directory where MetricsExtension can store cached configurations.\n    For CMv1, MetricsExtension requires mdsd to provide all configurations on disk.\n    For CMv2, MetricsExtension requires an empty data directory where it can cache its configurations.\n    '''\n    try:\n        # Clear older config directory if exists.\n        if os.path.exists(me_config_dir):\n            rmtree(me_config_dir)\n        os.makedirs(me_config_dir, mode=mode)\n\n        if user and group:\n            # Get UID and GID from user and group names\n            uid = pwd.getpwnam(user).pw_uid\n            gid = grp.getgrnam(group).gr_gid\n\n            # Set the ownership\n            os.chown(me_config_dir, uid, gid)\n\n        if HUtilObj:\n            HUtilObj.log('Directory {0} created with ownership {1}:{2}.'.format(me_config_dir, user, group))\n    except Exception as e:\n        if HUtilObj:\n            HUtilObj.log('Failed to create directory: {0}'.format(e))\n"
  },
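The user/group and data-directory helpers above are meant to be composed when the extension sets up MetricsExtension. The sketch below is illustrative only and assumes it runs alongside the module above; the "metricstelegraf" account name and the /etc/metrics-ext/ path are invented placeholders, not values taken from this repo.

# Minimal sketch (assumptions flagged above): create a service account and
# hand MetricsExtension an empty, correctly-owned CMv2 cache directory.
def example_me_user_setup(HUtilObj=None):
    user = "metricstelegraf"               # hypothetical service account
    group = "metricstelegraf"              # hypothetical group
    me_config_dir = "/etc/metrics-ext/"    # hypothetical data directory

    # Create the group/user pair on first setup; bail out on failure.
    if not ensure_user_and_group(user, group, create_if_missing=True, HUtilObj=HUtilObj):
        return False

    # Wipe and recreate the empty data directory, owned by the new account
    # (the CMv2 behavior described in create_empty_data_directory's docstring).
    create_empty_data_directory(me_config_dir, user=user, group=group, mode=0o755, HUtilObj=HUtilObj)
    return True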
  {
    "path": "LAD-AMA-Common/telegraf_utils/__init__.py",
    "content": "# Telegraf config parser module package\n"
  },
  {
    "path": "LAD-AMA-Common/telegraf_utils/telegraf_config_handler.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport sys\nimport json\nimport os\nfrom telegraf_utils.telegraf_name_map import name_map\nimport subprocess\nimport signal\nfrom shutil import copyfile, rmtree\nimport time\nimport metrics_ext_utils.metrics_constants as metrics_constants\nimport metrics_ext_utils.metrics_common_utils as metrics_utils\n\ntry:\n    # Python 3+\n    import urllib.request as urllib\nexcept ImportError:\n    # Python 2\n    import urllib2 as urllib\n\n\"\"\"\nSample input data received by this script\n[\n    {\n        \"displayName\" : \"Network->Packets sent\",\n        \"interval\" : \"15s\",\n        \"sink\" : [\"mdsd\" , \"me\"]\n    },\n    {\n        \"displayName\" : \"Network->Packets recieved\",\n        \"interval\" : \"15s\",\n        \"sink\" : [\"mdsd\" , \"me\"]\n    }\n]\n\"\"\"\n\ndef parse_config(data, me_url, mdsd_url, is_lad, az_resource_id, subscription_id, resource_group, region, virtual_machine_name):\n    \"\"\"\n    Main parser method to convert Metrics config from extension configuration to telegraf configuration\n    :param data: Parsed Metrics Configuration from which telegraf config is created\n    :param me_url: The url to which telegraf will send metrics to for MetricsExtension\n    :param mdsd_url: The url to which telegraf will send metrics to for MDSD\n    :param is_lad: Boolean value for whether the extension is Lad or not (AMA)\n    :param az_resource_id: Azure Resource ID value for the VM\n    :param subscription_id: Azure Subscription ID value for the VM\n    :param resource_group: Azure Resource Group value for the VM\n    :param region: Azure Region value for the VM\n    :param virtual_machine_name: Azure Virtual Machine Name value (Only in the case for VMSS) for the VM\n    \"\"\"\n    storage_namepass_list = []    \n    storage_namepass_str = \"\"\n\n    vmi_rate_counters_list = [\"LogicalDisk\\\\BytesPerSecond\", \"LogicalDisk\\\\ReadBytesPerSecond\", \"LogicalDisk\\\\ReadsPerSecond\",  \"LogicalDisk\\\\WriteBytesPerSecond\", \"LogicalDisk\\\\WritesPerSecond\", \"LogicalDisk\\\\TransfersPerSecond\", \"Network\\\\ReadBytesPerSecond\", \"Network\\\\WriteBytesPerSecond\"]\n\n    MetricsExtensionNamepsace = metrics_constants.metrics_extension_namespace\n    has_mdsd_output = False\n    has_me_output = False\n    \n    if len(data) == 0:\n        raise Exception(\"Empty config data 
received.\")\n\n    if me_url is None or mdsd_url is None:\n        raise Exception(\"No url provided for Influxdb output plugin to ME, AMA.\")\n\n    telegraf_json = {}\n    counterConfigIdMap = {}\n\n    for item in data:\n        sink = item[\"sink\"]\n        if \"mdsd\" in sink:\n            has_mdsd_output = True\n        if \"me\" in sink:\n            has_me_output = True\n        counter = item[\"displayName\"]\n        if counter in name_map:\n            plugin = name_map[counter][\"plugin\"]\n\n            is_vmi = plugin.endswith(\"_vmi\")\n            telegraf_plugin = plugin\n            if is_vmi:\n                splitResult = plugin.split('_')\n                telegraf_plugin = splitResult[0]            \n                \n            if counter not in counterConfigIdMap:\n                counterConfigIdMap[counter] = []\n\n            configIds = counterConfigIdMap[counter]\n\n            configurationIds = item[\"configurationId\"]\n\n            for configId in configurationIds:\n                if configId not in configIds:\n                    configIds.append(configId)\n            \n            omiclass = \"\"\n            if is_lad:\n                omiclass = counter.split(\"->\")[0]\n            else:\n                omiclass = name_map[counter][\"module\"]\n\n            if omiclass not in telegraf_json:\n                telegraf_json[omiclass] = {}\n            if plugin not in telegraf_json[omiclass]:\n                telegraf_json[omiclass][plugin] = {}\n            telegraf_json[omiclass][plugin][name_map[counter][\"field\"]] = {}\n\n            if is_lad:\n                telegraf_json[omiclass][plugin][name_map[counter][\"field\"]][\"displayName\"] = counter.split(\"->\")[1]\n            else:\n                telegraf_json[omiclass][plugin][name_map[counter][\"field\"]][\"displayName\"] = counter\n\n            telegraf_json[omiclass][plugin][name_map[counter][\"field\"]][\"interval\"] = item[\"interval\"]\n            if is_lad:\n                telegraf_json[omiclass][plugin][name_map[counter][\"field\"]][\"ladtablekey\"] = name_map[counter][\"ladtablekey\"]\n            if \"op\" in name_map[counter]:\n                telegraf_json[omiclass][plugin][name_map[counter][\"field\"]][\"op\"] = name_map[counter][\"op\"]\n\n    \"\"\"\n    Sample converted telegraf conf dict -\n\n    \"network\": {\n        \"net\": {\n            \"bytes_total\": {\"interval\": \"15s\",\"displayName\": \"Network total bytes\",\"ladtablekey\": \"/builtin/network/bytestotal\"},\n            \"drop_total\": {\"interval\": \"15s\",\"displayName\": \"Network collisions\",\"ladtablekey\": \"/builtin/network/totalcollisions\"},\n            \"err_in\": {\"interval\": \"15s\",\"displayName\": \"Packets received errors\",\"ladtablekey\": \"/builtin/network/totalrxerrors\"},\n            \"packets_sent\": {\"interval\": \"15s\",\"displayName\": \"Packets sent\",\"ladtablekey\": \"/builtin/network/packetstransmitted\"},\n        }\n    },\n    \"filesystem\": {\n        \"disk\": {\n            \"used_percent\": {\"interval\": \"15s\",\"displayName\": \"Filesystem % used space\",\"ladtablekey\": \"/builtin/filesystem/percentusedspace\"},\n            \"used\": {\"interval\": \"15s\",\"displayName\": \"Filesystem used space\",\"ladtablekey\": \"/builtin/filesystem/usedspace\"},\n            \"free\": {\"interval\": \"15s\",\"displayName\": \"Filesystem free space\",\"ladtablekey\": \"/builtin/filesystem/freespace\"},\n            \"inodes_free_percent\": {\"interval\": 
\"15s\",\"displayName\": \"Filesystem % free inodes\",\"ladtablekey\": \"/builtin/filesystem/percentfreeinodes\"},\n        },\n        \"diskio\": {\n            \"writes_filesystem\": {\"interval\": \"15s\",\"displayName\": \"Filesystem writes/sec\",\"ladtablekey\": \"/builtin/filesystem/writespersecond\",\"op\": \"rate\"},\n            \"total_transfers_filesystem\": {\"interval\": \"15s\",\"displayName\": \"Filesystem transfers/sec\",\"ladtablekey\": \"/builtin/filesystem/transferspersecond\",\"op\": \"rate\"},\n            \"reads_filesystem\": {\"interval\": \"15s\",\"displayName\": \"Filesystem reads/sec\",\"ladtablekey\": \"/builtin/filesystem/readspersecond\",\"op\": \"rate\"},\n        }\n    },\n        \"\"\"\n\n    if len(telegraf_json) == 0:\n        raise Exception(\"Unable to parse telegraf config into intermediate dictionary.\")\n\n    excess_diskio_plugin_list_lad = [\"total_transfers_filesystem\", \"read_bytes_filesystem\", \"total_bytes_filesystem\", \"write_bytes_filesystem\", \"reads_filesystem\", \"writes_filesystem\"]\n    excess_diskio_field_drop_list_str = \"\"\n\n\n    int_file = {\"filename\":\"intermediate.json\", \"data\": json.dumps(telegraf_json)}\n    output = []\n    output.append(int_file)\n\n    for omiclass in telegraf_json:\n        input_str = \"\"\n        ama_rename_str = \"\"\n        metricsext_rename_str = \"\"\n        lad_specific_rename_str = \"\"\n        rate_specific_aggregator_str = \"\"\n        aggregator_str = \"\"\n        for plugin in telegraf_json[omiclass]:\n            config_file = {\"filename\" : omiclass+\".conf\"}\n            # Arbitrary max value for finding min\n            min_interval = \"999999999s\"\n            is_vmi = plugin.endswith(\"_vmi\")\n            is_vmi_rate_counter = False\n            for field in telegraf_json[omiclass][plugin]:\n                if not is_vmi_rate_counter:\n                    is_vmi_rate_counter = telegraf_json[omiclass][plugin][field][\"displayName\"] in vmi_rate_counters_list\n            \n            # if is_vmi_rate_counter:\n            #     min_interval = \"1s\"\n                \n            if is_vmi or is_vmi_rate_counter:\n                splitResult = plugin.split('_')\n                telegraf_plugin = splitResult[0]\n                input_str += \"[[inputs.\" + telegraf_plugin + \"]]\\n\"\n                # plugin = plugin[:-4]\n            else:\n                input_str += \"[[inputs.\" + plugin + \"]]\\n\"\n            # input_str += \" \"*2 + \"name_override = \\\"\" + omiclass + \"\\\"\\n\"\n\n            # If it's a lad config then add the namepass fields for sending totals to storage\n            # always skip lad plugin names as they should be dropped from ME\n            lad_plugin_name = plugin + \"_total\"\n            if lad_plugin_name not in storage_namepass_list:\n                    storage_namepass_list.append(lad_plugin_name)\n                    \n            if is_lad:                \n                lad_specific_rename_str += \"\\n[[processors.rename]]\\n\"\n                lad_specific_rename_str += \" \"*2 + \"namepass = [\\\"\" + lad_plugin_name + \"\\\"]\\n\"                \n            elif is_vmi  or is_vmi_rate_counter:                \n                if plugin not in storage_namepass_list:\n                    storage_namepass_list.append(plugin + \"_mdsd\")\n            else:\n                ama_plugin_name = plugin + \"_mdsd_la_perf\"\n                ama_rename_str += \"\\n[[processors.rename]]\\n\"\n                ama_rename_str += 
\" \"*2 + \"namepass = [\\\"\" + ama_plugin_name + \"\\\"]\\n\"\n                if ama_plugin_name not in storage_namepass_list:\n                    storage_namepass_list.append(ama_plugin_name)\n\n            namespace = MetricsExtensionNamepsace\n            if is_vmi or is_vmi_rate_counter:\n                namespace = \"insights.virtualmachine\"\n\n            if is_vmi_rate_counter:\n                # Adding \"_rated\" as a substring for vmi rate metrics to avoid renaming collisions\n                plugin_name = plugin + \"_rated\"\n            else:\n                plugin_name = plugin\n\n            metricsext_rename_str += \"\\n[[processors.rename]]\\n\"\n            metricsext_rename_str += \" \"*2 + \"namepass = [\\\"\" + plugin_name + \"\\\"]\\n\"\n            metricsext_rename_str += \"\\n\" + \" \"*2 + \"[[processors.rename.replace]]\\n\"\n            metricsext_rename_str += \" \"*4 + \"measurement = \\\"\" + plugin_name + \"\\\"\\n\"\n            metricsext_rename_str += \" \"*4 + \"dest = \\\"\" + namespace + \"\\\"\\n\"\n\n            fields = \"\"\n            ops_fields = \"\"\n            non_ops_fields = \"\"\n            non_rate_aggregate = False\n            ops = \"\"\n            rate_aggregate = False\n            for field in telegraf_json[omiclass][plugin]:\n                fields += \"\\\"\" + field + \"\\\", \"\n                if is_vmi or is_vmi_rate_counter :\n                    if \"MB\" in field:\n                        fields += \"\\\"\" + field.replace('MB','Bytes') + \"\\\", \"\n\n                #Use the shortest interval time for the whole plugin\n                new_interval = telegraf_json[omiclass][plugin][field][\"interval\"]\n                if int(new_interval[:-1]) < int(min_interval[:-1]):\n                    min_interval = new_interval\n\n                #compute values for aggregator options\n                if \"op\" in telegraf_json[omiclass][plugin][field]:\n                    if telegraf_json[omiclass][plugin][field][\"op\"] == \"rate\":\n                        rate_aggregate = True\n                        ops = \"\\\"rate\\\", \\\"rate_min\\\", \\\"rate_max\\\", \\\"rate_count\\\", \\\"rate_sum\\\", \\\"rate_mean\\\"\"\n                    if is_lad:\n                        ops_fields += \"\\\"\" +  telegraf_json[omiclass][plugin][field][\"ladtablekey\"] + \"\\\", \"\n                    else:\n                        ops_fields += \"\\\"\" +  telegraf_json[omiclass][plugin][field][\"displayName\"] + \"\\\", \"\n                else:\n                    non_rate_aggregate = True\n                    if is_lad:\n                        non_ops_fields += \"\\\"\" +  telegraf_json[omiclass][plugin][field][\"ladtablekey\"] + \"\\\", \"\n                    else:\n                        non_ops_fields += \"\\\"\" +  telegraf_json[omiclass][plugin][field][\"displayName\"] + \"\\\", \"\n\n                #Add respective rename processor plugin based on the displayname\n                if is_lad:\n                    lad_specific_rename_str += \"\\n\" + \" \"*2 + \"[[processors.rename.replace]]\\n\"\n                    lad_specific_rename_str += \" \"*4 + \"field = \\\"\" + field + \"\\\"\\n\"\n                    lad_specific_rename_str += \" \"*4 + \"dest = \\\"\" + telegraf_json[omiclass][plugin][field][\"ladtablekey\"] + \"\\\"\\n\"\n                elif not is_vmi and not is_vmi_rate_counter:\n                    # no rename of fields as they are set in telegraf directly                \n                    ama_rename_str 
+= \"\\n\" + \" \"*2 + \"[[processors.rename.replace]]\\n\"\n                    ama_rename_str += \" \"*4 + \"field = \\\"\" + field + \"\\\"\\n\"\n                    ama_rename_str += \" \"*4 + \"dest = \\\"\" + telegraf_json[omiclass][plugin][field][\"displayName\"] + \"\\\"\\n\"\n\n                # Avoid adding the rename logic for the redundant *_filesystem fields for diskio which were added specifically for OMI parity in LAD\n                # Had to re-use these six fields to avoid renaming issues since both Filesystem and Disk in OMI-LAD use them\n                # AMA only uses them once so only need this for LAD\n                if is_lad:\n                    if field in excess_diskio_plugin_list_lad:\n                        excess_diskio_field_drop_list_str += \"\\\"\" + field + \"\\\", \"\n                    else:\n                        metricsext_rename_str += \"\\n\" + \" \"*2 + \"[[processors.rename.replace]]\\n\"\n                        metricsext_rename_str += \" \"*4 + \"field = \\\"\" + field + \"\\\"\\n\"\n                        metricsext_rename_str += \" \"*4 + \"dest = \\\"\" + plugin + \"/\" + field + \"\\\"\\n\"\n                elif not is_vmi and not is_vmi_rate_counter:\n                    # no rename of fields as they are set in telegraf directly                \n                    metricsext_rename_str += \"\\n\" + \" \"*2 + \"[[processors.rename.replace]]\\n\"\n                    metricsext_rename_str += \" \"*4 + \"field = \\\"\" + field + \"\\\"\\n\"\n                    metricsext_rename_str += \" \"*4 + \"dest = \\\"\" + plugin + \"/\" + field + \"\\\"\\n\"\n\n            #Add respective operations for aggregators\n            # if is_lad:\n            if not is_vmi and not is_vmi_rate_counter:\n                suffix = \"\"\n                if is_lad:\n                    suffix = \"_total\\\"]\\n\"\n                else:\n                    suffix = \"_mdsd_la_perf\\\"]\\n\"\n                    \n                if rate_aggregate:\n                    aggregator_str += \"[[aggregators.basicstats]]\\n\"\n                    aggregator_str += \" \"*2 + \"namepass = [\\\"\" + plugin + suffix\n                    aggregator_str += \" \"*2 + \"period = \\\"\" + min_interval + \"\\\"\\n\"\n                    aggregator_str += \" \"*2 + \"drop_original = true\\n\"\n                    aggregator_str += \" \"*2 + \"fieldpass = [\" + ops_fields[:-2] + \"]\\n\" #-2 to strip the last comma and space\n                    aggregator_str += \" \"*2 + \"stats = [\" + ops + \"]\\n\"\n\n                if non_rate_aggregate:\n                    aggregator_str += \"[[aggregators.basicstats]]\\n\"\n                    aggregator_str += \" \"*2 + \"namepass = [\\\"\" + plugin + suffix\n                    aggregator_str += \" \"*2 + \"period = \\\"\" + min_interval + \"\\\"\\n\"\n                    aggregator_str += \" \"*2 + \"drop_original = true\\n\"\n                    aggregator_str += \" \"*2 + \"fieldpass = [\" + non_ops_fields[:-2] + \"]\\n\" #-2 to strip the last comma and space\n                    aggregator_str += \" \"*2 + \"stats = [\\\"mean\\\", \\\"max\\\", \\\"min\\\", \\\"sum\\\", \\\"count\\\"]\\n\\n\"\n            \n            elif is_vmi_rate_counter:\n                # Aggregator config for MDSD\n                aggregator_str += \"[[aggregators.basicstats]]\\n\"\n                aggregator_str += \" \"*2 + \"namepass = [\\\"\" + plugin + \"_mdsd\\\"]\\n\"\n                aggregator_str += \" \"*2 + \"period = \\\"\" + min_interval 
+ \"\\\"\\n\"\n                aggregator_str += \" \"*2 + \"drop_original = true\\n\"\n                aggregator_str += \" \"*2 + \"fieldpass = [\" + ops_fields[:-2].replace('\\\\','\\\\\\\\\\\\\\\\') + \"]\\n\" #-2 to strip the last comma and space\n                aggregator_str += \" \"*2 + \"stats = [\" + ops + \"]\\n\\n\"\n\n                # Aggregator config for ME\n                aggregator_str += \"[[aggregators.mdmratemetrics]]\\n\"\n                aggregator_str += \" \"*2 + \"namepass = [\\\"\" + plugin + \"\\\"]\\n\"\n                aggregator_str += \" \"*2 + \"period = \\\"\" + min_interval + \"\\\"\\n\"\n                aggregator_str += \" \"*2 + \"drop_original = true\\n\"\n                aggregator_str += \" \"*2 + \"fieldpass = [\" + ops_fields[:-2].replace('\\\\','\\\\\\\\\\\\\\\\') + \"]\\n\" #-2 to strip the last comma and space\n                aggregator_str += \" \"*2 + \"stats = [\\\"rate\\\"]\\n\\n\"\n\n                \n            if is_lad:\n                lad_specific_rename_str += \"\\n\"\n            elif not is_vmi and not is_vmi_rate_counter:\n                # no rename of fields as they are set in telegraf directly            \n                ama_rename_str += \"\\n\"\n\n            # Using fields[: -2] here to get rid of the last \", \" at the end of the string\n            input_str += \" \"*2 + \"fieldpass = [\"+fields[:-2]+\"]\\n\"\n            if plugin == \"cpu\":\n                input_str += \" \"*2 + \"report_active = true\\n\"\n            \n            # Rate interval needs to be atleast twice the regular sourcing interval for aggregation to work. \n            # Since we want all the VMI metrics to be sent at the same interval as selected by the customer, To overcome the twice the min internval limitation, \n            # We are sourcing the VMI metrics that need to be aggregated at half the selected frequency \n            rated_min_interval = str(int(min_interval[:-1]) // 2) + \"s\" \n            input_str += \" \"*2 + \"interval = \" + \"\\\"\" + rated_min_interval + \"\\\"\\n\\n\"\n\n            telegraf_plugin = plugin\n            if is_vmi:\n                splitResult = plugin.split('_')\n                telegraf_plugin = splitResult[0]\n\n            if not is_lad:\n                configIds = counterConfigIdMap[telegraf_json[omiclass][plugin][field][\"displayName\"]]\n                for configId in configIds:\n                    input_str += \"\\n\"\n                    input_str += \" \"*2 + \"[inputs.\" + telegraf_plugin + \".tags]\\n\"\n                    input_str += \" \"*4 + \"configurationId=\\\"\" + configId + \"\\\"\\n\\n\"\n                    break\n\n            config_file[\"data\"] = input_str + \"\\n\" +  metricsext_rename_str + \"\\n\" + ama_rename_str + \"\\n\" + lad_specific_rename_str + \"\\n\"  +aggregator_str\n            output.append(config_file)\n            config_file = {}\n\n    \"\"\"\n    Sample telegraf TOML file output\n\n    [[inputs.net]]\n\n    fieldpass = [\"err_out\", \"packets_sent\", \"err_in\", \"bytes_sent\", \"packets_recv\"]\n    interval = \"5s\"\n\n    [[inputs.cpu]]\n\n    fieldpass = [\"usage_nice\", \"usage_user\", \"usage_idle\", \"usage_active\", \"usage_irq\", \"usage_system\"]\n    interval = \"15s\"\n\n    [[processors.rename]]\n\n    [[processors.rename.replace]]\n        measurement = \"net\"\n        dest = \"network\"\n\n    [[processors.rename.replace]]\n        field = \"err_out\"\n        dest = \"Packets sent errors\"\n\n    [[aggregators.basicstats]]\n    
period = \"30s\"\n    drop_original = false\n    fieldpass = [\"Disk reads\", \"Disk writes\", \"Filesystem write bytes/sec\"]\n    stats = [\"rate\"]\n\n    \"\"\"\n\n    ## Get the log folder directory from HandlerEnvironment.json and use that for the telegraf default logging\n    logFolder, _ = get_handler_vars()\n    for measurement in storage_namepass_list:\n        storage_namepass_str += \"\\\"\" + measurement + \"\\\", \"\n\n\n    # Telegraf basic agent and output config\n    agentconf = \"[agent]\\n\"\n    agentconf += \"  interval = \\\"10s\\\"\\n\"\n    agentconf += \"  round_interval = true\\n\"\n    agentconf += \"  metric_batch_size = 1000\\n\"\n    agentconf += \"  metric_buffer_limit = 1000000\\n\"\n    agentconf += \"  collection_jitter = \\\"0s\\\"\\n\"\n    agentconf += \"  flush_interval = \\\"10s\\\"\\n\"\n    agentconf += \"  flush_jitter = \\\"0s\\\"\\n\"\n    agentconf += \"  logtarget = \\\"file\\\"\\n\"\n    agentconf += \"  quiet = true\\n\"\n    agentconf += \"  logfile = \\\"\" + logFolder + \"/telegraf.log\\\"\\n\"\n    agentconf += \"  logfile_rotation_max_size = \\\"100MB\\\"\\n\"\n    agentconf += \"  logfile_rotation_max_archives = 5\\n\"\n    agentconf += \"\\n# Configuration for adding gloabl tags\\n\"\n    agentconf += \"[global_tags]\\n\"\n    if is_lad:\n        agentconf += \"  DeploymentId= \\\"${DeploymentId}\\\"\\n\"\n    agentconf += \"  \\\"microsoft.subscriptionId\\\"= \\\"\" + subscription_id + \"\\\"\\n\"\n    agentconf += \"  \\\"microsoft.resourceGroupName\\\"= \\\"\" + resource_group + \"\\\"\\n\"\n    agentconf += \"  \\\"microsoft.regionName\\\"= \\\"\" + region + \"\\\"\\n\"\n    agentconf += \"  \\\"microsoft.resourceId\\\"= \\\"\" + az_resource_id + \"\\\"\\n\"\n    if virtual_machine_name != \"\":\n        agentconf += \"  \\\"VMInstanceId\\\"= \\\"\" + virtual_machine_name + \"\\\"\\n\"    \n    if has_me_output or is_lad:\n        agentconf += \"\\n# Configuration for sending metrics to MetricsExtension\\n\"\n\n        # for AMA we use Sockets to write to ME but for LAD we continue using UDP\n        # because we support a lot more counters in AMA path and ME is not able to handle it with UDP\n        if is_lad:\n            agentconf += \"[[outputs.influxdb]]\\n\"\n        else:\n            agentconf += \"[[outputs.socket_writer]]\\n\"\n        agentconf += \"  namedrop = [\" + storage_namepass_str[:-2] + \"]\\n\"\n        if is_lad:\n            agentconf += \"  fielddrop = [\" + excess_diskio_field_drop_list_str[:-2] + \"]\\n\"\n        \n        if is_lad:\n            agentconf += \"  urls = [\\\"\" + str(me_url) + \"\\\"]\\n\\n\"\n            agentconf += \"  udp_payload = \\\"2048B\\\"\\n\\n\"\n        else:\n            agentconf += \"  data_format = \\\"influx\\\"\\n\"\n            agentconf += \"  address = \\\"\" + str(me_url) + \"\\\"\\n\\n\"\n    if has_mdsd_output:\n        agentconf += \"\\n# Configuration for sending metrics to MDSD\\n\"\n        agentconf += \"[[outputs.socket_writer]]\\n\"\n        agentconf += \"  namepass = [\" + storage_namepass_str[:-2] + \"]\\n\"\n        agentconf += \"  data_format = \\\"influx\\\"\\n\"\n        agentconf += \"  address = \\\"\" + str(mdsd_url) + \"\\\"\\n\\n\"\n    agentconf += \"\\n# Configuration for outputing metrics to file. 
Uncomment to enable.\\n\"\n    agentconf += \"#[[outputs.file]]\\n\"\n    agentconf += \"#  files = [\\\"./metrics_to_file.out\\\"]\\n\\n\"\n\n    agent_file = {\"filename\":\"telegraf.conf\", \"data\": agentconf}\n    output.append(agent_file)\n\n\n    return output, storage_namepass_list\n\n\ndef write_configs(configs, telegraf_conf_dir, telegraf_d_conf_dir):\n    \"\"\"\n    Write the telegraf config created by config parser method to disk at the telegraf config location\n    :param configs: Telegraf config data parsed by the parse_config method above\n    :param telegraf_conf_dir: Path where the telegraf.conf is written to on the disk\n    :param telegraf_d_conf_dir: Path where the individual module telegraf configs are written to on the disk\n    \"\"\"\n    # Delete the older config folder to prevent telegraf from loading older configs\n    if os.path.exists(telegraf_conf_dir):\n        rmtree(telegraf_conf_dir)\n\n    os.mkdir(telegraf_conf_dir)\n\n    os.mkdir(telegraf_d_conf_dir)\n\n    for configfile in configs:\n        if configfile[\"filename\"] == \"telegraf.conf\" or configfile[\"filename\"] == \"intermediate.json\":\n            path = telegraf_conf_dir + configfile[\"filename\"]\n        else:\n            path = telegraf_d_conf_dir + configfile[\"filename\"]\n        with open(path, \"w\") as f:\n            f.write(configfile[\"data\"])\n\n\n\ndef get_handler_vars():\n    \"\"\"\n    This method is taken from the Waagent code. This is used to grab the log and config file location from the json public setting for the Extension\n    \"\"\"\n    logFolder = \"\"\n    configFolder = \"\"\n    handler_env_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'HandlerEnvironment.json'))\n    if os.path.exists(handler_env_path):\n        with open(handler_env_path, 'r') as handler_env_file:\n            handler_env_txt = handler_env_file.read()\n        handler_env = json.loads(handler_env_txt)\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n        if \"handlerEnvironment\" in handler_env:\n            if \"logFolder\" in handler_env[\"handlerEnvironment\"]:\n                logFolder = handler_env[\"handlerEnvironment\"][\"logFolder\"]\n            if \"configFolder\" in handler_env[\"handlerEnvironment\"]:\n                configFolder = handler_env[\"handlerEnvironment\"][\"configFolder\"]\n\n    return logFolder, configFolder\n\n\ndef is_running(is_lad):\n    \"\"\"\n    This method is used to check if telegraf binary is currently running on the system or not.\n    In order to check whether it needs to be restarted from the watcher daemon\n    \"\"\"\n    if is_lad:\n        telegraf_bin = metrics_constants.lad_telegraf_bin\n    else:\n        telegraf_bin = metrics_constants.ama_telegraf_bin\n\n    proc = subprocess.Popen([\"ps  aux | grep telegraf | grep -v grep\"], stdout=subprocess.PIPE, shell=True)\n    output = proc.communicate()[0]\n    if telegraf_bin in output.decode('utf-8', 'ignore'):\n        return True\n    else:\n        return False\n\ndef stop_telegraf_service(is_lad):\n    \"\"\"\n    Stop the telegraf service if VM is using is systemd, otherwise check if the pid_file exists,\n    and if the pid belongs to the Telegraf process, if yes, then kill the process\n    This method is called before remove_telegraf_service by the main extension code\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    if is_lad:\n        telegraf_bin = metrics_constants.lad_telegraf_bin\n    
else:\n        telegraf_bin = metrics_constants.ama_telegraf_bin\n\n    # If the VM has systemd, then we will use that to stop\n    if metrics_utils.is_systemd():\n        code = 1\n        telegraf_service_path = get_telegraf_service_path(is_lad)\n        telegraf_service_name = get_telegraf_service_name(is_lad)\n\n        if os.path.isfile(telegraf_service_path):\n            code = os.system(\"systemctl stop {0}\".format(telegraf_service_name))              \n        else:\n            return False, \"Telegraf service file does not exist. Failed to stop telegraf service: {0}.service.\".format(telegraf_service_name)\n\n        if code != 0:\n            return False, \"Unable to stop telegraf service: {0}.service. Run systemctl status {0}.service for more info.\".format(telegraf_service_name)\n\n    # Whether or not VM has systemd, let's check if we have any telegraf pids saved and if so, terminate the associated process\n    _, configFolder = get_handler_vars()\n    telegraf_conf_dir = configFolder + \"/telegraf_configs/\"\n    telegraf_pid_path = telegraf_conf_dir + \"telegraf_pid.txt\"\n    if os.path.isfile(telegraf_pid_path):\n        with open(telegraf_pid_path, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to telegraf\n                cmd_path = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_path):\n                    with open(cmd_path, \"r\") as cmd_f:\n                        cmdline = cmd_f.readlines()\n                        if cmdline[0].find(telegraf_bin) >= 0:\n                            os.kill(int(pid), signal.SIGKILL)\n        os.remove(telegraf_pid_path)\n    elif not metrics_utils.is_systemd():\n        return False, \"Could not find telegraf service nor process to stop.\"\n\n    return True, \"Successfully stopped metrics-sourcer service\"\n\n\ndef remove_telegraf_service(is_lad):\n    \"\"\"\n    Remove the telegraf service if the VM is using systemd as well as the telegraf Binary\n    This method is called after stop_telegraf_service by the main extension code during Extension uninstall\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    telegraf_service_path = get_telegraf_service_path(is_lad)\n    telegraf_service_name = get_telegraf_service_name(is_lad)\n\n    if os.path.isfile(telegraf_service_path):\n        os.remove(telegraf_service_path)\n    else:\n        return True, \"Unable to remove the Telegraf service as the file doesn't exist.\"\n\n    # Checking To see if the file was successfully removed, since os.remove doesn't return an error code\n    if os.path.isfile(telegraf_service_path):\n        return False, \"Unable to remove telegraf service: {0}.service at {1}.\".format(telegraf_service_name, telegraf_service_path)\n\n    return True, \"Successfully removed {0} service\".format(telegraf_service_name)\n\n\ndef setup_telegraf_service(is_lad, telegraf_bin, telegraf_d_conf_dir, telegraf_agent_conf, HUtilObj=None):\n    \"\"\"\n    Add the metrics-sourcer service if the VM is using systemd\n    This method is called in handle_config\n    :param telegraf_bin: path to the telegraf binary\n    :param telegraf_d_conf_dir: path to telegraf .d conf subdirectory\n    :param telegraf_agent_conf: path to telegraf .conf file\n    \"\"\"\n    telegraf_service_path = get_telegraf_service_path(is_lad)\n    telegraf_service_template_path = os.getcwd() + \"/services/metrics-sourcer.service\"\n\n    if not 
os.path.exists(telegraf_d_conf_dir):\n        raise Exception(\"Telegraf config directory does not exist. Failed to setup telegraf service.\")\n\n    if not os.path.isfile(telegraf_agent_conf):\n        raise Exception(\"Telegraf agent config does not exist. Failed to setup telegraf service.\")\n\n    if os.path.isfile(telegraf_service_template_path):\n\n        copyfile(telegraf_service_template_path, telegraf_service_path)\n\n        if os.path.isfile(telegraf_service_path):\n            os.system(r\"sed -i 's+%TELEGRAF_BIN%+{1}+' {0}\".format(telegraf_service_path, telegraf_bin))\n            os.system(r\"sed -i 's+%TELEGRAF_AGENT_CONFIG%+{1}+' {0}\".format(telegraf_service_path, telegraf_agent_conf))\n            os.system(r\"sed -i 's+%TELEGRAF_CONFIG_DIR%+{1}+' {0}\".format(telegraf_service_path, telegraf_d_conf_dir))\n\n            daemon_reload_status = os.system(\"systemctl daemon-reload\")\n            if daemon_reload_status != 0:\n                message = \"Unable to reload systemd after Telegraf service file change. Failed to setup telegraf service. Check system for hardening. Exit code:\" + str(daemon_reload_status)\n                if HUtilObj is not None:\n                    HUtilObj.log(message)\n                else:\n                    print('Info: {0}'.format(message))\n\n        else:\n            raise Exception(\"Unable to copy Telegraf service template file to {0}. Failed to setup telegraf service.\".format(telegraf_service_path))\n    else:\n        raise Exception(\"Telegraf service template file does not exist at {0}. Failed to setup telegraf service.\".format(telegraf_service_template_path))\n\n    return True\n\n\ndef start_telegraf(is_lad):\n    \"\"\"\n    Start the telegraf service if VM is using is systemd, otherwise start the binary as a process and store the pid\n    to a file in the telegraf config directory\n    This method is called after config setup is completed by the main extension code\n    :param is_lad: boolean whether the extension is LAD or not (AMA)\n    \"\"\"\n\n    # Re using the code to grab the config directories and imds values because start will be called from Enable process outside this script\n    log_messages = \"\"\n\n    if is_lad:\n        telegraf_bin = metrics_constants.lad_telegraf_bin\n    else:\n        telegraf_bin = metrics_constants.ama_telegraf_bin\n\n    if not os.path.isfile(telegraf_bin):\n        log_messages += \"Telegraf binary does not exist. Failed to start telegraf service.\"\n        return False, log_messages\n\n    # Ensure that any old telegraf processes are cleaned up to avoid duplication\n    stop_telegraf_service(is_lad)\n\n    # If the VM has systemd, telegraf will be managed as a systemd service\n    telegraf_service_name = get_telegraf_service_name(is_lad)\n    if metrics_utils.is_systemd():\n        service_restart_status = os.system(\"systemctl restart {0}\".format(telegraf_service_name))        \n        if service_restart_status != 0:\n            log_messages += \"Unable to start Telegraf service using systemctl. Failed to start telegraf service. 
Check system for hardening.\"\n            return False, log_messages\n\n    # Otherwise, start telegraf as a process and save the pid to a file so that we can terminate it while disabling/uninstalling\n    else:\n        _, configFolder = get_handler_vars()\n        telegraf_conf_dir = configFolder + \"/telegraf_configs/\"\n        telegraf_agent_conf = telegraf_conf_dir + \"telegraf.conf\"\n        telegraf_d_conf_dir = telegraf_conf_dir + \"telegraf.d/\"\n        telegraf_pid_path = telegraf_conf_dir + \"telegraf_pid.txt\"\n\n        binary_exec_command = \"{0} --config {1} --config-directory {2}\".format(telegraf_bin, telegraf_agent_conf, telegraf_d_conf_dir)\n        proc = subprocess.Popen(binary_exec_command.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        # Sleeping for 3 seconds before checking if the process is still running, to give it ample time to relay crash info\n        time.sleep(3)\n        p = proc.poll()\n\n        # Process is running successfully\n        if p is None:\n            telegraf_pid = proc.pid\n\n            # Write this pid to a file for future use\n            try:\n                with open(telegraf_pid_path, \"a\") as f:\n                    f.write(str(telegraf_pid) + '\\n')\n            except Exception as e:\n                log_messages += \"Successfully started telegraf binary, but could not save telegraf pidfile.\"\n        else:\n            out, err = proc.communicate()\n            log_messages += \"Unable to run telegraf binary as a process due to error - {0}. Failed to start telegraf.\".format(err)\n            return False, log_messages\n    return True, log_messages\n\n\ndef get_telegraf_service_path(is_lad):\n    \"\"\"\n    Utility method to get the service path in case /lib/systemd/system doesnt exist on the OS\n    \"\"\"\n    if is_lad:\n        if os.path.exists(\"/lib/systemd/system/\"):\n            return metrics_constants.lad_telegraf_service_path\n        elif os.path.exists(\"/usr/lib/systemd/system/\"):\n            return metrics_constants.lad_telegraf_service_path_usr_lib\n        else:\n            raise Exception(\"Systemd unit files do not exist at /lib/systemd/system or /usr/lib/systemd/system/. Failed to setup telegraf service.\")\n    else:\n        if os.path.exists(\"/lib/systemd/system/\"):\n            return metrics_constants.telegraf_service_path\n        elif os.path.exists(\"/usr/lib/systemd/system/\"):\n            return metrics_constants.telegraf_service_path_usr_lib\n        else:\n            raise Exception(\"Systemd unit files do not exist at /lib/systemd/system or /usr/lib/systemd/system/. 
Failed to setup telegraf service.\")\n\ndef get_telegraf_service_name(is_lad):\n    \"\"\"\n    Utility method to get the service name\n    \"\"\"\n    if(is_lad):    \n        return metrics_constants.lad_telegraf_service_name\n    else:\n        return metrics_constants.telegraf_service_name\n        \n\ndef handle_config(config_data, me_url, mdsd_url, is_lad):\n    \"\"\"\n    The main method to perfom the task of parsing the config , writing them to disk, setting up, stopping, removing and starting telegraf\n    :param config_data: Parsed Metrics Configuration from which telegraf config is created\n    :param me_url: The url to which telegraf will send metrics to for MetricsExtension\n    :param mdsd_url: The url to which telegraf will send metrics to for MDSD\n    :param is_lad: Boolean value for whether the extension is Lad or not (AMA)\n    \"\"\"\n\n    # Making the imds call to get resource id, sub id, resource group and region for the dimensions for telegraf metrics\n    retries = 1\n    max_retries = 3\n    sleep_time = 5\n    imdsurl = \"\"\n    is_arc = False\n\n    if is_lad:\n        imdsurl = \"http://169.254.169.254/metadata/instance?api-version=2019-03-11\"\n    else:\n        if metrics_utils.is_arc_installed():\n            imdsurl = metrics_utils.get_arc_endpoint()\n            imdsurl += \"/metadata/instance?api-version=2019-11-01\"\n            is_arc = True\n        else:\n            imdsurl = \"http://169.254.169.254/metadata/instance?api-version=2019-03-11\"\n\n\n    data = None\n    while retries <= max_retries:\n\n        req = urllib.Request(imdsurl, headers={'Metadata':'true'})\n        res = urllib.urlopen(req)\n        data = json.loads(res.read().decode('utf-8', 'ignore'))\n\n        if \"compute\" not in data:\n            retries += 1\n        else:\n            break\n\n        time.sleep(sleep_time)\n\n    if retries > max_retries:\n        raise Exception(\"Unable to find 'compute' key in imds query response. Reached max retry limit of - {0} times. Failed to setup Telegraf.\".format(max_retries))\n\n    if \"resourceId\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'resourceId' key in imds query response. Failed to setup Telegraf.\")\n\n    # resource id is needed for ME to show metrics on the metrics blade of the VM/VMSS\n    # ME expected ID- /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>\n    # or /subscriptions/20ff167c-9f4b-4a73-9fd6-0dbe93fa778a/resourceGroups/sidama/providers/Microsoft.Compute/virtualMachines/syslogReliability_1ec84a39\n    az_resource_id = data[\"compute\"][\"resourceId\"]\n\n    # If the instance is VMSS instance resource id of a uniform VMSS then trim the last two values from the resource id ie - \"/virtualMachines/0\"\n    # Since ME expects the resource id in a particular format. 
For egs -\n    # IMDS returned ID - /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>/virtualMachines/0\n    # ME expected ID- /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>\n    if \"virtualMachineScaleSets\" in az_resource_id: \n        az_resource_id = \"/\".join(az_resource_id.split(\"/\")[:-2])\n\n    virtual_machine_name = \"\"\n    if \"vmScaleSetName\" in data[\"compute\"] and data[\"compute\"][\"vmScaleSetName\"] != \"\":\n        virtual_machine_name = data[\"compute\"][\"name\"]\n        # for flexible VMSS above resource id is instance specific and won't have virtualMachineScaleSets\n        # for e.g., /subscriptions/20ff167c-9f4b-4a73-9fd6-0dbe93fa778a/resourceGroups/sidama/providers/Microsoft.Compute/virtualMachines/syslogReliability_1ec84a39\n        # ME expected ID- /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>\n        if \"virtualMachineScaleSets\" not in az_resource_id: \n            az_resource_id = \"/\".join(az_resource_id.split(\"/\")[:-2]) + \"/virtualMachineScaleSets/\" + data[\"compute\"][\"vmScaleSetName\"]\n\n    if \"subscriptionId\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'subscriptionId' key in imds query response. Failed to setup Telegraf.\")\n\n    subscription_id = data[\"compute\"][\"subscriptionId\"]\n\n    if \"resourceGroupName\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'resourceGroupName' key in imds query response. Failed to setup Telegraf.\")\n\n    resource_group = data[\"compute\"][\"resourceGroupName\"]\n\n    if \"location\" not in data[\"compute\"]:\n        raise Exception(\"Unable to find 'location' key in imds query response. Failed to setup Telegraf.\")\n\n    region = data[\"compute\"][\"location\"]\n\n    #call the method to first parse the configs\n    output, namespaces = parse_config(config_data, me_url, mdsd_url, is_lad, az_resource_id, subscription_id, resource_group, region, virtual_machine_name)\n\n    _, configFolder = get_handler_vars()\n    if is_lad:\n        telegraf_bin = metrics_constants.lad_telegraf_bin\n    else:\n        telegraf_bin = metrics_constants.ama_telegraf_bin\n\n    telegraf_conf_dir = configFolder + \"/telegraf_configs/\"\n    telegraf_agent_conf = telegraf_conf_dir + \"telegraf.conf\"\n    telegraf_d_conf_dir = telegraf_conf_dir + \"telegraf.d/\"\n\n\n    #call the method to write the configs\n    write_configs(output, telegraf_conf_dir, telegraf_d_conf_dir)\n\n    # Setup Telegraf service.\n    # If the VM has systemd, then we will copy over the systemd unit file and use that to start/stop\n    if metrics_utils.is_systemd():\n        telegraf_service_setup = setup_telegraf_service(is_lad, telegraf_bin, telegraf_d_conf_dir, telegraf_agent_conf)\n        if not telegraf_service_setup:\n            return False, []\n\n    return True, namespaces\n"
  },
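For reference, parse_config above returns a list of {"filename": ..., "data": ...} entries (intermediate.json, one <class>.conf per counter class, and telegraf.conf) together with the namepass list. The sketch below drives it directly under stated assumptions: the counter key is written lowercase to match the LAD keys in name_map, a configurationId list is supplied because parse_config reads that key unconditionally for mapped counters, and every URL and Azure identifier is a placeholder rather than a value from this repo.

from telegraf_utils.telegraf_config_handler import parse_config

# Illustrative input in the shape of the module docstring's sample, with the
# extra configurationId field that parse_config requires for mapped counters.
sample_counters = [
    {
        "displayName": "network->packets sent",        # must match a name_map key (lowercase)
        "interval": "15s",
        "sink": ["mdsd", "me"],
        "configurationId": ["placeholder-config-id"],  # hypothetical id
    }
]

# Placeholder endpoints; the real ones come from the extension's constants.
me_url = "udp://127.0.0.1:8089"
mdsd_url = "unix:///var/run/mdsd/default_influx.socket"

configs, namepass_list = parse_config(
    sample_counters, me_url, mdsd_url, True,           # is_lad=True
    "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
    "<sub-id>", "<rg>", "eastus", "")

for conf in configs:
    print(conf["filename"])   # intermediate.json, network.conf, telegraf.conf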
  {
    "path": "LAD-AMA-Common/telegraf_utils/telegraf_name_map.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux extension\n#\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n# MIT License\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nname_map = {\n\n######These are the counter keys and telegraf plugins for LAD/AMA\n\n\"processor->cpu io wait time\" : {\"plugin\":\"cpu\", \"field\":\"usage_iowait\", \"ladtablekey\":\"/builtin/processor/percentiowaittime\"},\n\"processor->cpu user time\" : {\"plugin\":\"cpu\", \"field\":\"usage_user\", \"ladtablekey\":\"/builtin/processor/percentusertime\"},\n\"processor->cpu nice time\" : {\"plugin\":\"cpu\", \"field\":\"usage_nice\", \"ladtablekey\":\"/builtin/processor/percentnicetime\"},\n\"processor->cpu percentage guest os\" : {\"plugin\":\"cpu\", \"field\":\"usage_active\", \"ladtablekey\":\"/builtin/processor/percentprocessortime\"},\n\"processor->cpu interrupt time\" : {\"plugin\":\"cpu\", \"field\":\"usage_irq\", \"ladtablekey\":\"/builtin/processor/percentinterrupttime\"},\n\"processor->cpu idle time\" : {\"plugin\":\"cpu\", \"field\":\"usage_idle\", \"ladtablekey\":\"/builtin/processor/percentidletime\"},\n\"processor->cpu privileged time\" : {\"plugin\":\"cpu\", \"field\":\"usage_system\", \"ladtablekey\":\"/builtin/processor/percentprivilegedtime\"},\n\n\"% IO Wait Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_iowait\", \"module\":\"processor\"},\n\"% User Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_user\", \"module\":\"processor\"},\n\"% Nice Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_nice\", \"module\":\"processor\"},\n\"% Processor Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_active\", \"module\":\"processor\"},\n\"% Interrupt Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_irq\", \"module\":\"processor\"},\n\"% Idle Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_idle\", \"module\":\"processor\"},\n\"% Privileged Time\" : {\"plugin\":\"cpu\", \"field\":\"usage_system\", \"module\":\"processor\"},\n\n# VM Insights\n# 8 slashes because this goes from JSON -> Python -> Telegraf config -> Go -> C++ and each level does an escape\n\"Processor\\\\UtilizationPercentage\" : {\"plugin\":\"cpu_vmi\", \"field\":\"Processor\\\\\\\\\\\\\\\\UtilizationPercentage\", \"module\":\"processor\"},\n\"Computer\\\\Heartbeat\" : {\"plugin\":\"cpu_heartbeat_vmi\", \"field\":\"Computer\\\\\\\\\\\\\\\\Heartbeat\", \"module\":\"processor\"},\n\n\"network->network in guest os\" : {\"plugin\":\"net\", \"field\":\"bytes_recv\", \"ladtablekey\":\"/builtin/network/bytesreceived\"},\n\"network->network 
total bytes\" : {\"plugin\":\"net\", \"field\":\"bytes_total\", \"ladtablekey\":\"/builtin/network/bytestotal\"}, #Need to calculate sum\n\"network->network out guest os\" : {\"plugin\":\"net\", \"field\":\"bytes_sent\", \"ladtablekey\":\"/builtin/network/bytestransmitted\"},\n\"network->network collisions\" : {\"plugin\":\"net\", \"field\":\"drop_total\", \"ladtablekey\":\"/builtin/network/totalcollisions\"}, #Need to calculate sum\n\"network->packets received errors\" : {\"plugin\":\"net\", \"field\":\"err_in\", \"ladtablekey\":\"/builtin/network/totalrxerrors\"},\n\"network->packets sent\" : {\"plugin\":\"net\", \"field\":\"packets_sent\", \"ladtablekey\":\"/builtin/network/packetstransmitted\"},\n\"network->packets received\" : {\"plugin\":\"net\", \"field\":\"packets_recv\", \"ladtablekey\":\"/builtin/network/packetsreceived\"},\n\"network->packets sent errors\" : {\"plugin\":\"net\", \"field\":\"err_out\", \"ladtablekey\":\"/builtin/network/totaltxerrors\"},\n\n\"Total Bytes Received\" : {\"plugin\":\"net\", \"field\":\"bytes_recv\", \"module\":\"network\"},\n\"Total Bytes\" : {\"plugin\":\"net\", \"field\":\"bytes_total\", \"module\":\"network\"}, #Need to calculate sum\n\"Total Bytes Transmitted\" : {\"plugin\":\"net\", \"field\":\"bytes_sent\", \"module\":\"network\"},\n\"Total Collisions\" : {\"plugin\":\"net\", \"field\":\"drop_total\", \"module\":\"network\"}, #Need to calculate sum\n\"Total Rx Errors\" : {\"plugin\":\"net\", \"field\":\"err_in\", \"module\":\"network\"},\n\"Total Packets Transmitted\" : {\"plugin\":\"net\", \"field\":\"packets_sent\", \"module\":\"network\"},\n\"Total Packets Received\" : {\"plugin\":\"net\", \"field\":\"packets_recv\", \"module\":\"network\"},\n\"Total Tx Errors\" : {\"plugin\":\"net\", \"field\":\"err_out\", \"module\":\"network\"},\n\n# VM Insights\n# \"Network\\ReadBytesPerSecond\", \"Network\\WriteBytesPerSecond\"\n# 8 slashes because this goes from JSON -> Python -> Telegraf config -> Go -> C++ and each level does an escape\n\"Network\\\\ReadBytesPerSecond\" : {\"plugin\":\"net_recv_vmi\", \"field\":\"Network\\\\\\\\\\\\\\\\ReadBytesPerSecond\", \"op\":\"rate\", \"module\":\"network\"},\n\"Network\\\\WriteBytesPerSecond\" : {\"plugin\":\"net_sent_vmi\", \"field\":\"Network\\\\\\\\\\\\\\\\WriteBytesPerSecond\", \"op\":\"rate\", \"module\":\"network\"},\n\n\"memory->memory available\" : {\"plugin\":\"mem\", \"field\":\"available\", \"ladtablekey\":\"/builtin/memory/availablememory\"},\n\"memory->mem. 
percent available\" : {\"plugin\":\"mem\", \"field\":\"available_percent\", \"ladtablekey\":\"/builtin/memory/percentavailablememory\"},\n\"memory->memory used\" : {\"plugin\":\"mem\", \"field\":\"used\", \"ladtablekey\":\"/builtin/memory/usedmemory\"},\n\"memory->memory percentage\" : {\"plugin\":\"mem\", \"field\":\"used_percent\", \"ladtablekey\":\"/builtin/memory/percentusedmemory\"},\n\n\"memory->swap available\" : {\"plugin\":\"swap\", \"field\":\"free\", \"ladtablekey\":\"/builtin/memory/availableswap\"},\n\"memory->swap percent available\" : {\"plugin\":\"swap\", \"field\":\"free_percent\", \"ladtablekey\":\"/builtin/memory/percentavailableswap\"}, #Need to calculate percentage\n\"memory->swap used\" : {\"plugin\":\"swap\", \"field\":\"used\", \"ladtablekey\":\"/builtin/memory/usedswap\"},\n\"memory->swap percent used\" : {\"plugin\":\"swap\", \"field\":\"used_percent\", \"ladtablekey\":\"/builtin/memory/percentusedswap\"},\n\n\"memory->page reads\": {\"plugin\":\"kernel_vmstat\", \"field\":\"pgpgin\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/memory/pagesreadpersec\"},\n\"memory->page writes\" : {\"plugin\":\"kernel_vmstat\", \"field\":\"pgpgout\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/memory/pageswrittenpersec\"},\n\"memory->pages\" : {\"plugin\":\"kernel_vmstat\", \"field\":\"total_pages\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/memory/pagespersec\"},\n\n\"Available MBytes Memory\" : {\"plugin\":\"mem\", \"field\":\"available\", \"module\":\"memory\"},\n\"% Available Memory\" : {\"plugin\":\"mem\", \"field\":\"available_percent\", \"module\":\"memory\"},\n\"Used Memory MBytes\" : {\"plugin\":\"mem\", \"field\":\"used\", \"module\":\"memory\"},\n\"% Used Memory\" : {\"plugin\":\"mem\", \"field\":\"used_percent\", \"module\":\"memory\"},\n\n\"Available MBytes Swap\" : {\"plugin\":\"swap\", \"field\":\"free\", \"module\":\"memory\"},\n\"% Available Swap Space\" : {\"plugin\":\"swap\", \"field\":\"free_percent\", \"module\":\"memory\"}, #Need to calculate percentage\n\"Used MBytes Swap Space\" : {\"plugin\":\"swap\", \"field\":\"used\", \"module\":\"memory\"},\n\"% Used Swap Space\" : {\"plugin\":\"swap\", \"field\":\"used_percent\", \"module\":\"memory\"},\n\n\"Page Reads/sec\": {\"plugin\":\"kernel_vmstat\", \"field\":\"pgpgin\", \"op\":\"rate\", \"module\":\"memory\"},\n\"Page Writes/sec\" : {\"plugin\":\"kernel_vmstat\", \"field\":\"pgpgout\", \"op\":\"rate\", \"module\":\"memory\"},\n\"Pages/sec\" : {\"plugin\":\"kernel_vmstat\", \"field\":\"total_pages\", \"op\":\"rate\", \"module\":\"memory\"},\n\n# VM Insights\n# 8 slashes because this goes from JSON -> Python -> Telegraf config -> Go -> C++ and each level does an escape\n\"Memory\\\\AvailableMB\" : {\"plugin\":\"mem_vmi\", \"field\":\"Memory\\\\\\\\\\\\\\\\AvailableMB\", \"module\":\"memory\"},\n\"Memory\\\\AvailablePercentage\" : {\"plugin\":\"mem_vmi\", \"field\":\"Memory\\\\\\\\\\\\\\\\AvailablePercentage\", \"module\":\"memory\"},\n\n#OMI Filesystem plugin\n\"filesystem->filesystem used space\" : {\"plugin\":\"disk\", \"field\":\"used\", \"ladtablekey\":\"/builtin/filesystem/usedspace\"},\n\"filesystem->filesystem % used space\" : {\"plugin\":\"disk\", \"field\":\"used_percent\", \"ladtablekey\":\"/builtin/filesystem/percentusedspace\"},\n\"filesystem->filesystem free space\" : {\"plugin\":\"disk\", \"field\":\"free\", \"ladtablekey\":\"/builtin/filesystem/freespace\"},\n\"filesystem->filesystem % free space\" : {\"plugin\":\"disk\", \"field\":\"free_percent\", 
\"ladtablekey\":\"/builtin/filesystem/percentfreespace\"}, #Need to calculate percentage\n\"filesystem->filesystem % free inodes\" : {\"plugin\":\"disk\", \"field\":\"inodes_free_percent\", \"ladtablekey\":\"/builtin/filesystem/percentfreeinodes\"}, #Need to calculate percentage\n\"filesystem->filesystem % used inodes\" : {\"plugin\":\"disk\", \"field\":\"inodes_used_percent\", \"ladtablekey\":\"/builtin/filesystem/percentusedinodes\"}, #Need to calculate percentage\n\n\"filesystem->filesystem transfers/sec\" : {\"plugin\":\"diskio\", \"field\":\"total_transfers_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/transferspersecond\"}, #Need to calculate sum\n\"filesystem->filesystem read bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"read_bytes_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/bytesreadpersecond\"}, #Need to calculate rate (but each second not each interval)\n\"filesystem->filesystem bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"total_bytes_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/bytespersecond\"}, #Need to calculate rate and then sum\n\"filesystem->filesystem write bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"write_bytes_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/byteswrittenpersecond\"}, #Need to calculate rate (but each second not each interval)\n\"filesystem->filesystem reads/sec\" : {\"plugin\":\"diskio\", \"field\":\"reads_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/readspersecond\"}, #Need to calculate rate (but each second not each interval)\n\"filesystem->filesystem writes/sec\" : {\"plugin\":\"diskio\", \"field\":\"writes_filesystem\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/filesystem/writespersecond\"}, #Need to calculate rate (but each second not each interval)\n\n\"% Used Space\" : {\"plugin\":\"disk\", \"field\":\"used_percent\", \"module\":\"filesystem\"},\n\"Free Megabytes\" : {\"plugin\":\"disk\", \"field\":\"free\", \"module\":\"filesystem\"},\n\"% Free Space\" : {\"plugin\":\"disk\", \"field\":\"free_percent\", \"module\":\"filesystem\"}, #Need to calculate percentage\n\"% Free Inodes\" : {\"plugin\":\"disk\", \"field\":\"inodes_free_percent\", \"module\":\"filesystem\"}, #Need to calculate percentage\n\"% Used Inodes\" : {\"plugin\":\"disk\", \"field\":\"inodes_used_percent\", \"module\":\"filesystem\"}, #Need to calculate percentage\n\n\"Disk Transfers/sec\" : {\"plugin\":\"diskio\", \"field\":\"total_transfers\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate sum\n\"Disk Read Bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"read_bytes\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"Logical Disk Bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"total_bytes\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate and then sum\n\"Disk Write Bytes/sec\" : {\"plugin\":\"diskio\", \"field\":\"write_bytes\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"Disk Reads/sec\" : {\"plugin\":\"diskio\", \"field\":\"reads\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"Disk Writes/sec\" : {\"plugin\":\"diskio\", \"field\":\"writes\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\n# VM Insights\n# 8 slashes because this goes from JSON -> Python -> Telegraf 
config -> Go -> C++ and each level does an escape\n\"LogicalDisk\\\\FreeSpaceMB\" : {\"plugin\":\"disk_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\FreeSpaceMB\", \"module\":\"filesystem\"},\n\"LogicalDisk\\\\FreeSpacePercentage\" : {\"plugin\":\"disk_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\FreeSpacePercentage\", \"module\":\"filesystem\"}, #Need to calculate percentage\n\"LogicalDisk\\\\Status\" : {\"plugin\":\"disk_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\Status\", \"module\":\"filesystem\"}, #Need to calculate percentage\n\n#\"LogicalDisk\\BytesPerSecond\", \"LogicalDisk\\ReadBytesPerSecond\", \"LogicalDisk\\ReadsPerSecond\",  \"LogicalDisk\\WriteBytesPerSecond\", \"LogicalDisk\\WritesPerSecond\", \"LogicalDisk\\TransfersPerSecond\", \n\n\"LogicalDisk\\\\TransfersPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\TransfersPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate sum\n\"LogicalDisk\\\\ReadBytesPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\ReadBytesPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"LogicalDisk\\\\BytesPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\BytesPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate and then sum\n\"LogicalDisk\\\\WriteBytesPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\WriteBytesPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"LogicalDisk\\\\ReadsPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\ReadsPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\"LogicalDisk\\\\WritesPerSecond\" : {\"plugin\":\"diskio_vmi\", \"field\":\"LogicalDisk\\\\\\\\\\\\\\\\WritesPerSecond\", \"op\":\"rate\", \"module\":\"filesystem\"}, #Need to calculate rate (but each second not each interval)\n\n# Process plugin\n\"Pct User Time\" : {\"plugin\":\"procstat\", \"field\":\"cpu_time_user\", \"module\":\"process\"},\n\"Pct Privileged Time\" : {\"plugin\":\"procstat\", \"field\":\"cpu_time_system\", \"module\":\"process\"},\n\"Used Memory\" : {\"plugin\":\"procstat\", \"field\":\"memory_rss\", \"module\":\"process\"},\n\"Virtual Shared Memory\" : {\"plugin\":\"procstat\", \"field\":\"memory_vms\", \"module\":\"process\"},\n  \n# System plugin\n\"Uptime\" : {\"plugin\":\"system\", \"field\":\"uptime\", \"module\":\"system\"},\n\"Load1\" : {\"plugin\":\"system\", \"field\":\"load1\", \"module\":\"system\"},\n\"Load5\" : {\"plugin\":\"system\", \"field\":\"load5\", \"module\":\"system\"},\n\"Load15\" : {\"plugin\":\"system\", \"field\":\"load15\", \"module\":\"system\"},\n\"Users\" : {\"plugin\":\"system\", \"field\":\"n_users\", \"module\":\"system\"},\n\"CPUs\" : {\"plugin\":\"system\", \"field\":\"n_cpus\", \"module\":\"system\"},\n\"Unique Users\" : {\"plugin\":\"system\", \"field\":\"n_unique_users\", \"module\":\"system\"},\n\n# #OMI Disk plugin\n\"disk->disk read guest os\" : {\"plugin\":\"diskio\", \"field\":\"read_bytes\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/readbytespersecond\"},\n\"disk->disk write guest os\" : {\"plugin\":\"diskio\", \"field\":\"write_bytes\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/writebytespersecond\"},\n\"disk->disk total bytes\" : {\"plugin\":\"diskio\", \"field\":\"total_bytes\", 
\"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/bytespersecond\"},\n\"disk->disk reads\" : {\"plugin\":\"diskio\", \"field\":\"reads\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/readspersecond\"}, #Need to calculate rate (but each second not each interval)\n\"disk->disk writes\" : {\"plugin\":\"diskio\", \"field\":\"writes\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/writespersecond\"},\n\"disk->disk transfers\" : {\"plugin\":\"diskio\", \"field\":\"total_transfers\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/transferspersecond\"},\n\"disk->disk read time\" : {\"plugin\":\"diskio\", \"field\":\"read_time\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/averagereadtime\"},\n\"disk->disk write time\" : {\"plugin\":\"diskio\", \"field\":\"write_time\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/averagewritetime\"},\n\"disk->disk transfer time\" : {\"plugin\":\"diskio\", \"field\":\"io_time\", \"op\":\"rate\", \"ladtablekey\":\"/builtin/disk/averagetransfertime\"},\n\"disk->disk queue length\" : {\"plugin\":\"diskio\", \"field\":\"iops_in_progress\", \"ladtablekey\":\"/builtin/disk/averagediskqueuelength\"}\n\n##### These are the counter keys and telegraf plugins for Azure Monitor Agent\n\n}\n"
  },
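The "8 slashes" comments in the counter map above point at cumulative escaping: the field name travels through several parsers (JSON -> Python -> Telegraf config -> Go -> C++, per the comment), and every layer that re-parses the string consumes one level of backslash escaping, halving the count. A minimal sketch of that halving, with the layer names taken from the comment (the exact set of passes is an assumption):

```python
# Sketch only: start from the 8 backslashes written in the map above and
# apply three unescape passes; each pass halves the backslash count,
# leaving a single literal backslash for the final consumer.
s = "\\" * 8                              # 8 literal backslashes
for layer in ["Telegraf config", "Go", "C++"]:
    s = s.replace("\\\\", "\\")           # one unescape pass
    print(layer, "->", len(s), "backslash(es)")   # 4, then 2, then 1
```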
  {
    "path": "LICENSE.txt",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2016 Microsoft Corporation\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "default: clean init build\n\nEXTENSIONS = \\\n\tCustomScript \\\n\tDSC \\\n\tOSPatching \\\n\tVMBackup\n\nclean:\n\trm -rf build\n\ninit:\n\t@mkdir -p build\n\nbuild: init $(EXTENSIONS) buildVMAccess\n\n\ndefine make-extension-zip\n$(eval NAME    = $(shell grep -Pom1 \"(?<=<Type>)[^<]+\" $@/manifest.xml))\n$(eval VERSION = $(shell grep -Pom1 \"(?<=<Version>)[^<]+\" $@/manifest.xml))\n\n@echo \"Building '$(NAME)-$(VERSION).zip' ...\"\n@cd $@ && find . -type f | grep -v \"/test/\" | grep -v \"./references\" | zip -9 -@ ../build/$(NAME)-$(VERSION).zip > /dev/null\n@find ./Utils    -type f | grep -v \"/test/\"                          | zip -9 -@ build/$(NAME)-$(VERSION).zip > /dev/null\nendef\n\n\n$(EXTENSIONS):\n\t$(make-extension-zip)\n\t@cd Common/ && echo ./waagentloader.py           | zip -9 -@ ../build/$(NAME)-$(VERSION).zip > /dev/null\n\t@cd Common/WALinuxAgent-2.0.16 && echo ./waagent | zip -9 -@ ../../build/$(NAME)-$(VERSION).zip > /dev/null\n\n\n\nbuildVMAccess:\n\t$(eval NAME  = $(shell grep -Pom1 \"(?<=<Type>)[^<]+\" VMAccess/manifest.xml))\n\t$(eval VERSION = $(shell grep -Pom1 \"(?<=<Version>)[^<]+\" VMAccess/manifest.xml))\n\t@echo \"Building '$(NAME)-$(VERSION).zip' ...\"\n\t@cd VMAccess && find . -type f | grep -v \"/test/\" | grep -v \"./references\" | zip -9 -@ ../build/$(NAME)-$(VERSION).zip > /dev/null\n\t@zip -9 build/$(NAME)-$(VERSION).zip ./Utils/__init__.py ./Utils/constants.py ./Utils/distroutils.py\\\n\t\t./Utils/extensionutils.py ./Utils/handlerutil2.py ./Utils/logger.py ./Utils/ovfutils.py > /dev/null\n\n\n.PHONY: clean build $(EXTENSIONS) buildVMAccess\n"
  },
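The zip rules in the Makefile above pull each extension's name and version out of its `manifest.xml` with `grep -Pom1` look-behind patterns. A rough Python equivalent of that extraction, assuming only that the manifest contains `<Type>` and `<Version>` elements (the path below is an example):

```python
import re

def manifest_field(xml_text, tag):
    """Mimic grep -Pom1 "(?<=<TAG>)[^<]+": first occurrence of the text
    between <TAG> and the next '<'."""
    match = re.search(r"(?<=<%s>)[^<]+" % tag, xml_text)
    if match is None:
        raise ValueError("no <%s> element found in manifest" % tag)
    return match.group(0)

with open("VMAccess/manifest.xml") as f:   # example path
    xml = f.read()
print("%s-%s.zip" % (manifest_field(xml, "Type"), manifest_field(xml, "Version")))
```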
  {
    "path": "OSPatching/HandlerManifest.json",
    "content": "[\n  {\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"disableCommand\": \"./handler.py -disable\",\n      \"enableCommand\": \"./handler.py -enable\",\n      \"installCommand\": \"./handler.py -install\",\n      \"uninstallCommand\": \"./handler.py -uninstall\",\n      \"updateCommand\": \"./handler.py -update\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false\n    }\n  }\n]\n"
  },
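Per the manifest above, the guest agent drives every lifecycle transition by invoking `handler.py` with a single flag (`-install`, `-enable`, `-disable`, `-uninstall`, `-update`). A hypothetical sketch of the dispatch such a handler performs; the operation body is a placeholder, not the extension's actual logic:

```python
import sys

# One entry per command declared in HandlerManifest.json.
OPERATIONS = ("-install", "-enable", "-disable", "-uninstall", "-update")

def run(op):
    # Placeholder: the real handler would install, enable, etc. here.
    print("running %s operation" % op.lstrip("-"))

if __name__ == "__main__":
    op = sys.argv[1] if len(sys.argv) > 1 else ""
    if op not in OPERATIONS:
        sys.exit("usage: handler.py " + "|".join(OPERATIONS))
    run(op)
```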
  {
    "path": "OSPatching/README.md",
    "content": "# :warning: IMPORTANT :warning:\n**The OSPatching extension for Linux is deprecated.**\n\nOSPatchingForLinux is deprecated and will be retired February 2018.\n\nYour Linux distro has well supported and maintained ways to enable automatic updates\nfor your VMs to include VMs you use in Production environments. It is recommended\nthat you consult your distro's best practices for automatic updates.\n\n## Linux Distributions\n- Ubuntu\n  - See the [unattended-upgrades](https://help.ubuntu.com/lts/serverguide/automatic-updates.html) package documentation\n- CentOS and RHEL\n  - See the manpage of `yum-cron` for the auto-update mechanism documentation\n\n\n# OSPatching Extension\nAllows the owner of the Azure VM to configure a Linux VM patching schedule cycle\nor perform OS patching on-demand as a one-time task. The actual patching operation\nis scheduled as a cron job.\n\nLastest version is 2.3.\n\nYou can read the User Guide, [Automate Linux VM OS Updates Using OSPatching Extension (outdated, needs to update)](http://azure.microsoft.com/blog/2014/10/23/automate-linux-vm-os-updates-using-ospatching-extension/).\n\nOSPatching Extension can:\n* Patch the OS automatically as a scheduled task\n* Patch the OS as a one-time task\n* The patching can be stopped before the actual patching operation begins\n* The status of VM can be checked by user-defined scripts stored locally, in GitHub, or in Azure Storage\n\n# User Guide\n\n## 1. Configuration schema\nAll settings are set in the protected configuration. No settings are available in the public configuration and it can be omitted.\n\n### 1.1. Protected configuration\nSchema for the protected configuration file.\n\n| Name | Description | Value Type | Default Value |\n|:---|:---|:---|:---|\n| disabled | Flag to disable this extension | required, boolean | false |\n| stop | Flag to cancel the OS update process | required, boolean | false |\n| rebootAfterPatch | The reboot behavior after patching | optional, string | RebootIfNeed |\n| category | Type of patches to install | optional, string | Important |\n| installDuration | The allowed total time for installation | optional, string | 01:00 |\n| oneoff | Patch the OS immediately | optional, boolean | false |\n| intervalOfWeeks | The update frequency (in weeks) | optional, string | 1 |\n| dayOfWeek | The patching date (of the week)You can specify multiple days in a week | optional, string | Everyday |\n| startTime | Start time of patching | optional, string | 03:00 |\n| distUpgradeList | Path to a repo list which for which a full upgrade (e.g. dist-upgrade in Ubuntu) will occur | optional, string | /etc/apt/sources.list.d/custom.list |\n| distUpgradeAll | Flag to enable full upgrade (e.g. dist-upgrade in Ubuntu) for all repos/packages. Disabled (False) by default | optional, bool | True |\n| vmStatusTest | Including `local`, `idleTestScript` and `healthyTestScript` | optional, object | |\n| local | Flag to assign the location of user-defined scripts | optional, boolean | false |\n| idleTestScript | If `local` is true, it is the contents of the idle test script. Otherwise, it is the uri of the idle test script. | optional, string | |\n| healthyTestScript | If `local` is true, it is the contents of the healthy test script. Otherwise, it is the uri of the healthy test script. 
 | optional, string | |\n| storageAccountName | The name of the storage account | optional, string | |\n| storageAccountKey | The access key of the storage account | optional, string | |\n  \nIf the vmStatusTest scripts are stored in private Azure Storage, you must provide\n`storageAccountName` and `storageAccountKey`. You can get these two values from the Azure Portal.\n \n```json\n{\n  \"disabled\": false,\n  \"stop\": false,\n  \"rebootAfterPatch\": \"RebootIfNeed|Required|NotRequired|Auto\",\n  \"category\": \"Important|ImportantAndRecommended\",\n  \"installDuration\": \"<hr:min>\",\n  \"oneoff\": false,\n  \"intervalOfWeeks\": \"<number>\",\n  \"dayOfWeek\": \"Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Everyday\",\n  \"startTime\": \"<hr:min>\",\n  \"distUpgradeList\": \"</etc/apt/sources.list.d/custom.list>\",\n  \"vmStatusTest\": {\n    \"local\": false,\n    \"idleTestScript\": \"<path_to_idletestscript>\",\n    \"healthyTestScript\": \"<path_to_healthytestscript>\"\n  },\n  \"storageAccountName\": \"<storage-account-name>\",\n  \"storageAccountKey\": \"<storage-account-key>\"\n}\n```\n\n## 2. Deploying the Extension to a VM\n\nYou can deploy it using the Azure CLI, Azure PowerShell, or an ARM template.\n\n> **NOTE:** Creating a VM in Azure has two deployment models: Classic and [Resource Manager][arm-overview].\nThe deployment commands have different syntaxes in the two models. Please select the right\none in sections 2.1 and 2.2 below.\n \n### 2.1. Using [**Azure CLI**][azure-cli]\nBefore deploying the OSPatching Extension, you should configure your `protected.json` (in section 1.1 above).\n\n#### 2.1.1 Classic\nThe Classic mode is also called Azure Service Management mode. You can change to it by running:\n```\n$ azure config mode asm\n```\n\nYou can deploy the OSPatching Extension by running:\n```\n$ azure vm extension set <vm-name> \\\nOSPatchingForLinux Microsoft.OSTCExtensions <version> \\\n--private-config-path protected.json\n```\n\nIn the command above, you can replace the version with `\"*\"` to use the latest\nversion available, or `\"2.*\"` to get the newest version that does not introduce\nbreaking schema changes. To find the latest version available, run:\n```\n$ azure vm extension list\n```\n\n#### 2.1.2 Resource Manager\nYou can change to Azure Resource Manager mode by running:\n```\n$ azure config mode arm\n```\n\nYou can deploy the OSPatching Extension by running:\n```\n$ azure vm extension set <resource-group> <vm-name> \\\nOSPatchingForLinux Microsoft.OSTCExtensions <version> \\\n--private-config-path protected.json\n```\n\n> **NOTE:** In ARM mode, `azure vm extension list` is not available for now.\n\n\n### 2.2.
 Using [**Azure PowerShell**][azure-powershell]\n\n#### 2.2.1 Classic\n\nYou can log in to your Azure account (Azure Service Management mode) by running:\n\n```powershell\nAdd-AzureAccount\n```\n\nYou can deploy the OSPatching Extension by running:\n\n```powershell\n$VmName = '<vm-name>'\n$vm = Get-AzureVM -ServiceName $VmName -Name $VmName\n\n$ExtensionName = 'OSPatchingForLinux'\n$Publisher = 'Microsoft.OSTCExtensions'\n$Version = '<version>'\n\n$idleTestScriptUri = '<path_to_idletestscript>'\n$healthyTestScriptUri = '<path_to_healthytestscript>'\n\n$PrivateConfig = ConvertTo-Json -InputObject @{\n    \"disabled\" = $false;\n    \"stop\" = $true|$false;\n    \"rebootAfterPatch\" = \"RebootIfNeed|Required|NotRequired|Auto\";\n    \"category\" = \"Important|ImportantAndRecommended\";\n    \"installDuration\" = \"<hr:min>\";\n    \"oneoff\" = $true|$false;\n    \"intervalOfWeeks\" = \"<number>\";\n    \"dayOfWeek\" = \"Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Everyday\";\n    \"startTime\" = \"<hr:min>\";\n    \"vmStatusTest\" = (@{\n        \"local\" = $false;\n        \"idleTestScript\" = $idleTestScriptUri;\n        \"healthyTestScript\" = $healthyTestScriptUri\n    });\n    \"storageAccountName\" = \"<storage_account_name>\";\n    \"storageAccountKey\" = \"<storage_account_key>\"\n}\n\nSet-AzureVMExtension -ExtensionName $ExtensionName -VM $vm `\n  -Publisher $Publisher -Version $Version `\n  -PrivateConfiguration $PrivateConfig |\n  Update-AzureVM\n```\n\n#### 2.2.2 Resource Manager\n\nYou can log in to your Azure account (Azure Resource Manager mode) by running:\n\n```powershell\nLogin-AzureRmAccount\n```\n\nClick [**HERE**](https://azure.microsoft.com/en-us/documentation/articles/powershell-azure-resource-manager/) to learn more about how to use Azure PowerShell with Azure Resource Manager.\n\nYou can deploy the OSPatching Extension by running:\n\n```powershell\n$RGName = '<resource-group-name>'\n$VmName = '<vm-name>'\n$Location = '<location>'\n\n$ExtensionName = 'OSPatchingForLinux'\n$Publisher = 'Microsoft.OSTCExtensions'\n$Version = '<version>'\n\n$idleTestScriptUri = '<path_to_idletestscript>'\n$healthyTestScriptUri = '<path_to_healthytestscript>'\n\n$PrivateConf = ConvertTo-Json -InputObject @{\n    \"disabled\" = $false;\n    \"stop\" = $true|$false;\n    \"rebootAfterPatch\" = \"RebootIfNeed|Required|NotRequired|Auto\";\n    \"category\" = \"Important|ImportantAndRecommended\";\n    \"installDuration\" = \"<hr:min>\";\n    \"oneoff\" = $true|$false;\n    \"intervalOfWeeks\" = \"<number>\";\n    \"dayOfWeek\" = \"Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Everyday\";\n    \"startTime\" = \"<hr:min>\";\n    \"vmStatusTest\" = (@{\n        \"local\" = $false;\n        \"idleTestScript\" = $idleTestScriptUri;\n        \"healthyTestScript\" = $healthyTestScriptUri\n    });\n    \"storageAccountName\" = \"<storage_account_name>\";\n    \"storageAccountKey\" = \"<storage_account_key>\"\n}\n\nSet-AzureRmVMExtension -ResourceGroupName $RGName -VMName $VmName -Location $Location `\n  -Name $ExtensionName -Publisher $Publisher -ExtensionType $ExtensionName `\n  -TypeHandlerVersion $Version -ProtectedSettingString $PrivateConf\n```\n\n### 2.3.
 Using [**ARM Template**][arm-template]\n\n```json\n{\n  \"type\": \"Microsoft.Compute/virtualMachines/extensions\",\n  \"name\": \"<extension-deployment-name>\",\n  \"apiVersion\": \"<api-version>\",\n  \"location\": \"<location>\",\n  \"dependsOn\": [\n    \"[concat('Microsoft.Compute/virtualMachines/', <vm-name>)]\"\n  ],\n  \"properties\": {\n    \"publisher\": \"Microsoft.OSTCExtensions\",\n    \"type\": \"OSPatchingForLinux\",\n    \"typeHandlerVersion\": \"2.0\",\n    \"protectedSettings\": {\n      \"disabled\": false,\n      \"stop\": false,\n      \"rebootAfterPatch\": \"RebootIfNeed|Required|NotRequired|Auto\",\n      \"category\": \"Important|ImportantAndRecommended\",\n      \"installDuration\": \"<hr:min>\",\n      \"oneoff\": false,\n      \"intervalOfWeeks\": \"<number>\",\n      \"dayOfWeek\": \"Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Everyday\",\n      \"startTime\": \"<hr:min>\",\n      \"vmStatusTest\": {\n        \"local\": false,\n        \"idleTestScript\": \"<path_to_idletestscript>\",\n        \"healthyTestScript\": \"<path_to_healthytestscript>\"\n      },\n      \"storageAccountName\": \"<storage-account-name>\",\n      \"storageAccountKey\": \"<storage-account-key>\"\n    }\n  }\n}\n```\n\nThe sample ARM template is [201-ospatching-extension-on-ubuntu](https://github.com/Azure/azure-quickstart-templates/tree/master/201-ospatching-extension-on-ubuntu).\n\nFor more details about ARM templates, please visit [Authoring Azure Resource Manager templates](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/).\n\n## 3. Scenarios\n\n### 3.1 Setting up regularly scheduled patching\n**Protected Settings**\n```json\n{\n  \"disabled\": false,\n  \"stop\": false,\n  \"rebootAfterPatch\": \"RebootIfNeed\",\n  \"intervalOfWeeks\": \"1\",\n  \"dayOfWeek\": \"Sunday|Wednesday\",\n  \"startTime\": \"03:00\",\n  \"category\": \"ImportantAndRecommended\",\n  \"installDuration\": \"00:30\"\n}\n```\n\n### 3.2 Setting up one-off patching\n**Protected Settings**\n```json\n{\n  \"disabled\": false,\n  \"stop\": false,\n  \"rebootAfterPatch\": \"RebootIfNeed\",\n  \"oneoff\": true,\n  \"category\": \"ImportantAndRecommended\",\n  \"installDuration\": \"00:30\"\n}\n```\n\n### 3.3 Stop the running patching\nYou can stop the OS updates to debug issues. Once the `stop` parameter is set to `true`, the OS update will stop after the current update is finished.\n\n**Protected Settings**\n```json\n{\n  \"disabled\": false,\n  \"stop\": true  \n}\n```\n\n### 3.4 Test idleness before patching and health after patching\nIf the `vmStatusTest` scripts are stored in Azure Storage private containers, you have to provide the `storageAccountName` and `storageAccountKey`.\n\n**Protected Settings**\n```json\n{\n  \"disabled\": false,\n  \"stop\": false,\n  \"rebootAfterPatch\": \"RebootIfNeed\",\n  \"category\": \"ImportantAndRecommended\",\n  \"installDuration\": \"00:30\",\n  \"oneoff\": false,\n  \"intervalOfWeeks\": \"1\",\n  \"dayOfWeek\": \"Sunday|Wednesday\",\n  \"startTime\": \"03:00\",\n  \"vmStatusTest\": {\n    \"local\": false,\n    \"idleTestScript\": \"<path_to_idletestscript>\",\n    \"healthyTestScript\": \"<path_to_healthytestscript>\"\n  },\n  \"storageAccountName\": \"MyAccount\",\n  \"storageAccountKey\": \"Mykey\"\n}\n```\n\n### 3.5 Enable the extension repeatedly\nEnabling the OSPatching Extension with the exact same configuration is unsupported and will result in\na no-op (nothing will happen).
 If you need to run the patching repeatedly, you can add (or change) a timestamp:\n\n```json\n\"timestamp\": 123456789\n```\n\n### 3.6 Disable the extension\nIf you want to switch to manual OS updates temporarily, you can set the `disabled` parameter to `true` instead of uninstalling the OSPatching Extension (a minimal settings snippet is sketched just after this README).\n\n## Debugging\n* The operation log of the extension is the `/var/log/azure/<extension-name>/<version>/extension.log` file.\n* The installation status of the extension is reported back to Azure so that the user can see the status on the Azure Portal.\n  This does not mean the OSPatching Extension successfully applied the current configuration to the VM.\n* Attempting to enable the OSPatching Extension two or more times with the same configuration will result in nothing happening.\n  See the [Enable the extension repeatedly](#35-enable-the-extension-repeatedly) section above for more details.\n\n# Known Issues\n* If the scheduled task does not run on some RedHat distros, there may be a selinux-policy problem. Please refer to\n[https://bugzilla.redhat.com/show\\_bug.cgi?id=657104](https://bugzilla.redhat.com/show_bug.cgi?id=657104)\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/ \n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n"
  },
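Section 3.6 of the README above mentions the `disabled` flag but, unlike scenarios 3.1-3.5, shows no settings snippet. Mirroring those examples, a minimal protected configuration that temporarily disables the extension would presumably be:

```json
{
  "disabled": true,
  "stop": false
}
```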
  {
    "path": "OSPatching/azure/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport base64\nimport hashlib\nimport hmac\nimport sys\nimport types\nimport warnings\nimport inspect\nif sys.version_info < (3,):\n    from urllib2 import quote as url_quote\n    from urllib2 import unquote as url_unquote\n    _strtype = basestring\nelse:\n    from urllib.parse import quote as url_quote\n    from urllib.parse import unquote as url_unquote\n    _strtype = str\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom xml.sax.saxutils import escape as xml_escape\n\n#--------------------------------------------------------------------------\n# constants\n\n__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'\n__version__ = '0.8.4'\n\n# Live ServiceClient URLs\nBLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'\nQUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'\nTABLE_SERVICE_HOST_BASE = '.table.core.windows.net'\nSERVICE_BUS_HOST_BASE = '.servicebus.windows.net'\nMANAGEMENT_HOST = 'management.core.windows.net'\n\n# Development ServiceClient URLs\nDEV_BLOB_HOST = '127.0.0.1:10000'\nDEV_QUEUE_HOST = '127.0.0.1:10001'\nDEV_TABLE_HOST = '127.0.0.1:10002'\n\n# Default credentials for Development Storage Service\nDEV_ACCOUNT_NAME = 'devstoreaccount1'\nDEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='\n\n# All of our error messages\n_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'\n_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'\n_ERROR_INCORRECT_TABLE_IN_BATCH = \\\n    'Table should be the same in a batch operations'\n_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \\\n    'Partition Key should be the same in a batch operations'\n_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \\\n    'Row Keys should not be the same in a batch operations'\n_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \\\n    'Message is not peek locked and cannot be deleted.'\n_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \\\n    'Message is not peek locked and cannot be unlocked.'\n_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'\n_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'\n_ERROR_CONFLICT = 'Conflict ({0})'\n_ERROR_NOT_FOUND = 'Not found ({0})'\n_ERROR_UNKNOWN = 'Unknown error ({0})'\n_ERROR_SERVICEBUS_MISSING_INFO = \\\n    'You need to provide servicebus namespace, access key and Issuer'\n_ERROR_STORAGE_MISSING_INFO = \\\n    'You need to provide both account name and access key'\n_ERROR_ACCESS_POLICY = \\\n    'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \\\n    'instance'\n_WARNING_VALUE_SHOULD_BE_BYTES = \\\n    'Warning: {0} must be bytes data type. 
It will be converted ' + \\\n    'automatically, with utf-8 text encoding.'\n_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'\n_ERROR_VALUE_NONE = '{0} should not be None.'\n_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'\n_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \\\n    'Cannot serialize the specified value ({0}) to an entity.  Please use ' + \\\n    'an EntityProperty (which can specify custom types), int, str, bool, ' + \\\n    'or datetime.'\n_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \\\n    'Invalid page blob size: {0}. ' + \\\n    'The size must be aligned to a 512-byte boundary.'\n\n_USER_AGENT_STRING = 'pyazure/' + __version__\n\nMETADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'\n\n\nclass WindowsAzureData(object):\n\n    ''' This is the base data class.\n    It is only used to check whether an object is an instance of it. '''\n    pass\n\n\nclass WindowsAzureError(Exception):\n\n    ''' WindowsAzure Exception base class. '''\n\n    def __init__(self, message):\n        super(WindowsAzureError, self).__init__(message)\n\n\nclass WindowsAzureConflictError(WindowsAzureError):\n\n    '''Indicates that the resource could not be created because it already\n    exists'''\n\n    def __init__(self, message):\n        super(WindowsAzureConflictError, self).__init__(message)\n\n\nclass WindowsAzureMissingResourceError(WindowsAzureError):\n\n    '''Indicates that a request for a resource (queue, table,\n    container, etc...) failed because the specified resource does not exist'''\n\n    def __init__(self, message):\n        super(WindowsAzureMissingResourceError, self).__init__(message)\n\n\nclass WindowsAzureBatchOperationError(WindowsAzureError):\n\n    '''Indicates that a batch operation failed'''\n\n    def __init__(self, message, code):\n        super(WindowsAzureBatchOperationError, self).__init__(message)\n        self.code = code\n\n\nclass Feed(object):\n    pass\n\n\nclass _Base64String(str):\n    pass\n\n\nclass HeaderDict(dict):\n\n    def __getitem__(self, index):\n        return super(HeaderDict, self).__getitem__(index.lower())\n\n\ndef _encode_base64(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    encoded = base64.b64encode(data)\n    return encoded.decode('utf-8')\n\n\ndef _decode_base64_to_bytes(data):\n    if isinstance(data, _unicode_type):\n        data = data.encode('utf-8')\n    return base64.b64decode(data)\n\n\ndef _decode_base64_to_text(data):\n    decoded_bytes = _decode_base64_to_bytes(data)\n    return decoded_bytes.decode('utf-8')\n\n\ndef _get_readable_id(id_name, id_prefix_to_skip):\n    \"\"\"simplifies an id to be more friendly for people\"\"\"\n    # id_name is in the form 'https://namespace.host.suffix/name'\n    # where name may contain a forward slash!\n    pos = id_name.find('//')\n    if pos != -1:\n        pos += 2\n        if id_prefix_to_skip:\n            pos = id_name.find(id_prefix_to_skip, pos)\n            if pos != -1:\n                pos += len(id_prefix_to_skip)\n        pos = id_name.find('/', pos)\n        if pos != -1:\n            return id_name[pos + 1:]\n    return id_name\n\n\ndef _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):\n    ''' get properties from entry xml '''\n    properties = {}\n\n    etag = entry.getAttributeNS(METADATA_NS, 'etag')\n    if etag:\n        properties['etag'] = etag\n    for updated in _get_child_nodes(entry, 'updated'):\n        properties['updated'] =
 updated.firstChild.nodeValue\n    for name in _get_children_from_path(entry, 'author', 'name'):\n        if name.firstChild is not None:\n            properties['author'] = name.firstChild.nodeValue\n\n    if include_id:\n        if use_title_as_id:\n            for title in _get_child_nodes(entry, 'title'):\n                properties['name'] = title.firstChild.nodeValue\n        else:\n            for id in _get_child_nodes(entry, 'id'):\n                properties['name'] = _get_readable_id(\n                    id.firstChild.nodeValue, id_prefix_to_skip)\n\n    return properties\n\n\ndef _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):\n    ''' get properties from entry xml '''\n    xmldoc = minidom.parseString(xmlstr)\n    properties = {}\n\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        properties.update(_get_entry_properties_from_node(entry, include_id, id_prefix_to_skip))\n\n    return properties\n\n\ndef _get_first_child_node_value(parent_node, node_name):\n    xml_attrs = _get_child_nodes(parent_node, node_name)\n    if xml_attrs:\n        xml_attr = xml_attrs[0]\n        if xml_attr.firstChild:\n            value = xml_attr.firstChild.nodeValue\n            return value\n\n\ndef _get_child_nodes(node, tagName):\n    return [childNode for childNode in node.getElementsByTagName(tagName)\n            if childNode.parentNode == node]\n\n\ndef _get_children_from_path(node, *path):\n    '''descends through a hierarchy of nodes returning the list of children\n    at the innermost level.  Only returns children who share a common parent,\n    not cousins.'''\n    cur = node\n    for index, child in enumerate(path):\n        if isinstance(child, _strtype):\n            next = _get_child_nodes(cur, child)\n        else:\n            next = _get_child_nodesNS(cur, *child)\n        if index == len(path) - 1:\n            return next\n        elif not next:\n            break\n\n        cur = next[0]\n    return []\n\n\ndef _get_child_nodesNS(node, ns, tagName):\n    return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)\n            if childNode.parentNode == node]\n\n\ndef _create_entry(entry_body):\n    ''' Adds common part of entry to a given entry body and returns the whole\n    xml. 
'''\n    updated_str = datetime.utcnow().isoformat()\n    if datetime.utcnow().utcoffset() is None:\n        updated_str += '+00:00'\n\n    entry_start = '''<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\" >\n<title /><updated>{updated}</updated><author><name /></author><id />\n<content type=\"application/xml\">\n    {body}</content></entry>'''\n    return entry_start.format(updated=updated_str, body=entry_body)\n\n\ndef _to_datetime(strtime):\n    return datetime.strptime(strtime, \"%Y-%m-%dT%H:%M:%S.%f\")\n\n_KNOWN_SERIALIZATION_XFORMS = {\n    'include_apis': 'IncludeAPIs',\n    'message_id': 'MessageId',\n    'content_md5': 'Content-MD5',\n    'last_modified': 'Last-Modified',\n    'cache_control': 'Cache-Control',\n    'account_admin_live_email_id': 'AccountAdminLiveEmailId',\n    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',\n    'subscription_id': 'SubscriptionID',\n    'fqdn': 'FQDN',\n    'private_id': 'PrivateID',\n    'os_virtual_hard_disk': 'OSVirtualHardDisk',\n    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',\n    'logical_size_in_gb': 'LogicalSizeInGB',\n    'os': 'OS',\n    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',\n    'copy_id': 'CopyId',\n    }\n\n\ndef _get_serialization_name(element_name):\n    \"\"\"converts a Python name into a serializable name\"\"\"\n    known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)\n    if known is not None:\n        return known\n\n    if element_name.startswith('x_ms_'):\n        return element_name.replace('_', '-')\n    if element_name.endswith('_id'):\n        element_name = element_name.replace('_id', 'ID')\n    for name in ['content_', 'last_modified', 'if_', 'cache_control']:\n        if element_name.startswith(name):\n            element_name = element_name.replace('_', '-_')\n\n    return ''.join(name.capitalize() for name in element_name.split('_'))\n\nif sys.version_info < (3,):\n    _unicode_type = unicode\n\n    def _str(value):\n        if isinstance(value, unicode):\n            return value.encode('utf-8')\n\n        return str(value)\nelse:\n    _str = str\n    _unicode_type = str\n\n\ndef _str_or_none(value):\n    if value is None:\n        return None\n\n    return _str(value)\n\n\ndef _int_or_none(value):\n    if value is None:\n        return None\n\n    return str(int(value))\n\n\ndef _bool_or_none(value):\n    if value is None:\n        return None\n\n    if isinstance(value, bool):\n        if value:\n            return 'true'\n        else:\n            return 'false'\n\n    return str(value)\n\n\ndef _convert_class_to_xml(source, xml_prefix=True):\n    if source is None:\n        return ''\n\n    xmlstr = ''\n    if xml_prefix:\n        xmlstr = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n\n    if isinstance(source, list):\n        for value in source:\n            xmlstr += _convert_class_to_xml(value, False)\n    elif isinstance(source, WindowsAzureData):\n        class_name = source.__class__.__name__\n        xmlstr += '<' + class_name + '>'\n        for name, value in vars(source).items():\n            if value is not None:\n                if isinstance(value, list) or \\\n                    isinstance(value, WindowsAzureData):\n                    xmlstr += _convert_class_to_xml(value, False)\n                else:\n                    xmlstr += ('<' + 
_get_serialization_name(name) + '>' +\n                               xml_escape(str(value)) + '</' +\n                               _get_serialization_name(name) + '>')\n        xmlstr += '</' + class_name + '>'\n    return xmlstr\n\n\ndef _find_namespaces_from_child(parent, child, namespaces):\n    \"\"\"Recursively searches from the parent to the child,\n    gathering all the applicable namespaces along the way\"\"\"\n    for cur_child in parent.childNodes:\n        if cur_child is child:\n            return True\n        if _find_namespaces_from_child(cur_child, child, namespaces):\n            # we are the parent node\n            for key in cur_child.attributes.keys():\n                if key.startswith('xmlns:') or key == 'xmlns':\n                    namespaces[key] = cur_child.attributes[key]\n            break\n    return False\n\n\ndef _find_namespaces(parent, child):\n    res = {}\n    for key in parent.documentElement.attributes.keys():\n        if key.startswith('xmlns:') or key == 'xmlns':\n            res[key] = parent.documentElement.attributes[key]\n    _find_namespaces_from_child(parent, child, res)\n    return res\n\n\ndef _clone_node_with_namespaces(node_to_clone, original_doc):\n    clone = node_to_clone.cloneNode(True)\n\n    for key, value in _find_namespaces(original_doc, node_to_clone).items():\n        clone.attributes[key] = value\n\n    return clone\n\n\ndef _convert_response_to_feeds(response, convert_callback):\n    if response is None:\n        return None\n\n    feeds = _list_of(Feed)\n\n    x_ms_continuation = HeaderDict()\n    for name, value in response.headers:\n        if 'x-ms-continuation' in name:\n            x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value\n    if x_ms_continuation:\n        setattr(feeds, 'x_ms_continuation', x_ms_continuation)\n\n    xmldoc = minidom.parseString(response.body)\n    xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')\n    if not xml_entries:\n        # in some cases, response contains only entry but no feed\n        xml_entries = _get_children_from_path(xmldoc, 'entry')\n    if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):\n        for xml_entry in xml_entries:\n            return_obj = convert_callback()\n            for node in _get_children_from_path(xml_entry,\n                                                'content',\n                                                convert_callback.__name__):\n                _fill_data_to_return_object(node, return_obj)\n            for name, value in _get_entry_properties_from_node(xml_entry,\n                                                               include_id=True,\n                                                               use_title_as_id=True).items():\n                setattr(return_obj, name, value)\n            feeds.append(return_obj)\n    else:\n        for xml_entry in xml_entries:\n            new_node = _clone_node_with_namespaces(xml_entry, xmldoc)\n            feeds.append(convert_callback(new_node.toxml('utf-8')))\n\n    return feeds\n\n\ndef _validate_type_bytes(param_name, param):\n    if not isinstance(param, bytes):\n        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _validate_not_none(param_name, param):\n    if param is None:\n        raise TypeError(_ERROR_VALUE_NONE.format(param_name))\n\n\ndef _fill_list_of(xmldoc, element_type, xml_element_name):\n    xmlelements = _get_child_nodes(xmldoc, xml_element_name)\n    return 
[_parse_response_body_from_xml_node(xmlelement, element_type) \\\n        for xmlelement in xmlelements]\n\n\ndef _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,\n                         xml_element_name):\n    '''Converts an xml fragment into a list of scalar types.  The parent xml\n    element contains a flat list of xml elements which are converted into the\n    specified scalar type and added to the list.\n    Example:\n    xmldoc=\n<Endpoints>\n    <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>\n    <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>\n</Endpoints>\n    element_type=str\n    parent_xml_element_name='Endpoints'\n    xml_element_name='Endpoint'\n    '''\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)\n        return [_get_node_value(xmlelement, element_type) \\\n            for xmlelement in xmlelements]\n\n\ndef _fill_dict(xmldoc, element_name):\n    xmlelements = _get_child_nodes(xmldoc, element_name)\n    if xmlelements:\n        return_obj = {}\n        for child in xmlelements[0].childNodes:\n            if child.firstChild:\n                return_obj[child.nodeName] = child.firstChild.nodeValue\n        return return_obj\n\n\ndef _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,\n                  key_xml_element_name, value_xml_element_name):\n    '''Converts an xml fragment into a dictionary. The parent xml element\n    contains a list of xml elements where each element has a child element for\n    the key, and another for the value.\n    Example:\n    xmldoc=\n<ExtendedProperties>\n    <ExtendedProperty>\n        <Name>Ext1</Name>\n        <Value>Val1</Value>\n    </ExtendedProperty>\n    <ExtendedProperty>\n        <Name>Ext2</Name>\n        <Value>Val2</Value>\n    </ExtendedProperty>\n</ExtendedProperties>\n    element_type=str\n    parent_xml_element_name='ExtendedProperties'\n    pair_xml_element_name='ExtendedProperty'\n    key_xml_element_name='Name'\n    value_xml_element_name='Value'\n    '''\n    return_obj = {}\n\n    xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)\n    if xmlelements:\n        xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)\n        for pair in xmlelements:\n            keys = _get_child_nodes(pair, key_xml_element_name)\n            values = _get_child_nodes(pair, value_xml_element_name)\n            if keys and values:\n                key = keys[0].firstChild.nodeValue\n                value = values[0].firstChild.nodeValue\n                return_obj[key] = value\n\n    return return_obj\n\n\ndef _fill_instance_child(xmldoc, element_name, return_type):\n    '''Converts a child of the current dom element to the specified type.\n    '''\n    xmlelements = _get_child_nodes(\n        xmldoc, _get_serialization_name(element_name))\n\n    if not xmlelements:\n        return None\n\n    return_obj = return_type()\n    _fill_data_to_return_object(xmlelements[0], return_obj)\n\n    return return_obj\n\n\ndef _fill_instance_element(element, return_type):\n    \"\"\"Converts a DOM element into the specified object\"\"\"\n    return _parse_response_body_from_xml_node(element, return_type)\n\n\ndef _fill_data_minidom(xmldoc, element_name, data_member):\n    xmlelements = _get_child_nodes(\n        xmldoc, 
_get_serialization_name(element_name))\n\n    if not xmlelements or not xmlelements[0].childNodes:\n        return None\n\n    value = xmlelements[0].firstChild.nodeValue\n\n    if data_member is None:\n        return value\n    elif isinstance(data_member, datetime):\n        return _to_datetime(value)\n    elif type(data_member) is bool:\n        return value.lower() != 'false'\n    else:\n        return type(data_member)(value)\n\n\ndef _get_node_value(xmlelement, data_type):\n    value = xmlelement.firstChild.nodeValue\n    if data_type is datetime:\n        return _to_datetime(value)\n    elif data_type is bool:\n        return value.lower() != 'false'\n    else:\n        return data_type(value)\n\n\ndef _get_request_body_bytes_only(param_name, param_value):\n    '''Validates the request body passed in and converts it to bytes\n    if our policy allows it.'''\n    if param_value is None:\n        return b''\n\n    if isinstance(param_value, bytes):\n        return param_value\n\n    # Previous versions of the SDK allowed data types other than bytes to be\n    # passed in, and they would be auto-converted to bytes.  We preserve this\n    # behavior when running under 2.7, but issue a warning.\n    # Python 3 support is new, so we reject anything that's not bytes.\n    if sys.version_info < (3,):\n        warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))\n        return _get_request_body(param_value)\n\n    raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\n\n\ndef _get_request_body(request_body):\n    '''Converts an object into a request body.  If it's None\n    we'll return an empty string, if it's one of our objects it'll\n    convert it to XML and return it.  Otherwise we just use the object\n    directly'''\n    if request_body is None:\n        return b''\n\n    if isinstance(request_body, WindowsAzureData):\n        request_body = _convert_class_to_xml(request_body)\n\n    if isinstance(request_body, bytes):\n        return request_body\n\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    request_body = str(request_body)\n    if isinstance(request_body, _unicode_type):\n        return request_body.encode('utf-8')\n\n    return request_body\n\n\ndef _parse_enum_results_list(response, return_type, resp_type, item_type):\n    \"\"\"resp_body is the XML we received\nresp_type is a string, such as Containers,\nreturn_type is the type we're constructing, such as ContainerEnumResults\nitem_type is the type object of the item to be created, such as Container\n\nThis function then returns a ContainerEnumResults object with the\ncontainers member populated with the results.\n\"\"\"\n\n    # parsing something like:\n    # <EnumerationResults ... 
>\n    #   <Queues>\n    #       <Queue>\n    #           <Something />\n    #           <SomethingElse />\n    #       </Queue>\n    #   </Queues>\n    # </EnumerationResults>\n    respbody = response.body\n    return_obj = return_type()\n    doc = minidom.parseString(respbody)\n\n    items = []\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        # path is something like Queues, Queue\n        for child in _get_children_from_path(enum_results,\n                                             resp_type,\n                                             resp_type[:-1]):\n            items.append(_fill_instance_element(child, item_type))\n\n        for name, value in vars(return_obj).items():\n            # queues, Queues; this is the list itself, which we populated\n            # above\n            if name == resp_type.lower():\n                # skip the list itself.\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    setattr(return_obj, resp_type.lower(), items)\n    return return_obj\n\n\ndef _parse_simple_list(response, type, item_type, list_name):\n    respbody = response.body\n    res = type()\n    res_items = []\n    doc = minidom.parseString(respbody)\n    type_name = type.__name__\n    item_name = item_type.__name__\n    for item in _get_children_from_path(doc, type_name, item_name):\n        res_items.append(_fill_instance_element(item, item_type))\n\n    setattr(res, list_name, res_items)\n    return res\n\n\ndef _parse_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_xml_text(response.body, return_type)\n\ndef _parse_service_resources_response(response, return_type):\n    '''\n    Parse the HTTPResponse's body and fill all the data into a class of\n    return_type.\n    '''\n    return _parse_response_body_from_service_resources_xml_text(response.body, return_type)\n\n\ndef _fill_data_to_return_object(node, return_obj):\n    members = dict(vars(return_obj))\n    for name, value in members.items():\n        if isinstance(value, _list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_list_of(node,\n                                  value.list_type,\n                                  value.xml_element_name))\n        elif isinstance(value, _scalar_list_of):\n            setattr(return_obj,\n                    name,\n                    _fill_scalar_list_of(node,\n                                         value.list_type,\n                                         _get_serialization_name(name),\n                                         value.xml_element_name))\n        elif isinstance(value, _dict_of):\n            setattr(return_obj,\n                    name,\n                    _fill_dict_of(node,\n                                  _get_serialization_name(name),\n                                  value.pair_xml_element_name,\n                                  value.key_xml_element_name,\n                                  value.value_xml_element_name))\n        elif isinstance(value, _xml_attribute):\n            real_value = None\n            if node.hasAttribute(value.xml_element_name):\n                real_value = node.getAttribute(value.xml_element_name)\n            if real_value is not None:\n                setattr(return_obj, name, real_value)\n        
elif isinstance(value, WindowsAzureData):\n            setattr(return_obj,\n                    name,\n                    _fill_instance_child(node, name, value.__class__))\n        elif isinstance(value, dict):\n            setattr(return_obj,\n                    name,\n                    _fill_dict(node, _get_serialization_name(name)))\n        elif isinstance(value, _Base64String):\n            value = _fill_data_minidom(node, name, '')\n            if value is not None:\n                value = _decode_base64_to_text(value)\n            # always set the attribute, so we don't end up returning an object\n            # with type _Base64String\n            setattr(return_obj, name, value)\n        else:\n            value = _fill_data_minidom(node, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n\ndef _parse_response_body_from_xml_node(node, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    return_obj = return_type()\n    _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\n\ndef _parse_response_body_from_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = return_type()\n    xml_name = return_type._xml_name if hasattr(return_type, '_xml_name') else return_type.__name__\n    for node in _get_child_nodes(doc, xml_name):\n        _fill_data_to_return_object(node, return_obj)\n\n    return return_obj\n\ndef _parse_response_body_from_service_resources_xml_text(respbody, return_type):\n    '''\n    parse the xml and fill all the data into a class of return_type\n    '''\n    doc = minidom.parseString(respbody)\n    return_obj = _list_of(return_type)\n    for node in _get_children_from_path(doc, \"ServiceResources\", \"ServiceResource\"):\n        local_obj = return_type()\n        _fill_data_to_return_object(node, local_obj)\n        return_obj.append(local_obj)\n\n    return return_obj\n\nclass _dict_of(dict):\n\n    \"\"\"a dict which carries with it the xml element names for key,val.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, pair_xml_element_name, key_xml_element_name,\n                 value_xml_element_name):\n        self.pair_xml_element_name = pair_xml_element_name\n        self.key_xml_element_name = key_xml_element_name\n        self.value_xml_element_name = value_xml_element_name\n        super(_dict_of, self).__init__()\n\n\nclass _list_of(list):\n\n    \"\"\"a list which carries with it the type that's expected to go in it.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name=None):\n        self.list_type = list_type\n        if xml_element_name is None:\n            self.xml_element_name = list_type.__name__\n        else:\n            self.xml_element_name = xml_element_name\n        super(_list_of, self).__init__()\n\n\nclass _scalar_list_of(list):\n\n    \"\"\"a list of scalar types which carries with it the type that's\n    expected to go in it along with its xml element name.\n    Used for deserialization and construction of the lists\"\"\"\n\n    def __init__(self, list_type, xml_element_name):\n        self.list_type = list_type\n        self.xml_element_name = xml_element_name\n        super(_scalar_list_of, self).__init__()\n\n\nclass _xml_attribute:\n\n    \"\"\"an accessor to XML attributes\n
    carries the xml attribute name used to read the attribute value.\n    Used for deserialization and construction\"\"\"\n\n    def __init__(self, xml_element_name):\n        self.xml_element_name = xml_element_name\n\n\ndef _update_request_uri_query_local_storage(request, use_local_storage):\n    ''' create correct uri and query for the request '''\n    uri, query = _update_request_uri_query(request)\n    if use_local_storage:\n        return '/' + DEV_ACCOUNT_NAME + uri, query\n    return uri, query\n\n\ndef _update_request_uri_query(request):\n    '''pulls the query string out of the URI and moves it into\n    the query portion of the request object.  If there are already\n    query parameters on the request the parameters in the URI will\n    appear after the existing parameters'''\n\n    if '?' in request.path:\n        request.path, _, query_string = request.path.partition('?')\n        if query_string:\n            query_params = query_string.split('&')\n            for query in query_params:\n                if '=' in query:\n                    name, _, value = query.partition('=')\n                    request.query.append((name, value))\n\n    request.path = url_quote(request.path, '/()$=\\',')\n\n    # add encoded queries to request.path.\n    if request.query:\n        request.path += '?'\n        for name, value in request.query:\n            if value is not None:\n                request.path += name + '=' + url_quote(value, '/()$=\\',') + '&'\n        request.path = request.path[:-1]\n\n    return request.path, request.query\n\n\ndef _dont_fail_on_exist(error):\n    ''' don't throw exception if the resource exists.\n    This is called by create_* APIs with fail_on_exist=False'''\n    if isinstance(error, WindowsAzureConflictError):\n        return False\n    else:\n        raise error\n\n\ndef _dont_fail_not_exist(error):\n    ''' don't throw exception if the resource doesn't exist.\n    This is called by delete_* APIs with fail_not_exist=False'''\n    if isinstance(error, WindowsAzureMissingResourceError):\n        return False\n    else:\n        raise error\n\n\ndef _general_error_handler(http_error):\n    ''' Simple error handler for azure.'''\n    if http_error.status == 409:\n        raise WindowsAzureConflictError(\n            _ERROR_CONFLICT.format(str(http_error)))\n    elif http_error.status == 404:\n        raise WindowsAzureMissingResourceError(\n            _ERROR_NOT_FOUND.format(str(http_error)))\n    else:\n        if http_error.respbody is not None:\n            raise WindowsAzureError(\n                _ERROR_UNKNOWN.format(str(http_error)) + '\\n' + \\\n                    http_error.respbody.decode('utf-8'))\n        else:\n            raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))\n\n\ndef _parse_response_for_dict(response):\n    ''' Extracts name-values from response header. Filter out the standard\n    http headers.'''\n\n    if response is None:\n        return None\n    http_headers = ['server', 'date', 'location', 'host',\n                    'via', 'proxy-connection', 'connection']\n    return_dict = HeaderDict()\n    if response.headers:\n        for name, value in response.headers:\n            if name.lower() not in http_headers:\n                return_dict[name] = value\n\n    return return_dict\n\n\ndef _parse_response_for_dict_prefix(response, prefixes):\n    ''' Extracts name-values for names starting with prefix from response\n    header. 
Filter out the standard http headers.'''\n\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            for prefix_value in prefixes:\n                if name.lower().startswith(prefix_value.lower()):\n                    return_dict[name] = value\n                    break\n        return return_dict\n    else:\n        return None\n\n\ndef _parse_response_for_dict_filter(response, filter):\n    ''' Extracts name-values for names in filter from response header. Filter\n    out the standard http headers.'''\n    if response is None:\n        return None\n    return_dict = {}\n    orig_dict = _parse_response_for_dict(response)\n    if orig_dict:\n        for name, value in orig_dict.items():\n            if name.lower() in filter:\n                return_dict[name] = value\n        return return_dict\n    else:\n        return None\n\n\ndef _sign_string(key, string_to_sign, key_is_base64=True):\n    if key_is_base64:\n        key = _decode_base64_to_bytes(key)\n    else:\n        if isinstance(key, _unicode_type):\n            key = key.encode('utf-8')\n    if isinstance(string_to_sign, _unicode_type):\n        string_to_sign = string_to_sign.encode('utf-8')\n    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)\n    digest = signed_hmac_sha256.digest()\n    encoded_digest = _encode_base64(digest)\n    return encoded_digest\n"
  },
  {
    "path": "OSPatching/azure/azure.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>{25b2c65a-0553-4452-8907-8b5b17544e68}</ProjectGuid>\n    <ProjectHome>\n    </ProjectHome>\n    <StartupFile>storage\\blobservice.py</StartupFile>\n    <SearchPath>..</SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>azure</Name>\n    <RootNamespace>azure</RootNamespace>\n    <IsWindowsApplication>False</IsWindowsApplication>\n    <LaunchProvider>Standard Python launcher</LaunchProvider>\n    <CommandLineArguments />\n    <InterpreterPath />\n    <InterpreterArguments />\n    <InterpreterId>{9a7a9026-48c1-4688-9d5d-e5699d47d074}</InterpreterId>\n    <InterpreterVersion>3.4</InterpreterVersion>\n    <SccProjectName>SAK</SccProjectName>\n    <SccProvider>SAK</SccProvider>\n    <SccAuxPath>SAK</SccAuxPath>\n    <SccLocalPath>SAK</SccLocalPath>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <ItemGroup>\n    <Compile Include=\"http\\batchclient.py\" />\n    <Compile Include=\"http\\httpclient.py\" />\n    <Compile Include=\"http\\winhttp.py\" />\n    <Compile Include=\"http\\__init__.py\" />\n    <Compile Include=\"servicemanagement\\servicebusmanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementclient.py\" />\n    <Compile Include=\"servicemanagement\\servicemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\sqldatabasemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\websitemanagementservice.py\" />\n    <Compile Include=\"servicemanagement\\__init__.py\" />\n    <Compile Include=\"servicebus\\servicebusservice.py\" />\n    <Compile Include=\"storage\\blobservice.py\" />\n    <Compile Include=\"storage\\queueservice.py\" />\n    <Compile Include=\"storage\\cloudstorageaccount.py\" />\n    <Compile Include=\"storage\\tableservice.py\" />\n    <Compile Include=\"storage\\sharedaccesssignature.py\" />\n    <Compile Include=\"__init__.py\" />\n    <Compile Include=\"servicebus\\__init__.py\" />\n    <Compile Include=\"storage\\storageclient.py\" />\n    <Compile Include=\"storage\\__init__.py\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Folder Include=\"http\" />\n    <Folder Include=\"servicemanagement\" />\n    <Folder Include=\"servicebus\\\" />\n    <Folder Include=\"storage\" />\n  </ItemGroup>\n  <ItemGroup>\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.6\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\2.7\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.3\" />\n    <InterpreterReference Include=\"{2af0f10d-7135-4994-9156-5d01c9c11b7e}\\3.4\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\2.7\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.3\" />\n    <InterpreterReference Include=\"{9a7a9026-48c1-4688-9d5d-e5699d47d074}\\3.4\" 
/>\n  </ItemGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <VSToolsPath Condition=\"'$(VSToolsPath)' == ''\">$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)</VSToolsPath>\n    <PtvsTargetsFile>$(VSToolsPath)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <Import Condition=\"Exists($(PtvsTargetsFile))\" Project=\"$(PtvsTargetsFile)\" />\n  <Import Condition=\"!Exists($(PtvsTargetsFile))\" Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" />\n</Project>"
  },
  {
    "path": "OSPatching/azure/http/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\n\nHTTP_RESPONSE_NO_CONTENT = 204\n\n\nclass HTTPError(Exception):\n\n    ''' HTTP Exception when response status code >= 300 '''\n\n    def __init__(self, status, message, respheader, respbody):\n        '''Creates a new HTTPError with the specified status, message,\n        response headers and body'''\n        self.status = status\n        self.respheader = respheader\n        self.respbody = respbody\n        Exception.__init__(self, message)\n\n\nclass HTTPResponse(object):\n\n    \"\"\"Represents a response from an HTTP request.  An HTTPResponse has the\n    following attributes:\n\n    status: the status code of the response\n    message: the message\n    headers: the returned headers, as a list of (name, value) pairs\n    body: the body of the response\n    \"\"\"\n\n    def __init__(self, status, message, headers, body):\n        self.status = status\n        self.message = message\n        self.headers = headers\n        self.body = body\n\n\nclass HTTPRequest(object):\n\n    '''Represents an HTTP Request.  An HTTP Request consists of the following\n    attributes:\n\n    host: the host name to connect to\n    method: the method to use to connect (string such as GET, POST, PUT, etc.)\n    path: the uri fragment\n    query: query parameters specified as a list of (name, value) pairs\n    headers: header values specified as (name, value) pairs\n    body: the body of the request.\n    protocol_override:\n        specify to use this protocol instead of the global one stored in\n        _HTTPClient.\n    '''\n\n    def __init__(self):\n        self.host = ''\n        self.method = ''\n        self.path = ''\n        self.query = []      # list of (name, value)\n        self.headers = []    # list of (header name, header value)\n        self.body = ''\n        self.protocol_override = None\n"
  },
  {
    "path": "OSPatching/azure/http/batchclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport uuid\n\nfrom azure import (\n    _update_request_uri_query,\n    WindowsAzureError,\n    WindowsAzureBatchOperationError,\n    _get_children_from_path,\n    url_unquote,\n    _ERROR_CANNOT_FIND_PARTITION_KEY,\n    _ERROR_CANNOT_FIND_ROW_KEY,\n    _ERROR_INCORRECT_TABLE_IN_BATCH,\n    _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,\n    _ERROR_DUPLICATE_ROW_KEY_IN_BATCH,\n    _ERROR_BATCH_COMMIT_FAIL,\n    )\nfrom azure.http import HTTPError, HTTPRequest, HTTPResponse\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import (\n    _update_storage_table_header,\n    METADATA_NS,\n    _sign_storage_table_request,\n    )\nfrom xml.dom import minidom\n\n_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'\n\nif sys.version_info < (3,):\n    def _new_boundary():\n        return str(uuid.uuid1())\nelse:\n    def _new_boundary():\n        return str(uuid.uuid1()).encode('utf-8')\n\n\nclass _BatchClient(_HTTPClient):\n\n    '''\n    This is the class that is used for batch operation for storage table\n    service. It only supports one changeset.\n    '''\n\n    def __init__(self, service_instance, account_key, account_name,\n                 protocol='http'):\n        _HTTPClient.__init__(self, service_instance, account_name=account_name,\n                             account_key=account_key, protocol=protocol)\n        self.is_batch = False\n        self.batch_requests = []\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n\n    def get_request_table(self, request):\n        '''\n        Extracts table name from request.uri. The request.uri has either\n        \"/mytable(...)\" or \"/mytable\" format.\n\n        request: the request to insert, update or delete entity\n        '''\n        if '(' in request.path:\n            pos = request.path.find('(')\n            return request.path[1:pos]\n        else:\n            return request.path[1:]\n\n    def get_request_partition_key(self, request):\n        '''\n        Extracts PartitionKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. 
Only the insert operation\n        uses a POST request, and its PartitionKey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            part_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'PartitionKey'))\n            if not part_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return part_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('PartitionKey=\\'')\n            pos2 = uri.find('\\',', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)\n            return uri[pos1 + len('PartitionKey=\\''):pos2]\n\n    def get_request_row_key(self, request):\n        '''\n        Extracts RowKey from request.body if it is a POST request or from\n        request.path if it is not a POST request. Only the insert operation\n        uses a POST request, and its RowKey is in the request body.\n\n        request: the request to insert, update or delete entity\n        '''\n        if request.method == 'POST':\n            doc = minidom.parseString(request.body)\n            row_key = _get_children_from_path(\n                doc, 'entry', 'content', (METADATA_NS, 'properties'),\n                (_DATASERVICES_NS, 'RowKey'))\n            if not row_key:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            return row_key[0].firstChild.nodeValue\n        else:\n            uri = url_unquote(request.path)\n            pos1 = uri.find('RowKey=\\'')\n            pos2 = uri.find('\\')', pos1)\n            if pos1 == -1 or pos2 == -1:\n                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)\n            row_key = uri[pos1 + len('RowKey=\\''):pos2]\n            return row_key\n\n    def validate_request_table(self, request):\n        '''\n        Validates that all requests have the same table name. Sets the table\n        name if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_table:\n            if self.get_request_table(request) != self.batch_table:\n                raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)\n        else:\n            self.batch_table = self.get_request_table(request)\n\n    def validate_request_partition_key(self, request):\n        '''\n        Validates that all requests have the same PartitionKey. 
Sets the\n        PartitionKey if it is the first request for the batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        if self.batch_partition_key:\n            if self.get_request_partition_key(request) != \\\n                self.batch_partition_key:\n                raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)\n        else:\n            self.batch_partition_key = self.get_request_partition_key(request)\n\n    def validate_request_row_key(self, request):\n        '''\n        Validates that each request has a different RowKey and adds the RowKey\n        to the existing RowKey list.\n\n        request: the request to insert, update or delete entity\n        '''\n        row_key = self.get_request_row_key(request)\n        if row_key in self.batch_row_keys:\n            raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)\n        # Record every RowKey, not just the first one, so that duplicates\n        # among later requests are detected as well.\n        self.batch_row_keys.append(row_key)\n\n    def begin_batch(self):\n        '''\n        Starts the batch operation. Initializes the batch variables.\n\n        is_batch: batch operation flag.\n        batch_table: the table name of the batch operation\n        batch_partition_key: the PartitionKey of the batch requests.\n        batch_row_keys: the list of RowKeys of the added requests.\n        batch_requests: the list of the requests.\n        '''\n        self.is_batch = True\n        self.batch_table = ''\n        self.batch_partition_key = ''\n        self.batch_row_keys = []\n        self.batch_requests = []\n\n    def insert_request_to_batch(self, request):\n        '''\n        Adds request to batch operation.\n\n        request: the request to insert, update or delete entity\n        '''\n        self.validate_request_table(request)\n        self.validate_request_partition_key(request)\n        self.validate_request_row_key(request)\n        self.batch_requests.append(request)\n\n    def commit_batch(self):\n        ''' Resets batch flag and commits the batch requests. '''\n        if self.is_batch:\n            self.is_batch = False\n            self.commit_batch_requests()\n\n    def commit_batch_requests(self):\n        ''' Commits the batch requests. 
'''\n\n        batch_boundary = b'batch_' + _new_boundary()\n        changeset_boundary = b'changeset_' + _new_boundary()\n\n        # Commit the batch only if the requests list is not empty.\n        if self.batch_requests:\n            request = HTTPRequest()\n            request.method = 'POST'\n            request.host = self.batch_requests[0].host\n            request.path = '/$batch'\n            request.headers = [\n                ('Content-Type', 'multipart/mixed; boundary=' + \\\n                    batch_boundary.decode('utf-8')),\n                ('Accept', 'application/atom+xml,application/xml'),\n                ('Accept-Charset', 'UTF-8')]\n\n            request.body = b'--' + batch_boundary + b'\\n'\n            request.body += b'Content-Type: multipart/mixed; boundary='\n            request.body += changeset_boundary + b'\\n\\n'\n\n            content_id = 1\n\n            # Adds each request body to the POST data.\n            for batch_request in self.batch_requests:\n                request.body += b'--' + changeset_boundary + b'\\n'\n                request.body += b'Content-Type: application/http\\n'\n                request.body += b'Content-Transfer-Encoding: binary\\n\\n'\n                request.body += batch_request.method.encode('utf-8')\n                request.body += b' http://'\n                request.body += batch_request.host.encode('utf-8')\n                request.body += batch_request.path.encode('utf-8')\n                request.body += b' HTTP/1.1\\n'\n                request.body += b'Content-ID: '\n                request.body += str(content_id).encode('utf-8') + b'\\n'\n                content_id += 1\n\n                # Add different headers for different request types.\n                if batch_request.method != 'DELETE':\n                    request.body += \\\n                        b'Content-Type: application/atom+xml;type=entry\\n'\n                    for name, value in batch_request.headers:\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n'\n                            break\n                    request.body += b'Content-Length: '\n                    request.body += str(len(batch_request.body)).encode('utf-8')\n                    request.body += b'\\n\\n'\n                    request.body += batch_request.body + b'\\n'\n                else:\n                    for name, value in batch_request.headers:\n                        # If-Match should be already included in\n                        # batch_request.headers, but in case it is missing,\n                        # just add it.\n                        if name == 'If-Match':\n                            request.body += name.encode('utf-8') + b': '\n                            request.body += value.encode('utf-8') + b'\\n\\n'\n                            break\n                    else:\n                        request.body += b'If-Match: *\\n\\n'\n\n            request.body += b'--' + changeset_boundary + b'--' + b'\\n'\n            request.body += b'--' + batch_boundary + b'--'\n\n            request.path, request.query = _update_request_uri_query(request)\n            request.headers = _update_storage_table_header(request)\n            auth = _sign_storage_table_request(request,\n                                               self.account_name,\n                                               self.account_key)\n            
request.headers.append(('Authorization', auth))\n\n            # Submit the whole request as a batch request.\n            response = self.perform_request(request)\n            if response.status >= 300:\n                raise HTTPError(response.status,\n                                _ERROR_BATCH_COMMIT_FAIL,\n                                self.respheader,\n                                response.body)\n\n            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/\n            # The body of a ChangeSet response is either a response for all the\n            # successfully processed change requests within the ChangeSet,\n            # formatted exactly as it would have appeared outside of a batch,\n            # or a single response indicating a failure of the entire ChangeSet.\n            responses = self._parse_batch_response(response.body)\n            if responses and responses[0].status >= 300:\n                self._report_batch_error(responses[0])\n\n    def cancel_batch(self):\n        ''' Resets the batch flag. '''\n        self.is_batch = False\n\n    def _parse_batch_response(self, body):\n        parts = body.split(b'--changesetresponse_')\n\n        responses = []\n        for part in parts:\n            httpLocation = part.find(b'HTTP/')\n            if httpLocation > 0:\n                response = self._parse_batch_response_part(part[httpLocation:])\n                responses.append(response)\n\n        return responses\n\n    def _parse_batch_response_part(self, part):\n        lines = part.splitlines()\n\n        # First line is the HTTP status/reason\n        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')\n\n        # Followed by headers and body\n        headers = []\n        body = b''\n        isBody = False\n        for line in lines[1:]:\n            if line == b'' and not isBody:\n                isBody = True\n            elif isBody:\n                body += line\n            else:\n                headerName, _, headerVal = line.partition(b':')\n                headers.append((headerName.lower(), headerVal))\n\n        return HTTPResponse(int(status), reason.strip(), headers, body)\n\n    def _report_batch_error(self, response):\n        xml = response.body.decode('utf-8')\n        doc = minidom.parseString(xml)\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code')\n        code = n[0].firstChild.nodeValue if n and n[0].firstChild else ''\n\n        n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message')\n        message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml\n\n        raise WindowsAzureBatchOperationError(message, code)\n"
  },
  {
    "path": "OSPatching/azure/http/httpclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport base64\nimport os\nimport sys\n\nif sys.version_info < (3,):\n    from httplib import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urlparse import urlparse\nelse:\n    from http.client import (\n        HTTPSConnection,\n        HTTPConnection,\n        HTTP_PORT,\n        HTTPS_PORT,\n        )\n    from urllib.parse import urlparse\n\nfrom azure.http import HTTPError, HTTPResponse\nfrom azure import _USER_AGENT_STRING, _update_request_uri_query\n\n\nclass _HTTPClient(object):\n\n    '''\n    Takes the request and sends it to cloud service and returns the response.\n    '''\n\n    def __init__(self, service_instance, cert_file=None, account_name=None,\n                 account_key=None, protocol='https'):\n        '''\n        service_instance: service client instance.\n        cert_file:\n            certificate file name/location. This is only used in hosted\n            service management.\n        account_name: the storage account.\n        account_key:\n            the storage account access key.\n        '''\n        self.service_instance = service_instance\n        self.status = None\n        self.respheader = None\n        self.message = None\n        self.cert_file = cert_file\n        self.account_name = account_name\n        self.account_key = account_key\n        self.protocol = protocol\n        self.proxy_host = None\n        self.proxy_port = None\n        self.proxy_user = None\n        self.proxy_password = None\n        self.use_httplib = self.should_use_httplib()\n\n    def should_use_httplib(self):\n        if sys.platform.lower().startswith('win') and self.cert_file:\n            # On Windows, auto-detect between Windows Store Certificate\n            # (winhttp) and OpenSSL .pem certificate file (httplib).\n            #\n            # We used to only support certificates installed in the Windows\n            # Certificate Store.\n            #   cert_file example: CURRENT_USER\\my\\CertificateName\n            #\n            # We now support using an OpenSSL .pem certificate file,\n            # for a consistent experience across all platforms.\n            #   cert_file example: account\\certificate.pem\n            #\n            # When using OpenSSL .pem certificate file on Windows, make sure\n            # you are on CPython 2.7.4 or later.\n\n            # If it's not an existing file on disk, then treat it as a path in\n            # the Windows Certificate Store, which means we can't use httplib.\n            if not os.path.isfile(self.cert_file):\n                return False\n\n        return True\n\n    def set_proxy(self, host, port, user, password):\n        '''\n        Sets the proxy server host and port for the 
HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self.proxy_host = host\n        self.proxy_port = port\n        self.proxy_user = user\n        self.proxy_password = password\n\n    def get_uri(self, request):\n        ''' Returns the target uri for the request.'''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n        return protocol + '://' + request.host + ':' + str(port) + request.path\n\n    def get_connection(self, request):\n        ''' Creates a connection for the request. '''\n        protocol = request.protocol_override \\\n            if request.protocol_override else self.protocol\n        target_host = request.host\n        target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT\n\n        if not self.use_httplib:\n            import azure.http.winhttp\n            connection = azure.http.winhttp._HTTPConnection(\n                target_host, cert_file=self.cert_file, protocol=protocol)\n            proxy_host = self.proxy_host\n            proxy_port = self.proxy_port\n        else:\n            if ':' in target_host:\n                target_host, _, target_port = target_host.rpartition(':')\n            if self.proxy_host:\n                proxy_host = target_host\n                proxy_port = target_port\n                host = self.proxy_host\n                port = self.proxy_port\n            else:\n                host = target_host\n                port = target_port\n\n            if protocol == 'http':\n                connection = HTTPConnection(host, int(port))\n            else:\n                connection = HTTPSConnection(\n                    host, int(port), cert_file=self.cert_file)\n\n        if self.proxy_host:\n            headers = None\n            if self.proxy_user and self.proxy_password:\n                # base64.encodestring exists only on Python 2 and appends a\n                # trailing newline, which is invalid inside a header value;\n                # b64encode works on both Python 2 and 3.\n                auth = base64.b64encode(\n                    \"{0}:{1}\".format(self.proxy_user,\n                                     self.proxy_password).encode('utf-8'))\n                headers = {'Proxy-Authorization':\n                           'Basic {0}'.format(auth.decode('ascii'))}\n            connection.set_tunnel(proxy_host, int(proxy_port), headers)\n\n        return connection\n\n    def send_request_headers(self, connection, request_headers):\n        if self.use_httplib:\n            if self.proxy_host:\n                for i in connection._buffer:\n                    if i.startswith(\"Host: \"):\n                        connection._buffer.remove(i)\n                connection.putheader(\n                    'Host', \"{0}:{1}\".format(connection._tunnel_host,\n                                             connection._tunnel_port))\n\n        for name, value in request_headers:\n            if value:\n                connection.putheader(name, value)\n\n        connection.putheader('User-Agent', _USER_AGENT_STRING)\n        connection.endheaders()\n\n    def send_request_body(self, connection, request_body):\n        if request_body:\n            assert isinstance(request_body, bytes)\n            connection.send(request_body)\n        elif (not isinstance(connection, HTTPSConnection) and\n              not isinstance(connection, HTTPConnection)):\n            connection.send(None)\n\n    def perform_request(self, request):\n        ''' Sends request to cloud service server and returns the response. 
'''\n        connection = self.get_connection(request)\n        try:\n            connection.putrequest(request.method, request.path)\n\n            if not self.use_httplib:\n                if self.proxy_host and self.proxy_user:\n                    connection.set_proxy_credentials(\n                        self.proxy_user, self.proxy_password)\n\n            self.send_request_headers(connection, request.headers)\n            self.send_request_body(connection, request.body)\n\n            resp = connection.getresponse()\n            self.status = int(resp.status)\n            self.message = resp.reason\n            self.respheader = headers = resp.getheaders()\n\n            # for consistency across platforms, make header names lowercase\n            for i, value in enumerate(headers):\n                headers[i] = (value[0].lower(), value[1])\n\n            respbody = None\n            if resp.length is None:\n                respbody = resp.read()\n            elif resp.length > 0:\n                respbody = resp.read(resp.length)\n\n            response = HTTPResponse(\n                int(resp.status), resp.reason, headers, respbody)\n            if self.status == 307:\n                new_url = urlparse(dict(headers)['location'])\n                request.host = new_url.hostname\n                request.path = new_url.path\n                request.path, request.query = _update_request_uri_query(request)\n                return self.perform_request(request)\n            if self.status >= 300:\n                raise HTTPError(self.status, self.message,\n                                self.respheader, respbody)\n\n            return response\n        finally:\n            connection.close()\n"
  },
  {
    "path": "OSPatching/azure/http/winhttp.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom ctypes import (\n    c_void_p,\n    c_long,\n    c_ulong,\n    c_longlong,\n    c_ulonglong,\n    c_short,\n    c_ushort,\n    c_wchar_p,\n    c_byte,\n    byref,\n    Structure,\n    Union,\n    POINTER,\n    WINFUNCTYPE,\n    HRESULT,\n    oledll,\n    WinDLL,\n    )\nimport ctypes\nimport sys\n\nif sys.version_info >= (3,):\n    def unicode(text):\n        return text\n\n#------------------------------------------------------------------------------\n#  Constants that are used in COM operations\nVT_EMPTY = 0\nVT_NULL = 1\nVT_I2 = 2\nVT_I4 = 3\nVT_BSTR = 8\nVT_BOOL = 11\nVT_I1 = 16\nVT_UI1 = 17\nVT_UI2 = 18\nVT_UI4 = 19\nVT_I8 = 20\nVT_UI8 = 21\nVT_ARRAY = 8192\n\nHTTPREQUEST_PROXYSETTING_PROXY = 2\nHTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1\n\nHTTPREQUEST_PROXY_SETTING = c_long\nHTTPREQUEST_SETCREDENTIALS_FLAGS = c_long\n#------------------------------------------------------------------------------\n# Com related APIs that are used.\n_ole32 = oledll.ole32\n_oleaut32 = WinDLL('oleaut32')\n_CLSIDFromString = _ole32.CLSIDFromString\n_CoInitialize = _ole32.CoInitialize\n_CoInitialize.argtypes = [c_void_p]\n\n_CoCreateInstance = _ole32.CoCreateInstance\n\n_SysAllocString = _oleaut32.SysAllocString\n_SysAllocString.restype = c_void_p\n_SysAllocString.argtypes = [c_wchar_p]\n\n_SysFreeString = _oleaut32.SysFreeString\n_SysFreeString.argtypes = [c_void_p]\n\n# SAFEARRAY*\n# SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG\n# cElements);\n_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector\n_SafeArrayCreateVector.restype = c_void_p\n_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong]\n\n# HRESULT\n# SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData);\n_SafeArrayAccessData = _oleaut32.SafeArrayAccessData\n_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)]\n\n# HRESULT\n# SafeArrayUnaccessData(_In_ SAFEARRAY *psa);\n_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData\n_SafeArrayUnaccessData.argtypes = [c_void_p]\n\n# HRESULT\n# SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG\n# *plUbound);\n_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound\n_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)]\n\n\n#------------------------------------------------------------------------------\n\nclass BSTR(c_wchar_p):\n\n    ''' BSTR class in python. '''\n\n    def __init__(self, value):\n        super(BSTR, self).__init__(_SysAllocString(value))\n\n    def __del__(self):\n        _SysFreeString(self)\n\n\nclass VARIANT(Structure):\n\n    '''\n    VARIANT structure in python. Does not match the definition in\n    MSDN exactly & it is only mapping the used fields.  
Field names are also\n    slightly different.\n    '''\n\n    class _tagData(Union):\n\n        class _tagRecord(Structure):\n            _fields_ = [('pvoid', c_void_p), ('precord', c_void_p)]\n\n        _fields_ = [('llval', c_longlong),\n                    ('ullval', c_ulonglong),\n                    ('lval', c_long),\n                    ('ulval', c_ulong),\n                    ('ival', c_short),\n                    ('boolval', c_ushort),\n                    ('bstrval', BSTR),\n                    ('parray', c_void_p),\n                    ('record', _tagRecord)]\n\n    _fields_ = [('vt', c_ushort),\n                ('wReserved1', c_ushort),\n                ('wReserved2', c_ushort),\n                ('wReserved3', c_ushort),\n                ('vdata', _tagData)]\n\n    @staticmethod\n    def create_empty():\n        variant = VARIANT()\n        variant.vt = VT_EMPTY\n        variant.vdata.llval = 0\n        return variant\n\n    @staticmethod\n    def create_safearray_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_ARRAY | VT_UI1\n\n        length = len(text)\n        variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length)\n        pvdata = c_void_p()\n        _SafeArrayAccessData(variant.vdata.parray, byref(pvdata))\n        ctypes.memmove(pvdata, text, length)\n        _SafeArrayUnaccessData(variant.vdata.parray)\n\n        return variant\n\n    @staticmethod\n    def create_bstr_from_str(text):\n        variant = VARIANT()\n        variant.vt = VT_BSTR\n        variant.vdata.bstrval = BSTR(text)\n        return variant\n\n    @staticmethod\n    def create_bool_false():\n        variant = VARIANT()\n        variant.vt = VT_BOOL\n        variant.vdata.boolval = 0\n        return variant\n\n    def is_safearray_of_bytes(self):\n        return self.vt == VT_ARRAY | VT_UI1\n\n    def str_from_safearray(self):\n        assert self.vt == VT_ARRAY | VT_UI1\n        pvdata = c_void_p()\n        count = c_long()\n        _SafeArrayGetUBound(self.vdata.parray, 1, byref(count))\n        count = c_long(count.value + 1)\n        _SafeArrayAccessData(self.vdata.parray, byref(pvdata))\n        text = ctypes.string_at(pvdata, count)\n        _SafeArrayUnaccessData(self.vdata.parray)\n        return text\n\n    def __del__(self):\n        _VariantClear(self)\n\n# HRESULT VariantClear(_Inout_ VARIANTARG *pvarg);\n_VariantClear = _oleaut32.VariantClear\n_VariantClear.argtypes = [POINTER(VARIANT)]\n\n\nclass GUID(Structure):\n\n    ''' GUID structure in python. '''\n\n    _fields_ = [(\"data1\", c_ulong),\n                (\"data2\", c_ushort),\n                (\"data3\", c_ushort),\n                (\"data4\", c_byte * 8)]\n\n    def __init__(self, name=None):\n        if name is not None:\n            _CLSIDFromString(unicode(name), byref(self))\n\n\nclass _WinHttpRequest(c_void_p):\n\n    '''\n    Maps the Com API to Python class functions. 
Not all methods in\n    IWinHttpWebRequest are mapped - only the methods we use.\n    '''\n    _AddRef = WINFUNCTYPE(c_long) \\\n        (1, 'AddRef')\n    _Release = WINFUNCTYPE(c_long) \\\n        (2, 'Release')\n    _SetProxy = WINFUNCTYPE(HRESULT,\n                            HTTPREQUEST_PROXY_SETTING,\n                            VARIANT,\n                            VARIANT) \\\n        (7, 'SetProxy')\n    _SetCredentials = WINFUNCTYPE(HRESULT,\n                                  BSTR,\n                                  BSTR,\n                                  HTTPREQUEST_SETCREDENTIALS_FLAGS) \\\n        (8, 'SetCredentials')\n    _Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT) \\\n        (9, 'Open')\n    _SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR) \\\n        (10, 'SetRequestHeader')\n    _GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p)) \\\n        (11, 'GetResponseHeader')\n    _GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (12, 'GetAllResponseHeaders')\n    _Send = WINFUNCTYPE(HRESULT, VARIANT) \\\n        (13, 'Send')\n    _Status = WINFUNCTYPE(HRESULT, POINTER(c_long)) \\\n        (14, 'Status')\n    _StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (15, 'StatusText')\n    _ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \\\n        (16, 'ResponseText')\n    _ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (17, 'ResponseBody')\n    _ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \\\n        (18, 'ResponseStream')\n    _WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort)) \\\n        (21, 'WaitForResponse')\n    _Abort = WINFUNCTYPE(HRESULT) \\\n        (22, 'Abort')\n    _SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long) \\\n        (23, 'SetTimeouts')\n    _SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR) \\\n        (24, 'SetClientCertificate')\n\n    def open(self, method, url):\n        '''\n        Opens the request.\n\n        method: the request VERB 'GET', 'POST', etc.\n        url: the url to connect\n        '''\n        _WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000)\n\n        flag = VARIANT.create_bool_false()\n        _method = BSTR(method)\n        _url = BSTR(url)\n        _WinHttpRequest._Open(self, _method, _url, flag)\n\n    def set_request_header(self, name, value):\n        ''' Sets the request header. '''\n\n        _name = BSTR(name)\n        _value = BSTR(value)\n        _WinHttpRequest._SetRequestHeader(self, _name, _value)\n\n    def get_all_response_headers(self):\n        ''' Gets back all response headers. '''\n\n        bstr_headers = c_void_p()\n        _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))\n        bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)\n        headers = bstr_headers.value\n        _SysFreeString(bstr_headers)\n        return headers\n\n    def send(self, request=None):\n        ''' Sends the request body. '''\n\n        # Sends VT_EMPTY if it is GET, HEAD request.\n        if request is None:\n            var_empty = VARIANT.create_empty()\n            _WinHttpRequest._Send(self, var_empty)\n        else:  # Sends request body as SAFEArray.\n            _request = VARIANT.create_safearray_from_str(request)\n            _WinHttpRequest._Send(self, _request)\n\n    def status(self):\n        ''' Gets status of response. 
'''\n\n        status = c_long()\n        _WinHttpRequest._Status(self, byref(status))\n        return int(status.value)\n\n    def status_text(self):\n        ''' Gets status text of response. '''\n\n        bstr_status_text = c_void_p()\n        _WinHttpRequest._StatusText(self, byref(bstr_status_text))\n        bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)\n        status_text = bstr_status_text.value\n        _SysFreeString(bstr_status_text)\n        return status_text\n\n    def response_body(self):\n        '''\n        Gets response body as a SAFEARRAY and converts the SAFEARRAY to str.\n        If the body starts with the 3-byte UTF-8 byte order mark followed by\n        <?xml, the BOM is stripped.\n        '''\n        var_respbody = VARIANT()\n        _WinHttpRequest._ResponseBody(self, byref(var_respbody))\n        if var_respbody.is_safearray_of_bytes():\n            respbody = var_respbody.str_from_safearray()\n            if respbody[3:].startswith(b'<?xml') and\\\n               respbody.startswith(b'\\xef\\xbb\\xbf'):\n                respbody = respbody[3:]\n            return respbody\n        else:\n            return ''\n\n    def set_client_certificate(self, certificate):\n        '''Sets client certificate for the request. '''\n        _certificate = BSTR(certificate)\n        _WinHttpRequest._SetClientCertificate(self, _certificate)\n\n    def set_tunnel(self, host, port):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''\n        url = host\n        if port:\n            url = url + u':' + port\n\n        var_host = VARIANT.create_bstr_from_str(url)\n        var_empty = VARIANT.create_empty()\n\n        _WinHttpRequest._SetProxy(\n            self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)\n\n    def set_proxy_credentials(self, user, password):\n        _WinHttpRequest._SetCredentials(\n            self, BSTR(user), BSTR(password),\n            HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)\n\n    def __del__(self):\n        if self.value is not None:\n            _WinHttpRequest._Release(self)\n\n\nclass _Response(object):\n\n    ''' Response class corresponding to the response returned from httplib\n    HTTPConnection. '''\n\n    def __init__(self, _status, _status_text, _length, _headers, _respbody):\n        self.status = _status\n        self.reason = _status_text\n        self.length = _length\n        self.headers = _headers\n        self.respbody = _respbody\n\n    def getheaders(self):\n        '''Returns response headers.'''\n        return self.headers\n\n    def read(self, _length):\n        '''Returns response body. '''\n        return self.respbody[:_length]\n\n\nclass _HTTPConnection(object):\n\n    ''' Class corresponding to httplib HTTPConnection class. '''\n\n    def __init__(self, host, cert_file=None, key_file=None, protocol='http'):\n        ''' Initializes the IWinHttpWebRequest Com Object.'''\n        self.host = unicode(host)\n        self.cert_file = cert_file\n        self._httprequest = _WinHttpRequest()\n        self.protocol = protocol\n        clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')\n        iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')\n        _CoInitialize(None)\n        _CoCreateInstance(byref(clsid), 0, 1, byref(iid),\n                          byref(self._httprequest))\n\n    def close(self):\n        pass\n\n    def set_tunnel(self, host, port=None, headers=None):\n        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. 
'''\n        self._httprequest.set_tunnel(unicode(host), unicode(str(port)))\n\n    def set_proxy_credentials(self, user, password):\n        self._httprequest.set_proxy_credentials(\n            unicode(user), unicode(password))\n\n    def putrequest(self, method, uri):\n        ''' Connects to host and sends the request. '''\n\n        protocol = unicode(self.protocol + '://')\n        url = protocol + self.host + unicode(uri)\n        self._httprequest.open(unicode(method), url)\n\n        # sets certificate for the connection if cert_file is set.\n        if self.cert_file is not None:\n            self._httprequest.set_client_certificate(unicode(self.cert_file))\n\n    def putheader(self, name, value):\n        ''' Sends the headers of request. '''\n        if sys.version_info < (3,):\n            name = str(name).decode('utf-8')\n            value = str(value).decode('utf-8')\n        self._httprequest.set_request_header(name, value)\n\n    def endheaders(self):\n        ''' No operation. Exists only to provide the same interface of httplib\n        HTTPConnection.'''\n        pass\n\n    def send(self, request_body):\n        ''' Sends request body. '''\n        if not request_body:\n            self._httprequest.send()\n        else:\n            self._httprequest.send(request_body)\n\n    def getresponse(self):\n        ''' Gets the response and generates the _Response object'''\n        status = self._httprequest.status()\n        status_text = self._httprequest.status_text()\n\n        resp_headers = self._httprequest.get_all_response_headers()\n        fixed_headers = []\n        for resp_header in resp_headers.split('\\n'):\n            if (resp_header.startswith('\\t') or\\\n                resp_header.startswith(' ')) and fixed_headers:\n                # append to previous header\n                fixed_headers[-1] += resp_header\n            else:\n                fixed_headers.append(resp_header)\n\n        headers = []\n        for resp_header in fixed_headers:\n            if ':' in resp_header:\n                pos = resp_header.find(':')\n                headers.append(\n                    (resp_header[:pos].lower(), resp_header[pos + 1:].strip()))\n\n        body = self._httprequest.response_body()\n        length = len(body)\n\n        return _Response(status, status_text, length, headers, body)\n"
  },
  {
    "path": "OSPatching/azure/servicebus/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport ast\nimport json\nimport sys\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    WindowsAzureError,\n    xml_escape,\n    _create_entry,\n    _general_error_handler,\n    _get_entry_properties,\n    _get_child_nodes,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE,\n    _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK,\n    _ERROR_QUEUE_NOT_FOUND,\n    _ERROR_TOPIC_NOT_FOUND,\n    )\nfrom azure.http import HTTPError\n\n# default rule name for subscription\nDEFAULT_RULE_NAME = '$Default'\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE'\nAZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY'\nAZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER'\n\n# namespace used for converting rules to objects\nXML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class corresponding to Queue Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''\n\n    def __init__(self, lock_duration=None, max_size_in_megabytes=None,\n                 requires_duplicate_detection=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 duplicate_detection_history_time_window=None,\n                 max_delivery_count=None, enable_batched_operations=None,\n                 size_in_bytes=None, message_count=None):\n\n        self.lock_duration = lock_duration\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.max_delivery_count = max_delivery_count\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n        self.message_count = message_count\n\n\nclass Topic(WindowsAzureData):\n\n    ''' Topic class corresponding to Topic Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. 
'''\n\n    def __init__(self, default_message_time_to_live=None,\n                 max_size_in_megabytes=None, requires_duplicate_detection=None,\n                 duplicate_detection_history_time_window=None,\n                 enable_batched_operations=None, size_in_bytes=None):\n\n        self.default_message_time_to_live = default_message_time_to_live\n        self.max_size_in_megabytes = max_size_in_megabytes\n        self.requires_duplicate_detection = requires_duplicate_detection\n        self.duplicate_detection_history_time_window = \\\n            duplicate_detection_history_time_window\n        self.enable_batched_operations = enable_batched_operations\n        self.size_in_bytes = size_in_bytes\n\n    @property\n    def max_size_in_mega_bytes(self):\n        import warnings\n        warnings.warn(\n            'This attribute has been changed to max_size_in_megabytes.')\n        return self.max_size_in_megabytes\n\n    @max_size_in_mega_bytes.setter\n    def max_size_in_mega_bytes(self, value):\n        self.max_size_in_megabytes = value\n\n\nclass Subscription(WindowsAzureData):\n\n    ''' Subscription class corresponding to Subscription Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''\n\n    def __init__(self, lock_duration=None, requires_session=None,\n                 default_message_time_to_live=None,\n                 dead_lettering_on_message_expiration=None,\n                 dead_lettering_on_filter_evaluation_exceptions=None,\n                 enable_batched_operations=None, max_delivery_count=None,\n                 message_count=None):\n\n        self.lock_duration = lock_duration\n        self.requires_session = requires_session\n        self.default_message_time_to_live = default_message_time_to_live\n        self.dead_lettering_on_message_expiration = \\\n            dead_lettering_on_message_expiration\n        self.dead_lettering_on_filter_evaluation_exceptions = \\\n            dead_lettering_on_filter_evaluation_exceptions\n        self.enable_batched_operations = enable_batched_operations\n        self.max_delivery_count = max_delivery_count\n        self.message_count = message_count\n\n\nclass Rule(WindowsAzureData):\n\n    ''' Rule class corresponding to Rule Description:\n    http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''\n\n    def __init__(self, filter_type=None, filter_expression=None,\n                 action_type=None, action_expression=None):\n        self.filter_type = filter_type\n        self.filter_expression = filter_expression\n        self.action_type = action_type\n        self.action_expression = action_expression\n\n\nclass Message(WindowsAzureData):\n\n    ''' Message class used in the send message / get message APIs.
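A minimal send sketch (hypothetical service instance and queue name):\n\n        msg = Message(b'hello world')\n        bus_service.send_queue_message('taskqueue', msg)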
'''\n\n    def __init__(self, body=None, service_bus_service=None, location=None,\n                 custom_properties=None,\n                 type='application/atom+xml;type=entry;charset=utf-8',\n                 broker_properties=None):\n        self.body = body\n        self.location = location\n        self.broker_properties = broker_properties\n        self.custom_properties = custom_properties\n        self.type = type\n        self.service_bus_service = service_bus_service\n        self._topic_name = None\n        self._subscription_name = None\n        self._queue_name = None\n\n        if not service_bus_service:\n            return\n\n        # If location is set, extract the queue name for a queue message, or\n        # the topic and subscription names for a topic message.\n        if location:\n            if '/subscriptions/' in location:\n                pos = location.find('/subscriptions/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._topic_name = location[pos1 + 1:pos]\n                pos += len('/subscriptions/')\n                pos1 = location.find('/', pos)\n                self._subscription_name = location[pos:pos1]\n            elif '/messages/' in location:\n                pos = location.find('/messages/')\n                pos1 = location.rfind('/', 0, pos - 1)\n                self._queue_name = location[pos1 + 1:pos]\n\n    def delete(self):\n        ''' Deletes itself if the queue name, or the topic and subscription\n        names, can be determined. '''\n        if self._queue_name:\n            self.service_bus_service.delete_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.delete_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)\n\n    def unlock(self):\n        ''' Unlocks itself if the queue name, or the topic and subscription\n        names, can be determined.
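A typical flow (hypothetical topic and subscription names):\n\n            msg = bus_service.peek_lock_subscription_message('mytopic',\n                                                             'mysub')\n            # processing failed, so release the lock for other receivers\n            msg.unlock()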
'''\n        if self._queue_name:\n            self.service_bus_service.unlock_queue_message(\n                self._queue_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        elif self._topic_name and self._subscription_name:\n            self.service_bus_service.unlock_subscription_message(\n                self._topic_name,\n                self._subscription_name,\n                self.broker_properties['SequenceNumber'],\n                self.broker_properties['LockToken'])\n        else:\n            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)\n\n    def add_headers(self, request):\n        ''' Adds additional headers to the request for a message request.'''\n\n        # Adds custom properties\n        if self.custom_properties:\n            for name, value in self.custom_properties.items():\n                if sys.version_info < (3,) and isinstance(value, unicode):\n                    request.headers.append(\n                        (name, '\"' + value.encode('utf-8') + '\"'))\n                elif isinstance(value, str):\n                    request.headers.append((name, '\"' + str(value) + '\"'))\n                elif isinstance(value, datetime):\n                    request.headers.append(\n                        (name, '\"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '\"'))\n                else:\n                    request.headers.append((name, str(value).lower()))\n\n        # Adds content-type\n        request.headers.append(('Content-Type', self.type))\n\n        # Adds BrokerProperties\n        if self.broker_properties:\n            request.headers.append(\n                ('BrokerProperties', str(self.broker_properties)))\n\n        return request.headers\n\n\ndef _create_message(response, service_instance):\n    ''' Create message from response.\n\n    response: response from service bus cloud server.\n    service_instance: the service bus client.\n    '''\n    respbody = response.body\n    custom_properties = {}\n    broker_properties = None\n    message_type = None\n    message_location = None\n\n    # Gets all information from the response headers.\n    for name, value in response.headers:\n        if name.lower() == 'brokerproperties':\n            broker_properties = json.loads(value)\n        elif name.lower() == 'content-type':\n            message_type = value\n        elif name.lower() == 'location':\n            message_location = value\n        elif name.lower() not in ['content-type',\n                                  'brokerproperties',\n                                  'transfer-encoding',\n                                  'server',\n                                  'location',\n                                  'date']:\n            if '\"' in value:\n                value = value[1:-1]\n                try:\n                    custom_properties[name] = datetime.strptime(\n                        value, '%a, %d %b %Y %H:%M:%S GMT')\n                except ValueError:\n                    custom_properties[name] = value\n            else:  # only int, float or boolean\n                if value.lower() == 'true':\n                    custom_properties[name] = True\n                elif value.lower() == 'false':\n                    custom_properties[name] = False\n                # int('3.1') doesn't work so need to get float('3.14') first\n                elif str(int(float(value))) == value:\n                    custom_properties[name] = int(value)
\n                else:\n                    custom_properties[name] = float(value)\n\n    if message_type is None:\n        message = Message(\n            respbody, service_instance, message_location, custom_properties,\n            'application/atom+xml;type=entry;charset=utf-8', broker_properties)\n    else:\n        message = Message(respbody, service_instance, message_location,\n                          custom_properties, message_type, broker_properties)\n    return message\n\n# convert functions\n\n\ndef _convert_response_to_rule(response):\n    return _convert_xml_to_rule(response.body)\n\n\ndef _convert_xml_to_rule(xmlstr):\n    ''' Converts response xml to rule object.\n\n    The format of xml for rule:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n<content type='application/xml'>\n<RuleDescription\n    xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n    xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n    <Filter i:type=\"SqlFilterExpression\">\n        <SqlExpression>MyProperty='XYZ'</SqlExpression>\n    </Filter>\n    <Action i:type=\"SqlFilterAction\">\n        <SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>\n    </Action>\n</RuleDescription>\n</content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    rule = Rule()\n\n    for rule_desc in _get_children_from_path(xmldoc,\n                                             'entry',\n                                             'content',\n                                             'RuleDescription'):\n        for xml_filter in _get_child_nodes(rule_desc, 'Filter'):\n            filter_type = xml_filter.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'filter_type', str(filter_type))\n            if xml_filter.childNodes:\n\n                for expr in _get_child_nodes(xml_filter, 'SqlExpression'):\n                    setattr(rule, 'filter_expression',\n                            expr.firstChild.nodeValue)\n\n        for xml_action in _get_child_nodes(rule_desc, 'Action'):\n            action_type = xml_action.getAttributeNS(\n                XML_SCHEMA_NAMESPACE, 'type')\n            setattr(rule, 'action_type', str(action_type))\n            if xml_action.childNodes:\n                action_expression = xml_action.childNodes[0].firstChild\n                if action_expression:\n                    setattr(rule, 'action_expression',\n                            action_expression.nodeValue)\n\n    # Extract the id, updated and name values from the feed entry and set\n    # them on the rule.\n    for name, value in _get_entry_properties(xmlstr, True, '/rules').items():\n        setattr(rule, name, value)\n\n    return rule\n\n\ndef _convert_response_to_queue(response):\n    return _convert_xml_to_queue(response.body)\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\ndef _convert_xml_to_queue(xmlstr):\n    ''' Converts xml response to queue object.\n\n    The format of xml response for queue:\n<QueueDescription\n    xmlns=\\\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\\\">\n    <MaxSizeInBytes>10000</MaxSizeInBytes>\n    <DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive>\n    <LockDuration>PT2M</LockDuration>\n    <RequiresGroupedReceives>False</RequiresGroupedReceives>\n    <SupportsDuplicateDetection>False</SupportsDuplicateDetection>\n    ...\n</QueueDescription>\n\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    queue = Queue()\n\n    invalid_queue = True
\n    # Get a node for each attribute in the Queue class; if nothing is found\n    # then the response is not valid xml for a Queue.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'QueueDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            queue.lock_duration = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            queue.max_size_in_megabytes = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            queue.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'RequiresSession')\n        if node_value is not None:\n            queue.requires_session = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            queue.default_message_time_to_live = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            queue.dead_lettering_on_message_expiration = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            queue.duplicate_detection_history_time_window = node_value\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            queue.enable_batched_operations = _parse_bool(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            queue.max_delivery_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'MessageCount')\n        if node_value is not None:\n            queue.message_count = int(node_value)\n            invalid_queue = False\n\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            queue.size_in_bytes = int(node_value)\n            invalid_queue = False\n\n    if invalid_queue:\n        raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND)\n\n    # Extract the id, updated and name values from the feed entry and set\n    # them on the queue.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(queue, name, value)\n\n    return queue\n\n\ndef _convert_response_to_topic(response):\n    return _convert_xml_to_topic(response.body)\n\n\ndef _convert_xml_to_topic(xmlstr):\n    '''Converts xml response to topic\n\n    The xml format for topic:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <TopicDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">
\n        <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <MaxSizeInMegabytes>1024</MaxSizeInMegabytes>\n        <RequiresDuplicateDetection>false</RequiresDuplicateDetection>\n        <DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </TopicDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    topic = Topic()\n\n    invalid_topic = True\n\n    # Get a node for each attribute in the Topic class; if nothing is found\n    # then the response is not valid xml for a Topic.\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'TopicDescription'):\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            topic.default_message_time_to_live = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')\n        if node_value is not None:\n            topic.max_size_in_megabytes = int(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresDuplicateDetection')\n        if node_value is not None:\n            topic.requires_duplicate_detection = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'DuplicateDetectionHistoryTimeWindow')\n        if node_value is not None:\n            topic.duplicate_detection_history_time_window = node_value\n            invalid_topic = False\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            topic.enable_batched_operations = _parse_bool(node_value)\n            invalid_topic = False\n        node_value = _get_first_child_node_value(desc, 'SizeInBytes')\n        if node_value is not None:\n            topic.size_in_bytes = int(node_value)\n            invalid_topic = False\n\n    if invalid_topic:\n        raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND)\n\n    # Extract the id, updated and name values from the feed entry and set\n    # them on the topic.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        setattr(topic, name, value)\n    return topic\n\n\ndef _convert_response_to_subscription(response):\n    return _convert_xml_to_subscription(response.body)\n\n\ndef _convert_xml_to_subscription(xmlstr):\n    '''Converts xml response to subscription\n\n    The xml format for subscription:\n<entry xmlns='http://www.w3.org/2005/Atom'>\n    <content type='application/xml'>\n    <SubscriptionDescription\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n        <LockDuration>PT5M</LockDuration>\n        <RequiresSession>false</RequiresSession>\n        <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>\n        <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>\n        <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>\n    </SubscriptionDescription>\n    </content>\n</entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n    subscription = Subscription()\n\n    for desc in _get_children_from_path(xmldoc,\n                                        'entry',\n                                        'content',\n                                        'SubscriptionDescription'):\n        node_value = _get_first_child_node_value(desc, 'LockDuration')\n        if node_value is not None:\n            subscription.lock_duration = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'RequiresSession')\n        if node_value is not None:\n            subscription.requires_session = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DefaultMessageTimeToLive')\n        if node_value is not None:\n            subscription.default_message_time_to_live = node_value\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnFilterEvaluationExceptions')\n        if node_value is not None:\n            subscription.dead_lettering_on_filter_evaluation_exceptions = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'DeadLetteringOnMessageExpiration')\n        if node_value is not None:\n            subscription.dead_lettering_on_message_expiration = \\\n                _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'EnableBatchedOperations')\n        if node_value is not None:\n            subscription.enable_batched_operations = _parse_bool(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MaxDeliveryCount')\n        if node_value is not None:\n            subscription.max_delivery_count = int(node_value)\n\n        node_value = _get_first_child_node_value(\n            desc, 'MessageCount')\n        if node_value is not None:\n            subscription.message_count = int(node_value)\n\n    for name, value in _get_entry_properties(xmlstr,\n                                             True,\n                                             '/subscriptions').items():\n        setattr(subscription, name, value)\n\n    return subscription\n\n\ndef _convert_subscription_to_xml(subscription):\n    '''\n    Converts a subscription object to xml to send.
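For example (hypothetical value): a Subscription(lock_duration='PT1M')\n    serializes to an Atom entry whose body contains\n    <LockDuration>PT1M</LockDuration>.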
The order of each field of\n    subscription in xml is very important so we can't simply call\n    convert_class_to_xml.\n\n    subscription: the subscription object to be converted.\n    '''\n\n    subscription_body = '<SubscriptionDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if subscription:\n        if subscription.lock_duration is not None:\n            subscription_body += ''.join(\n                ['<LockDuration>',\n                 str(subscription.lock_duration),\n                 '</LockDuration>'])\n\n        if subscription.requires_session is not None:\n            subscription_body += ''.join(\n                ['<RequiresSession>',\n                 str(subscription.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if subscription.default_message_time_to_live is not None:\n            subscription_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(subscription.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if subscription.dead_lettering_on_message_expiration is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(subscription.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:\n            subscription_body += ''.join(\n                ['<DeadLetteringOnFilterEvaluationExceptions>',\n                 str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(),\n                 '</DeadLetteringOnFilterEvaluationExceptions>'])\n\n        if subscription.enable_batched_operations is not None:\n            subscription_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(subscription.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if subscription.max_delivery_count is not None:\n            subscription_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(subscription.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if subscription.message_count is not None:\n            subscription_body += ''.join(\n                ['<MessageCount>',\n                 str(subscription.message_count),\n                 '</MessageCount>'])\n\n    subscription_body += '</SubscriptionDescription>'\n    return _create_entry(subscription_body)\n\n\ndef _convert_rule_to_xml(rule):\n    '''\n    Converts a rule object to xml to send.
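For example (hypothetical expression): a Rule(filter_type='SqlFilter',\n    filter_expression=\"MyProperty='XYZ'\") produces a <Filter i:type=\"SqlFilter\">\n    element containing that SqlExpression.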
The order of each field of rule\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    rule: the rule object to be converted.\n    '''\n    rule_body = '<RuleDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if rule:\n        if rule.filter_type:\n            rule_body += ''.join(\n                ['<Filter i:type=\"',\n                 xml_escape(rule.filter_type),\n                 '\">'])\n            if rule.filter_type == 'CorrelationFilter':\n                rule_body += ''.join(\n                    ['<CorrelationId>',\n                     xml_escape(rule.filter_expression),\n                     '</CorrelationId>'])\n            else:\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.filter_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Filter>'\n        if rule.action_type:\n            rule_body += ''.join(\n                ['<Action i:type=\"',\n                 xml_escape(rule.action_type),\n                 '\">'])\n            if rule.action_type == 'SqlRuleAction':\n                rule_body += ''.join(\n                    ['<SqlExpression>',\n                     xml_escape(rule.action_expression),\n                     '</SqlExpression>'])\n                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'\n            rule_body += '</Action>'\n    rule_body += '</RuleDescription>'\n\n    return _create_entry(rule_body)\n\n\ndef _convert_topic_to_xml(topic):\n    '''\n    Converts a topic object to xml to send.  The order of each field of topic\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    topic: the topic object to be converted.\n    '''\n\n    topic_body = '<TopicDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if topic:\n        if topic.default_message_time_to_live is not None:\n            topic_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(topic.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if topic.max_size_in_megabytes is not None:\n            topic_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(topic.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if topic.requires_duplicate_detection is not None:\n            topic_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(topic.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if topic.duplicate_detection_history_time_window is not None:\n            topic_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(topic.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if topic.enable_batched_operations is not None:\n            topic_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(topic.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if topic.size_in_bytes is not None:\n            topic_body += ''.join(\n                ['<SizeInBytes>',
\n                 str(topic.size_in_bytes),\n                 '</SizeInBytes>'])\n\n    topic_body += '</TopicDescription>'\n\n    return _create_entry(topic_body)\n\n\ndef _convert_queue_to_xml(queue):\n    '''\n    Converts a queue object to xml to send.  The order of each field of queue\n    in xml is very important so we can't simply call convert_class_to_xml.\n\n    queue: the queue object to be converted.\n    '''\n    queue_body = '<QueueDescription xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n    if queue:\n        if queue.lock_duration:\n            queue_body += ''.join(\n                ['<LockDuration>',\n                 str(queue.lock_duration),\n                 '</LockDuration>'])\n\n        if queue.max_size_in_megabytes is not None:\n            queue_body += ''.join(\n                ['<MaxSizeInMegabytes>',\n                 str(queue.max_size_in_megabytes),\n                 '</MaxSizeInMegabytes>'])\n\n        if queue.requires_duplicate_detection is not None:\n            queue_body += ''.join(\n                ['<RequiresDuplicateDetection>',\n                 str(queue.requires_duplicate_detection).lower(),\n                 '</RequiresDuplicateDetection>'])\n\n        if queue.requires_session is not None:\n            queue_body += ''.join(\n                ['<RequiresSession>',\n                 str(queue.requires_session).lower(),\n                 '</RequiresSession>'])\n\n        if queue.default_message_time_to_live is not None:\n            queue_body += ''.join(\n                ['<DefaultMessageTimeToLive>',\n                 str(queue.default_message_time_to_live),\n                 '</DefaultMessageTimeToLive>'])\n\n        if queue.dead_lettering_on_message_expiration is not None:\n            queue_body += ''.join(\n                ['<DeadLetteringOnMessageExpiration>',\n                 str(queue.dead_lettering_on_message_expiration).lower(),\n                 '</DeadLetteringOnMessageExpiration>'])\n\n        if queue.duplicate_detection_history_time_window is not None:\n            queue_body += ''.join(\n                ['<DuplicateDetectionHistoryTimeWindow>',\n                 str(queue.duplicate_detection_history_time_window),\n                 '</DuplicateDetectionHistoryTimeWindow>'])\n\n        if queue.max_delivery_count is not None:\n            queue_body += ''.join(\n                ['<MaxDeliveryCount>',\n                 str(queue.max_delivery_count),\n                 '</MaxDeliveryCount>'])\n\n        if queue.enable_batched_operations is not None:\n            queue_body += ''.join(\n                ['<EnableBatchedOperations>',\n                 str(queue.enable_batched_operations).lower(),\n                 '</EnableBatchedOperations>'])\n\n        if queue.size_in_bytes is not None:\n            queue_body += ''.join(\n                ['<SizeInBytes>',\n                 str(queue.size_in_bytes),\n                 '</SizeInBytes>'])\n\n        if queue.message_count is not None:\n            queue_body += ''.join(\n                ['<MessageCount>',\n                 str(queue.message_count),\n                 '</MessageCount>'])\n\n    queue_body += '</QueueDescription>'\n    return _create_entry(queue_body)\n\n\ndef _service_bus_error_handler(http_error):\n    ''' Simple error handler for service bus service. '''\n    return _general_error_handler(http_error)\n\nfrom azure.servicebus.servicebusservice import ServiceBusService\n"
  },
  {
    "path": "OSPatching/azure/servicebus/servicebusservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport datetime\nimport os\nimport time\n\nfrom azure import (\n    WindowsAzureError,\n    SERVICE_BUS_HOST_BASE,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _sign_string,\n    _str,\n    _unicode_type,\n    _update_request_uri_query,\n    url_quote,\n    url_unquote,\n    _validate_not_none,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicebus import (\n    AZURE_SERVICEBUS_NAMESPACE,\n    AZURE_SERVICEBUS_ACCESS_KEY,\n    AZURE_SERVICEBUS_ISSUER,\n    _convert_topic_to_xml,\n    _convert_response_to_topic,\n    _convert_queue_to_xml,\n    _convert_response_to_queue,\n    _convert_subscription_to_xml,\n    _convert_response_to_subscription,\n    _convert_rule_to_xml,\n    _convert_response_to_rule,\n    _convert_xml_to_queue,\n    _convert_xml_to_topic,\n    _convert_xml_to_subscription,\n    _convert_xml_to_rule,\n    _create_message,\n    _service_bus_error_handler,\n    )\n\n\nclass ServiceBusService(object):\n\n    def __init__(self, service_namespace=None, account_key=None, issuer=None,\n                 x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,\n                 shared_access_key_name=None, shared_access_key_value=None,\n                 authentication=None):\n        '''\n        Initializes the service bus service for a namespace with the specified\n        authentication settings (SAS or ACS).\n\n        service_namespace:\n            Service bus namespace, required for all operations. If None,\n            the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.\n        account_key:\n            ACS authentication account key. If None, the value is set to the\n            AZURE_SERVICEBUS_ACCESS_KEY env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        issuer:\n            ACS authentication issuer. If None, the value is set to the\n            AZURE_SERVICEBUS_ISSUER env variable.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        x_ms_version: Unused. Kept for backwards compatibility.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. 
Override this\n            for on-premise.\n        shared_access_key_name:\n            SAS authentication key name.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        shared_access_key_value:\n            SAS authentication key value.\n            Note that if both SAS and ACS settings are specified, SAS is used.\n        authentication:\n            Instance of authentication class. If this is specified, then\n            ACS and SAS parameters are ignored.\n        '''\n        self.requestid = None\n        self.service_namespace = service_namespace\n        self.host_base = host_base\n\n        if not self.service_namespace:\n            self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)\n\n        if not self.service_namespace:\n            raise WindowsAzureError('You need to provide servicebus namespace')\n\n        if authentication:\n            self.authentication = authentication\n        else:\n            if not account_key:\n                account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)\n            if not issuer:\n                issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)\n\n            if shared_access_key_name and shared_access_key_value:\n                self.authentication = ServiceBusSASAuthentication(\n                    shared_access_key_name,\n                    shared_access_key_value)\n            elif account_key and issuer:\n                self.authentication = ServiceBusWrapTokenAuthentication(\n                    account_key,\n                    issuer)\n            else:\n                raise WindowsAzureError(\n                    'You need to provide servicebus access key and Issuer OR shared access key and value')\n\n        self._httpclient = _HTTPClient(service_instance=self)\n        self._filter = self._httpclient.perform_request\n\n    # Backwards compatibility:\n    # account_key and issuer used to be stored on the service class, they are\n    # now stored on the authentication class.\n    @property\n    def account_key(self):\n        return self.authentication.account_key\n\n    @account_key.setter\n    def account_key(self, value):\n        self.authentication.account_key = value\n\n    @property\n    def issuer(self):\n        return self.authentication.issuer\n\n    @issuer.setter\n    def issuer(self, value):\n        self.authentication.issuer = value\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = ServiceBusService(\n            service_namespace=self.service_namespace,\n            authentication=self.authentication)\n\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def create_queue(self, queue_name, queue=None, fail_on_exist=False):\n        '''\n        Creates a new queue. Once created, this queue's resource manifest is\n        immutable.\n\n        queue_name: Name of the queue to create.\n        queue: Queue object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.body = _get_request_body(_convert_queue_to_xml(queue))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Deletes an existing queue. This operation will also remove all\n        associated state including messages in the queue.\n\n        queue_name: Name of the queue to delete.\n        fail_not_exist:\n            Specify whether to throw an exception if the queue doesn't exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue(self, queue_name):\n        '''\n        Retrieves an existing queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_queue(response)\n\n    def list_queues(self):\n        '''\n        Enumerates the queues in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Queues'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_queue)\n\n    def 
create_topic(self, topic_name, topic=None, fail_on_exist=False):\n        '''\n        Creates a new topic. Once created, this topic's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic to create.\n        topic: Topic object to create.\n        fail_on_exist:\n            Specify whether to throw an exception when the topic exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.body = _get_request_body(_convert_topic_to_xml(topic))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_topic(self, topic_name, fail_not_exist=False):\n        '''\n        Deletes an existing topic. This operation will also remove all\n        associated state including associated subscriptions.\n\n        topic_name: Name of the topic to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the topic doesn't\n            exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_topic(self, topic_name):\n        '''\n        Retrieves the description for the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_topic(response)\n\n    def list_topics(self):\n        '''\n        Retrieves the topics in the service namespace.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/$Resources/Topics'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_topic)\n\n    def create_rule(self, topic_name, subscription_name, rule_name, rule=None,\n                    fail_on_exist=False):\n        '''\n        Creates a new rule.
Once created, this rule's resource manifest is\n        immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        fail_on_exist:\n            Specify whether to throw an exception when the rule exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.body = _get_request_body(_convert_rule_to_xml(rule))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_rule(self, topic_name, subscription_name, rule_name,\n                    fail_not_exist=False):\n        '''\n        Deletes an existing rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name:\n            Name of the rule to delete.  DEFAULT_RULE_NAME=$Default.\n            Use DEFAULT_RULE_NAME to delete the default rule for the\n            subscription.\n        fail_not_exist:\n            Specify whether to throw an exception when the rule doesn't\n            exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_rule(self, topic_name, subscription_name, rule_name):\n        '''\n        Retrieves the description for the specified rule.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        rule_name: Name of the rule.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('rule_name', rule_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + \\\n            '/rules/' + _str(rule_name) + ''\n        request.path, request.query = _update_request_uri_query(request)
\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_rule(response)\n\n    def list_rules(self, topic_name, subscription_name):\n        '''\n        Retrieves the rules that exist under the specified subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/rules/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_rule)\n\n    def create_subscription(self, topic_name, subscription_name,\n                            subscription=None, fail_on_exist=False):\n        '''\n        Creates a new subscription. Once created, this subscription's resource\n        manifest is immutable.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        fail_on_exist:\n            Specify whether to throw an exception when the subscription\n            exists.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.body = _get_request_body(\n            _convert_subscription_to_xml(subscription))\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_subscription(self, topic_name, subscription_name,\n                            fail_not_exist=False):\n        '''\n        Deletes an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the subscription\n            doesn't exist.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)
\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_subscription(self, topic_name, subscription_name):\n        '''\n        Gets an existing subscription.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_subscription(response)\n\n    def list_subscriptions(self, topic_name):\n        '''\n        Retrieves the subscriptions in the specified topic.\n\n        topic_name: Name of the topic.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/subscriptions/'\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response,\n                                          _convert_xml_to_subscription)\n\n    def send_topic_message(self, topic_name, message=None):\n        '''\n        Enqueues a message into the specified topic. The limit to the number\n        of messages which may be present in the topic is governed by the\n        message size in MaxTopicSizeInBytes. If this message causes the topic\n        to exceed its quota, a quota exceeded error is returned and the\n        message will be rejected.\n\n        topic_name: Name of the topic.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only(\n            'message.body', message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_subscription_message(self, topic_name, subscription_name,\n                                       timeout='60'):\n        '''\n        This operation is used to atomically retrieve and lock a message for\n        processing. The message is guaranteed not to be delivered to other\n        receivers (on the same subscription only) during the lock duration\n        specified in the subscription description. Once the lock expires, the\n        message will be available to other receivers.
\n        complete processing of the message, the receiver should issue a delete\n        command with the lock ID received from this operation. To abandon\n        processing of the message and unlock it for other receivers, an Unlock\n        Message command should be issued, or the lock duration period can\n        expire.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(topic_name) + '/subscriptions/' + \\\n            _str(subscription_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Unlocks a message for processing by other receivers on a given\n        subscription. This operation deletes the lock object, causing the\n        message to be unlocked. A message must have first been locked by a\n        receiver before this operation is called.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_subscription_message(self, topic_name, subscription_name,\n                                         timeout='60'):\n        '''\n        Read and delete a message from a subscription as an atomic operation.\n        This operation should be used when a best-effort guarantee is\n        sufficient for an application; that is, using this operation it is\n        possible for messages to be lost if processing fails.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        timeout: Optional. The timeout parameter is expressed in seconds.
\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_subscription_message(self, topic_name, subscription_name,\n                                    sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the\n        subscription. This operation should only be called after successfully\n        processing a previously locked message, in order to maintain\n        At-Least-Once delivery assurances.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('topic_name', topic_name)\n        _validate_not_none('subscription_name', subscription_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(topic_name) + \\\n                       '/subscriptions/' + _str(subscription_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def send_queue_message(self, queue_name, message=None):\n        '''\n        Sends a message into the specified queue. The total size of the\n        messages held by a queue is capped by the queue's MaxSizeInMegabytes\n        setting. If this message causes the queue
\n        to exceed its quota, a quota exceeded error is returned and the\n        message will be rejected.\n\n        queue_name: Name of the queue.\n        message: Message object containing message body and properties.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message', message)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.headers = message.add_headers(request)\n        request.body = _get_request_body_bytes_only('message.body',\n                                                    message.body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def peek_lock_queue_message(self, queue_name, timeout='60'):\n        '''\n        Atomically retrieves and locks a message from a queue for processing.\n        The message is guaranteed not to be delivered to other receivers (on\n        the same queue only) during the lock duration period specified\n        in the queue description. Once the lock expires, the message will be\n        available to other receivers. In order to complete processing of the\n        message, the receiver should issue a delete command with the lock ID\n        received from this operation. To abandon processing of the message and\n        unlock it for other receivers, an Unlock Message command should be\n        issued, or the lock duration period can expire.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def unlock_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Unlocks a message for processing by other receivers on a given\n        queue. This operation deletes the lock object, causing the\n        message to be unlocked. A message must have first been locked by a
\n        receiver before this operation is called.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be unlocked as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n    def read_delete_queue_message(self, queue_name, timeout='60'):\n        '''\n        Reads and deletes a message from a queue as an atomic operation. This\n        operation should be used when a best-effort guarantee is sufficient\n        for an application; that is, using this operation it is possible for\n        messages to be lost if processing fails.\n\n        queue_name: Name of the queue.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages/head'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        response = self._perform_request(request)\n\n        return _create_message(response, self)\n\n    def delete_queue_message(self, queue_name, sequence_number, lock_token):\n        '''\n        Completes processing on a locked message and deletes it from the queue.\n        This operation should only be called after successfully processing a\n        previously locked message, in order to maintain At-Least-Once delivery\n        assurances.\n\n        queue_name: Name of the queue.\n        sequence_number:\n            The sequence number of the message to be deleted as returned in\n            BrokerProperties['SequenceNumber'] by the Peek Message operation.\n        lock_token:\n            The ID of the lock as returned by the Peek Message operation in\n            BrokerProperties['LockToken']\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('sequence_number', sequence_number)\n        _validate_not_none('lock_token', lock_token)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + \\\n                       '/messages/' + _str(sequence_number) + \\\n                       '/' + _str(lock_token)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = self._update_service_bus_header(request)\n        self._perform_request(request)\n\n
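    # Illustrative sketch (not part of the original module): the usual\n    # peek-lock consumption loop, assuming a configured service instance\n    # 'sbs', an existing queue 'taskqueue' and a hypothetical handler\n    # 'process':\n    #\n    #     msg = sbs.receive_queue_message('taskqueue', peek_lock=True)\n    #     if msg.body is not None:\n    #         process(msg.body)\n    #         msg.delete()  # complete the message so it is not redelivered\n    #\n    def receive_queue_message(self, queue_name, peek_lock=True, 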
timeout=60):\n        '''\n        Receive a message from a queue for processing.\n\n        queue_name: Name of the queue.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_queue_message(queue_name, timeout)\n        else:\n            return self.read_delete_queue_message(queue_name, timeout)\n\n    def receive_subscription_message(self, topic_name, subscription_name,\n                                     peek_lock=True, timeout=60):\n        '''\n        Receive a message from a subscription for processing.\n\n        topic_name: Name of the topic.\n        subscription_name: Name of the subscription.\n        peek_lock:\n            Optional. True to retrieve and lock the message. False to read and\n            delete the message. Default is True (lock).\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        if peek_lock:\n            return self.peek_lock_subscription_message(topic_name,\n                                                       subscription_name,\n                                                       timeout)\n        else:\n            return self.read_delete_subscription_message(topic_name,\n                                                         subscription_name,\n                                                         timeout)\n\n    def _get_host(self):\n        return self.service_namespace + self.host_base\n\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _service_bus_error_handler(ex)\n\n        return resp\n\n    def _update_service_bus_header(self, request):\n        ''' Add additional headers for service bus. '''\n\n        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n            request.headers.append(('Content-Length', str(len(request.body))))\n\n        # If it is not a GET or HEAD request, a content-type must be set.\n        if request.method not in ['GET', 'HEAD']:\n            for name, _ in request.headers:\n                if 'content-type' == name.lower():\n                    break\n            else:\n                request.headers.append(\n                    ('Content-Type',\n                     'application/atom+xml;type=entry;charset=utf-8'))\n\n        # Adds authorization header for authentication.\n        self.authentication.sign_request(request, self._httpclient)\n\n        return request.headers\n\n\n# Token cache for Authentication\n# Shared by the different instances of ServiceBusWrapTokenAuthentication\n_tokens = {}\n\n\nclass ServiceBusWrapTokenAuthentication:\n    def __init__(self, account_key, issuer):\n        self.account_key = account_key\n        self.issuer = issuer\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        ''' Return the signed authorization string containing the token. '''\n        return 'WRAP access_token=\"' + \\\n                self._get_token(request.host, request.path, httpclient) + '\"'\n\n    def _token_is_expired(self, token):\n        ''' Check whether the token has expired. '''
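\n        # The WRAP token carries its expiry inline as\n        # '...&ExpiresOn=<seconds-since-epoch>&...'; the expiry is parsed\n        # straight out of the raw token string below.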
\n        time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')\n        time_pos_end = token.find('&', time_pos_begin)\n        token_expire_time = int(token[time_pos_begin:time_pos_end])\n        time_now = time.mktime(time.localtime())\n\n        # Use a 30-second buffer so the token does not expire while the\n        # request carrying it is in transit to the server.\n        return (token_expire_time - time_now) < 30\n\n    def _get_token(self, host, path, httpclient):\n        '''\n        Returns a token for the request.\n\n        host: the service bus host name.\n        path: the request path.\n        '''\n        wrap_scope = 'http://' + host + path + self.issuer + self.account_key\n\n        # Return the cached token if one exists and has not expired.\n        if wrap_scope in _tokens:\n            token = _tokens[wrap_scope]\n            if not self._token_is_expired(token):\n                return token\n\n        # Get a new token from the access control server.\n        request = HTTPRequest()\n        request.protocol_override = 'https'\n        request.host = host.replace('.servicebus.', '-sb.accesscontrol.')\n        request.method = 'POST'\n        request.path = '/WRAPv0.9'\n        request.body = ('wrap_name=' + url_quote(self.issuer) +\n                        '&wrap_password=' + url_quote(self.account_key) +\n                        '&wrap_scope=' +\n                        url_quote('http://' + host + path)).encode('utf-8')\n        request.headers.append(('Content-Length', str(len(request.body))))\n        resp = httpclient.perform_request(request)\n\n        token = resp.body.decode('utf-8')\n        token = url_unquote(token[token.find('=') + 1:token.rfind('&')])\n        _tokens[wrap_scope] = token\n\n        return token\n\n\nclass ServiceBusSASAuthentication:\n    def __init__(self, key_name, key_value):\n        self.key_name = key_name\n        self.key_value = key_value\n\n    def sign_request(self, request, httpclient):\n        request.headers.append(\n            ('Authorization', self._get_authorization(request, httpclient)))\n\n    def _get_authorization(self, request, httpclient):\n        uri = httpclient.get_uri(request)\n        uri = url_quote(uri, '').lower()\n        expiry = str(self._get_expiry())\n\n        to_sign = uri + '\\n' + expiry\n        signature = url_quote(_sign_string(self.key_value, to_sign, False), '')\n\n        auth_format = 'SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'\n        auth = auth_format.format(signature, expiry, self.key_name, uri)\n\n        return auth\n\n    def _get_expiry(self):\n        '''Returns the UTC time, in seconds since the Epoch, at which this\n        signed request expires (5 minutes from now).'''\n        return int(round(time.time() + 300))\n
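\n\n# Usage sketch (not part of the original module): an authentication helper is\n# attached to the service client, which invokes sign_request() on every\n# outgoing request. The key name and key value below are hypothetical\n# placeholders.\n#\n#     auth = ServiceBusSASAuthentication('RootManageSharedAccessKey',\n#                                        '<shared-access-key-value>')\n#     auth.sign_request(request, httpclient)  # appends an Authorization header\n"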
  },
  {
    "path": "OSPatching/azure/servicemanagement/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom xml.dom import minidom\nfrom azure import (\n    WindowsAzureData,\n    _Base64String,\n    _create_entry,\n    _dict_of,\n    _encode_base64,\n    _general_error_handler,\n    _get_children_from_path,\n    _get_first_child_node_value,\n    _list_of,\n    _scalar_list_of,\n    _str,\n    _xml_attribute,\n    )\n\n#-----------------------------------------------------------------------------\n# Constants for Azure app environment settings.\nAZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'\nAZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'\n\n# x-ms-version for service management.\nX_MS_VERSION = '2013-06-01'\n\n#-----------------------------------------------------------------------------\n# Data classes\n\n\nclass StorageServices(WindowsAzureData):\n\n    def __init__(self):\n        self.storage_services = _list_of(StorageService)\n\n    def __iter__(self):\n        return iter(self.storage_services)\n\n    def __len__(self):\n        return len(self.storage_services)\n\n    def __getitem__(self, index):\n        return self.storage_services[index]\n\n\nclass StorageService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = ''\n        self.service_name = ''\n        self.storage_service_properties = StorageAccountProperties()\n        self.storage_service_keys = StorageServiceKeys()\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass StorageAccountProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.affinity_group = u''\n        self.location = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.endpoints = _scalar_list_of(str, 'Endpoint')\n        self.geo_replication_enabled = False\n        self.geo_primary_region = u''\n        self.status_of_primary = u''\n        self.geo_secondary_region = u''\n        self.status_of_secondary = u''\n        self.last_geo_failover_time = u''\n        self.creation_time = u''\n\n\nclass StorageServiceKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.primary = u''\n        self.secondary = u''\n\n\nclass Locations(WindowsAzureData):\n\n    def __init__(self):\n        self.locations = _list_of(Location)\n\n    def __iter__(self):\n        return iter(self.locations)\n\n    def __len__(self):\n        return len(self.locations)\n\n    def __getitem__(self, index):\n        return self.locations[index]\n\n\nclass Location(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.display_name = u''\n        self.available_services = _scalar_list_of(str, 'AvailableService')\n\n\nclass 
AffinityGroup(WindowsAzureData):\n\n    def __init__(self):\n        self.name = ''\n        self.label = _Base64String()\n        self.description = u''\n        self.location = u''\n        self.hosted_services = HostedServices()\n        self.storage_services = StorageServices()\n        self.capabilities = _scalar_list_of(str, 'Capability')\n\n\nclass AffinityGroups(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_groups = _list_of(AffinityGroup)\n\n    def __iter__(self):\n        return iter(self.affinity_groups)\n\n    def __len__(self):\n        return len(self.affinity_groups)\n\n    def __getitem__(self, index):\n        return self.affinity_groups[index]\n\n\nclass HostedServices(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_services = _list_of(HostedService)\n\n    def __iter__(self):\n        return iter(self.hosted_services)\n\n    def __len__(self):\n        return len(self.hosted_services)\n\n    def __getitem__(self, index):\n        return self.hosted_services[index]\n\n\nclass HostedService(WindowsAzureData):\n\n    def __init__(self):\n        self.url = u''\n        self.service_name = u''\n        self.hosted_service_properties = HostedServiceProperties()\n        self.deployments = Deployments()\n\n\nclass HostedServiceProperties(WindowsAzureData):\n\n    def __init__(self):\n        self.description = u''\n        self.location = u''\n        self.affinity_group = u''\n        self.label = _Base64String()\n        self.status = u''\n        self.date_created = u''\n        self.date_last_modified = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass VirtualNetworkSites(WindowsAzureData):\n\n    def __init__(self):\n        self.virtual_network_sites = _list_of(VirtualNetworkSite)\n\n    def __iter__(self):\n        return iter(self.virtual_network_sites)\n\n    def __len__(self):\n        return len(self.virtual_network_sites)\n\n    def __getitem__(self, index):\n        return self.virtual_network_sites[index]\n\n\nclass VirtualNetworkSite(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.id = u''\n        self.affinity_group = u''\n        self.subnets = Subnets()\n\n\nclass Subnets(WindowsAzureData):\n\n    def __init__(self):\n        self.subnets = _list_of(Subnet)\n\n    def __iter__(self):\n        return iter(self.subnets)\n\n    def __len__(self):\n        return len(self.subnets)\n\n    def __getitem__(self, index):\n        return self.subnets[index]\n\n\nclass Subnet(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.address_prefix = u''\n\n\n\nclass Deployments(WindowsAzureData):\n\n    def __init__(self):\n        self.deployments = _list_of(Deployment)\n\n    def __iter__(self):\n        return iter(self.deployments)\n\n    def __len__(self):\n        return len(self.deployments)\n\n    def __getitem__(self, index):\n        return self.deployments[index]\n\n\nclass Deployment(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.deployment_slot = u''\n        self.private_id = u''\n        self.status = u''\n        self.label = _Base64String()\n        self.url = u''\n        self.configuration = _Base64String()\n        self.role_instance_list = RoleInstanceList()\n        self.upgrade_status = UpgradeStatus()\n        self.upgrade_domain_count = u''\n        self.role_list = RoleList()\n        self.sdk_version = u''\n        self.input_endpoint_list 
= InputEndpoints()\n        self.locked = False\n        self.rollback_allowed = False\n        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()\n        self.created_time = u''\n        self.virtual_network_name = u''\n        self.last_modified_time = u''\n        self.extended_properties = _dict_of(\n            'ExtendedProperty', 'Name', 'Value')\n\n\nclass RoleInstanceList(WindowsAzureData):\n\n    def __init__(self):\n        self.role_instances = _list_of(RoleInstance)\n\n    def __iter__(self):\n        return iter(self.role_instances)\n\n    def __len__(self):\n        return len(self.role_instances)\n\n    def __getitem__(self, index):\n        return self.role_instances[index]\n\n\nclass RoleInstance(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.instance_name = u''\n        self.instance_status = u''\n        self.instance_upgrade_domain = 0\n        self.instance_fault_domain = 0\n        self.instance_size = u''\n        self.instance_state_details = u''\n        self.instance_error_code = u''\n        self.ip_address = u''\n        self.instance_endpoints = InstanceEndpoints()\n        self.power_state = u''\n        self.fqdn = u''\n        self.host_name = u''\n\n\nclass InstanceEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.instance_endpoints = _list_of(InstanceEndpoint)\n\n    def __iter__(self):\n        return iter(self.instance_endpoints)\n\n    def __len__(self):\n        return len(self.instance_endpoints)\n\n    def __getitem__(self, index):\n        return self.instance_endpoints[index]\n\n\nclass InstanceEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.vip = u''\n        self.public_port = u''\n        self.local_port = u''\n        self.protocol = u''\n\n\nclass UpgradeStatus(WindowsAzureData):\n\n    def __init__(self):\n        self.upgrade_type = u''\n        self.current_upgrade_domain_state = u''\n        self.current_upgrade_domain = u''\n\n\nclass InputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(InputEndpoint)\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass InputEndpoint(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.vip = u''\n        self.port = u''\n\n\nclass RoleList(WindowsAzureData):\n\n    def __init__(self):\n        self.roles = _list_of(Role)\n\n    def __iter__(self):\n        return iter(self.roles)\n\n    def __len__(self):\n        return len(self.roles)\n\n    def __getitem__(self, index):\n        return self.roles[index]\n\n\nclass Role(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass PersistentVMDowntimeInfo(WindowsAzureData):\n\n    def __init__(self):\n        self.start_time = u''\n        self.end_time = u''\n        self.status = u''\n\n\nclass Certificates(WindowsAzureData):\n\n    def __init__(self):\n        self.certificates = 
_list_of(Certificate)\n\n    def __iter__(self):\n        return iter(self.certificates)\n\n    def __len__(self):\n        return len(self.certificates)\n\n    def __getitem__(self, index):\n        return self.certificates[index]\n\n\nclass Certificate(WindowsAzureData):\n\n    def __init__(self):\n        self.certificate_url = u''\n        self.thumbprint = u''\n        self.thumbprint_algorithm = u''\n        self.data = u''\n\n\nclass OperationError(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.message = u''\n\n\nclass Operation(WindowsAzureData):\n\n    def __init__(self):\n        self.id = u''\n        self.status = u''\n        self.http_status_code = u''\n        self.error = OperationError()\n\n\nclass OperatingSystem(WindowsAzureData):\n\n    def __init__(self):\n        self.version = u''\n        self.label = _Base64String()\n        self.is_default = True\n        self.is_active = True\n        self.family = 0\n        self.family_label = _Base64String()\n\n\nclass OperatingSystems(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_systems = _list_of(OperatingSystem)\n\n    def __iter__(self):\n        return iter(self.operating_systems)\n\n    def __len__(self):\n        return len(self.operating_systems)\n\n    def __getitem__(self, index):\n        return self.operating_systems[index]\n\n\nclass OperatingSystemFamily(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.label = _Base64String()\n        self.operating_systems = OperatingSystems()\n\n\nclass OperatingSystemFamilies(WindowsAzureData):\n\n    def __init__(self):\n        self.operating_system_families = _list_of(OperatingSystemFamily)\n\n    def __iter__(self):\n        return iter(self.operating_system_families)\n\n    def __len__(self):\n        return len(self.operating_system_families)\n\n    def __getitem__(self, index):\n        return self.operating_system_families[index]\n\n\nclass Subscription(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_id = u''\n        self.subscription_name = u''\n        self.subscription_status = u''\n        self.account_admin_live_email_id = u''\n        self.service_admin_live_email_id = u''\n        self.max_core_count = 0\n        self.max_storage_accounts = 0\n        self.max_hosted_services = 0\n        self.current_core_count = 0\n        self.current_hosted_services = 0\n        self.current_storage_accounts = 0\n        self.max_virtual_network_sites = 0\n        self.max_local_network_sites = 0\n        self.max_dns_servers = 0\n\n\nclass AvailabilityResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.result = False\n\n\nclass SubscriptionCertificates(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificates = _list_of(SubscriptionCertificate)\n\n    def __iter__(self):\n        return iter(self.subscription_certificates)\n\n    def __len__(self):\n        return len(self.subscription_certificates)\n\n    def __getitem__(self, index):\n        return self.subscription_certificates[index]\n\n\nclass SubscriptionCertificate(WindowsAzureData):\n\n    def __init__(self):\n        self.subscription_certificate_public_key = u''\n        self.subscription_certificate_thumbprint = u''\n        self.subscription_certificate_data = u''\n        self.created = u''\n\n\nclass Images(WindowsAzureData):\n\n    def __init__(self):\n        self.images = _list_of(OSImage)\n\n    def __iter__(self):\n        return 
iter(self.images)\n\n    def __len__(self):\n        return len(self.images)\n\n    def __getitem__(self, index):\n        return self.images[index]\n\n\nclass OSImage(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.category = u''\n        self.location = u''\n        self.logical_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.eula = u''\n        self.description = u''\n\n\nclass Disks(WindowsAzureData):\n\n    def __init__(self):\n        self.disks = _list_of(Disk)\n\n    def __iter__(self):\n        return iter(self.disks)\n\n    def __len__(self):\n        return len(self.disks)\n\n    def __getitem__(self, index):\n        return self.disks[index]\n\n\nclass Disk(WindowsAzureData):\n\n    def __init__(self):\n        self.affinity_group = u''\n        self.attached_to = AttachedTo()\n        self.has_operating_system = u''\n        self.is_corrupted = u''\n        self.location = u''\n        self.logical_disk_size_in_gb = 0\n        self.label = u''\n        self.media_link = u''\n        self.name = u''\n        self.os = u''\n        self.source_image_name = u''\n\n\nclass AttachedTo(WindowsAzureData):\n\n    def __init__(self):\n        self.hosted_service_name = u''\n        self.deployment_name = u''\n        self.role_name = u''\n\n\nclass PersistentVMRole(WindowsAzureData):\n\n    def __init__(self):\n        self.role_name = u''\n        self.role_type = u''\n        self.os_version = u''  # undocumented\n        self.configuration_sets = ConfigurationSets()\n        self.availability_set_name = u''\n        self.data_virtual_hard_disks = DataVirtualHardDisks()\n        self.os_virtual_hard_disk = OSVirtualHardDisk()\n        self.role_size = u''\n        self.default_win_rm_certificate_thumbprint = u''\n\n\nclass ConfigurationSets(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_sets = _list_of(ConfigurationSet)\n\n    def __iter__(self):\n        return iter(self.configuration_sets)\n\n    def __len__(self):\n        return len(self.configuration_sets)\n\n    def __getitem__(self, index):\n        return self.configuration_sets[index]\n\n\nclass ConfigurationSet(WindowsAzureData):\n\n    def __init__(self):\n        self.configuration_set_type = u'NetworkConfiguration'\n        self.role_type = u''\n        self.input_endpoints = ConfigurationSetInputEndpoints()\n        self.subnet_names = _scalar_list_of(str, 'SubnetName')\n\n\nclass ConfigurationSetInputEndpoints(WindowsAzureData):\n\n    def __init__(self):\n        self.input_endpoints = _list_of(\n            ConfigurationSetInputEndpoint, 'InputEndpoint')\n\n    def __iter__(self):\n        return iter(self.input_endpoints)\n\n    def __len__(self):\n        return len(self.input_endpoints)\n\n    def __getitem__(self, index):\n        return self.input_endpoints[index]\n\n\nclass ConfigurationSetInputEndpoint(WindowsAzureData):\n\n    '''\n    Initializes a network configuration input endpoint.\n\n    name: Specifies the name for the external endpoint.\n    protocol:\n        Specifies the protocol to use to inspect the virtual machine\n        availability status. 
Possible values are: HTTP, TCP.\n    port: Specifies the external port to use for the endpoint.\n    local_port:\n        Specifies the internal port on which the virtual machine is listening\n        to serve the endpoint.\n    load_balanced_endpoint_set_name:\n        Specifies a name for a set of load-balanced endpoints. Specifying this\n        element for a given endpoint adds it to the set. If you are setting an\n        endpoint to use to connect to the virtual machine via the Remote\n        Desktop, do not set this property.\n    enable_direct_server_return:\n        Specifies whether direct server return load balancing is enabled.\n    '''\n\n    def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'',\n                 load_balanced_endpoint_set_name=u'',\n                 enable_direct_server_return=False):\n        self.enable_direct_server_return = enable_direct_server_return\n        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name\n        self.local_port = local_port\n        self.name = name\n        self.port = port\n        self.load_balancer_probe = LoadBalancerProbe()\n        self.protocol = protocol\n\n\nclass WindowsConfigurationSet(WindowsAzureData):\n\n    def __init__(self, computer_name=None, admin_password=None,\n                 reset_password_on_first_logon=None,\n                 enable_automatic_updates=None, time_zone=None,\n                 admin_username=None):\n        self.configuration_set_type = u'WindowsProvisioningConfiguration'\n        self.computer_name = computer_name\n        self.admin_password = admin_password\n        self.admin_username = admin_username\n        self.reset_password_on_first_logon = reset_password_on_first_logon\n        self.enable_automatic_updates = enable_automatic_updates\n        self.time_zone = time_zone\n        self.domain_join = DomainJoin()\n        self.stored_certificate_settings = StoredCertificateSettings()\n        self.win_rm = WinRM()\n\n\nclass DomainJoin(WindowsAzureData):\n\n    def __init__(self):\n        self.credentials = Credentials()\n        self.join_domain = u''\n        self.machine_object_ou = u''\n\n\nclass Credentials(WindowsAzureData):\n\n    def __init__(self):\n        self.domain = u''\n        self.username = u''\n        self.password = u''\n\n\nclass StoredCertificateSettings(WindowsAzureData):\n\n    def __init__(self):\n        self.stored_certificate_settings = _list_of(CertificateSetting)\n\n    def __iter__(self):\n        return iter(self.stored_certificate_settings)\n\n    def __len__(self):\n        return len(self.stored_certificate_settings)\n\n    def __getitem__(self, index):\n        return self.stored_certificate_settings[index]\n\n\nclass CertificateSetting(WindowsAzureData):\n\n    '''\n    Initializes a certificate setting.\n\n    thumbprint:\n        Specifies the thumbprint of the certificate to be provisioned. 
The\n        thumbprint must specify an existing service certificate.\n    store_name:\n        Specifies the name of the certificate store from which to retrieve the\n        certificate.\n    store_location:\n        Specifies the target certificate store location on the virtual machine.\n        The only supported value is LocalMachine.\n    '''\n\n    def __init__(self, thumbprint=u'', store_name=u'', store_location=u''):\n        self.thumbprint = thumbprint\n        self.store_name = store_name\n        self.store_location = store_location\n\n\nclass WinRM(WindowsAzureData):\n\n    '''\n    Contains configuration settings for the Windows Remote Management service\n    on the Virtual Machine.\n    '''\n\n    def __init__(self):\n        self.listeners = Listeners()\n\n\nclass Listeners(WindowsAzureData):\n\n    def __init__(self):\n        self.listeners = _list_of(Listener)\n\n    def __iter__(self):\n        return iter(self.listeners)\n\n    def __len__(self):\n        return len(self.listeners)\n\n    def __getitem__(self, index):\n        return self.listeners[index]\n\n\nclass Listener(WindowsAzureData):\n\n    '''\n    Specifies the protocol and certificate information for the listener.\n\n    protocol:\n        Specifies the protocol of the listener. Possible values are: Http,\n        Https. The value is case sensitive.\n    certificate_thumbprint:\n        Optional. Specifies the certificate thumbprint for the secure\n        connection. If this value is not specified, a self-signed certificate\n        is generated and used for the Virtual Machine.\n    '''\n\n    def __init__(self, protocol=u'', certificate_thumbprint=u''):\n        self.protocol = protocol\n        self.certificate_thumbprint = certificate_thumbprint\n\n\nclass LinuxConfigurationSet(WindowsAzureData):\n\n    def __init__(self, host_name=None, user_name=None, user_password=None,\n                 disable_ssh_password_authentication=None):\n        self.configuration_set_type = u'LinuxProvisioningConfiguration'\n        self.host_name = host_name\n        self.user_name = user_name\n        self.user_password = user_password\n        self.disable_ssh_password_authentication =\\\n            disable_ssh_password_authentication\n        self.ssh = SSH()\n\n\nclass SSH(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = PublicKeys()\n        self.key_pairs = KeyPairs()\n\n\nclass PublicKeys(WindowsAzureData):\n\n    def __init__(self):\n        self.public_keys = _list_of(PublicKey)\n\n    def __iter__(self):\n        return iter(self.public_keys)\n\n    def __len__(self):\n        return len(self.public_keys)\n\n    def __getitem__(self, index):\n        return self.public_keys[index]\n\n\nclass PublicKey(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass KeyPairs(WindowsAzureData):\n\n    def __init__(self):\n        self.key_pairs = _list_of(KeyPair)\n\n    def __iter__(self):\n        return iter(self.key_pairs)\n\n    def __len__(self):\n        return len(self.key_pairs)\n\n    def __getitem__(self, index):\n        return self.key_pairs[index]\n\n\nclass KeyPair(WindowsAzureData):\n\n    def __init__(self, fingerprint=u'', path=u''):\n        self.fingerprint = fingerprint\n        self.path = path\n\n\nclass LoadBalancerProbe(WindowsAzureData):\n\n    def __init__(self):\n        self.path = u''\n        self.port = u''\n        self.protocol = u''\n\n\n
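# Illustrative sketch (not part of the original module): composing a Linux\n# provisioning configuration that disables password logins and provisions one\n# SSH public key. The host name, user name and fingerprint are hypothetical.\n#\n#     linux_config = LinuxConfigurationSet(\n#         'myhost', 'azureuser', None,\n#         disable_ssh_password_authentication=True)\n#     linux_config.ssh.public_keys.public_keys.append(PublicKey(\n#         'B7:2F:...:AF', u'/home/azureuser/.ssh/authorized_keys'))\n\n\n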
class DataVirtualHardDisks(WindowsAzureData):\n\n    def __init__(self):\n        self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)\n\n    def __iter__(self):\n        return iter(self.data_virtual_hard_disks)\n\n    def __len__(self):\n        return len(self.data_virtual_hard_disks)\n\n    def __getitem__(self, index):\n        return self.data_virtual_hard_disks[index]\n\n\nclass DataVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self):\n        self.host_caching = u''\n        self.disk_label = u''\n        self.disk_name = u''\n        self.lun = 0\n        self.logical_disk_size_in_gb = 0\n        self.media_link = u''\n\n\nclass OSVirtualHardDisk(WindowsAzureData):\n\n    def __init__(self, source_image_name=None, media_link=None,\n                 host_caching=None, disk_label=None, disk_name=None):\n        self.source_image_name = source_image_name\n        self.media_link = media_link\n        self.host_caching = host_caching\n        self.disk_label = disk_label\n        self.disk_name = disk_name\n        self.os = u''  # undocumented, not used when adding a role\n\n\nclass AsynchronousOperationResult(WindowsAzureData):\n\n    def __init__(self, request_id=None):\n        self.request_id = request_id\n\n\nclass ServiceBusRegion(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.fullname = u''\n\n\nclass ServiceBusNamespace(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.region = u''\n        self.default_key = u''\n        self.status = u''\n        self.created_at = u''\n        self.acs_management_endpoint = u''\n        self.servicebus_endpoint = u''\n        self.connection_string = u''\n        self.subscription_id = u''\n        self.enabled = False\n\n\nclass WebSpaces(WindowsAzureData):\n\n    def __init__(self):\n        self.web_space = _list_of(WebSpace)\n\n    def __iter__(self):\n        return iter(self.web_space)\n\n    def __len__(self):\n        return len(self.web_space)\n\n    def __getitem__(self, index):\n        return self.web_space[index]\n\n\nclass WebSpace(WindowsAzureData):\n\n    def __init__(self):\n        self.availability_state = u''\n        self.geo_location = u''\n        self.geo_region = u''\n        self.name = u''\n        self.plan = u''\n        self.status = u''\n        self.subscription = u''\n\n\nclass Sites(WindowsAzureData):\n\n    def __init__(self):\n        self.site = _list_of(Site)\n\n    def __iter__(self):\n        return iter(self.site)\n\n    def __len__(self):\n        return len(self.site)\n\n    def __getitem__(self, index):\n        return self.site[index]\n\n\nclass Site(WindowsAzureData):\n\n    def __init__(self):\n        self.admin_enabled = False\n        self.availability_state = ''\n        self.compute_mode = ''\n        self.enabled = False\n        self.enabled_host_names = _scalar_list_of(str, 'a:string')\n        self.host_name_ssl_states = HostNameSslStates()\n        self.host_names = _scalar_list_of(str, 'a:string')\n        self.last_modified_time_utc = ''\n        self.name = ''\n        self.repository_site_name = ''\n        self.self_link = ''\n        self.server_farm = ''\n        self.site_mode = ''\n        self.state = ''\n        self.storage_recovery_default_state = ''\n        self.usage_state = ''\n        self.web_space = ''\n\n\nclass HostNameSslStates(WindowsAzureData):\n\n    def __init__(self):\n        self.host_name_ssl_state = _list_of(HostNameSslState)\n\n    def __iter__(self):\n
      return iter(self.host_name_ssl_state)\n\n    def __len__(self):\n        return len(self.host_name_ssl_state)\n\n    def __getitem__(self, index):\n        return self.host_name_ssl_state[index]\n\n\nclass HostNameSslState(WindowsAzureData):\n    \n    def __init__(self):\n        self.name = u''\n        self.ssl_state = u''\n        \n\nclass PublishData(WindowsAzureData):\n    _xml_name = 'publishData'\n    \n    def __init__(self):\n        self.publish_profiles = _list_of(PublishProfile, 'publishProfile')\n\nclass PublishProfile(WindowsAzureData):\n    \n    def __init__(self):\n        self.profile_name = _xml_attribute('profileName')\n        self.publish_method = _xml_attribute('publishMethod')\n        self.publish_url = _xml_attribute('publishUrl')\n        self.msdeploysite = _xml_attribute('msdeploySite')\n        self.user_name = _xml_attribute('userName')\n        self.user_pwd = _xml_attribute('userPWD')\n        self.destination_app_url = _xml_attribute('destinationAppUrl')\n        self.sql_server_db_connection_string = _xml_attribute('SQLServerDBConnectionString')\n        self.my_sqldb_connection_string = _xml_attribute('mySQLDBConnectionString')\n        self.hosting_provider_forum_link = _xml_attribute('hostingProviderForumLink')\n        self.control_panel_link = _xml_attribute('controlPanelLink')\n    \nclass QueueDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.lock_duration = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.requires_session = False\n        self.default_message_time_to_live = u''\n        self.dead_lettering_on_message_expiration = False\n        self.duplicate_detection_history_time_window = u''\n        self.max_delivery_count = 0\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.message_count = 0\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.auto_delete_on_idle = u''\n        self.count_details = CountDetails()\n        self.entity_availability_status = u''\n    \nclass TopicDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.default_message_time_to_live = u''\n        self.max_size_in_megabytes = 0\n        self.requires_duplicate_detection = False\n        self.duplicate_detection_history_time_window = u''\n        self.enable_batched_operations = False\n        self.size_in_bytes = 0\n        self.filtering_messages_before_publishing = False\n        self.is_anonymous_accessible = False\n        self.authorization_rules = AuthorizationRules()\n        self.status = u''\n        self.created_at = u''\n        self.updated_at = u''\n        self.accessed_at = u''\n        self.support_ordering = False\n        self.count_details = CountDetails()\n        self.subscription_count = 0\n\nclass CountDetails(WindowsAzureData):\n    \n    def __init__(self):\n        self.active_message_count = 0\n        self.dead_letter_message_count = 0\n        self.scheduled_message_count = 0\n        self.transfer_message_count = 0\n        self.transfer_dead_letter_message_count = 0\n\nclass NotificationHubDescription(WindowsAzureData):\n    \n    def __init__(self):\n        self.registration_ttl = u''\n        self.authorization_rules = AuthorizationRules()\n\nclass 
AuthorizationRules(WindowsAzureData):\n\n    def __init__(self):\n        self.authorization_rule = _list_of(AuthorizationRule)\n\n    def __iter__(self):\n        return iter(self.authorization_rule)\n\n    def __len__(self):\n        return len(self.authorization_rule)\n\n    def __getitem__(self, index):\n        return self.authorization_rule[index]\n\n\nclass AuthorizationRule(WindowsAzureData):\n\n    def __init__(self):\n        self.claim_type = u''\n        self.claim_value = u''\n        self.rights = _scalar_list_of(str, 'AccessRights')\n        self.created_time = u''\n        self.modified_time = u''\n        self.key_name = u''\n        self.primary_key = u''\n        self.secondary_key = u''\n\n\nclass RelayDescription(WindowsAzureData):\n\n    def __init__(self):\n        self.path = u''\n        self.listener_type = u''\n        self.listener_count = 0\n        self.created_at = u''\n        self.updated_at = u''\n\n\nclass MetricResponses(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_response = _list_of(MetricResponse)\n\n    def __iter__(self):\n        return iter(self.metric_response)\n\n    def __len__(self):\n        return len(self.metric_response)\n\n    def __getitem__(self, index):\n        return self.metric_response[index]\n\n\nclass MetricResponse(WindowsAzureData):\n\n    def __init__(self):\n        self.code = u''\n        self.data = Data()\n        self.message = u''\n\n\nclass Data(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.end_time = u''\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.start_time = u''\n        self.time_grain = u''\n        self.unit = u''\n        self.values = Values()\n\n\nclass Values(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_sample = _list_of(MetricSample)\n\n    def __iter__(self):\n        return iter(self.metric_sample)\n\n    def __len__(self):\n        return len(self.metric_sample)\n\n    def __getitem__(self, index):\n        return self.metric_sample[index]\n\n\nclass MetricSample(WindowsAzureData):\n\n    def __init__(self):\n        self.count = 0\n        self.time_created = u''\n        self.total = 0\n\n\nclass MetricDefinitions(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_definition = _list_of(MetricDefinition)\n\n    def __iter__(self):\n        return iter(self.metric_definition)\n\n    def __len__(self):\n        return len(self.metric_definition)\n\n    def __getitem__(self, index):\n        return self.metric_definition[index]\n\n\nclass MetricDefinition(WindowsAzureData):\n\n    def __init__(self):\n        self.display_name = u''\n        self.metric_availabilities = MetricAvailabilities()\n        self.name = u''\n        self.primary_aggregation_type = u''\n        self.unit = u''\n\n\nclass MetricAvailabilities(WindowsAzureData):\n\n    def __init__(self):\n        self.metric_availability = _list_of(MetricAvailability,\n                                            'MetricAvailabilily')\n\n    def __iter__(self):\n        return iter(self.metric_availability)\n\n    def __len__(self):\n        return len(self.metric_availability)\n\n    def __getitem__(self, index):\n        return self.metric_availability[index]\n\n\nclass MetricAvailability(WindowsAzureData):\n\n    def __init__(self):\n        self.retention = u''\n        self.time_grain = u''\n\n\nclass Servers(WindowsAzureData):\n\n    def __init__(self):\n        self.server = _list_of(Server)\n\n    def __iter__(self):\n        return iter(self.server)
\n\n    def __len__(self):\n        return len(self.server)\n\n    def __getitem__(self, index):\n        return self.server[index]\n\n\nclass Server(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.administrator_login = u''\n        self.location = u''\n        self.fully_qualified_domain_name = u''\n        self.version = u''\n\n\nclass Database(WindowsAzureData):\n\n    def __init__(self):\n        self.name = u''\n        self.type = u''\n        self.state = u''\n        self.self_link = u''\n        self.parent_link = u''\n        self.id = 0\n        self.edition = u''\n        self.collation_name = u''\n        self.creation_date = u''\n        self.is_federation_root = False\n        self.is_system_object = False\n        self.max_size_bytes = 0\n\n\ndef _update_management_header(request):\n    ''' Add additional headers for management. '''\n\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # Append additional headers based on the service.\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # If it is not a GET or HEAD request, a content-type must be set.\n    if request.method not in ['GET', 'HEAD']:\n        for name, _ in request.headers:\n            if 'content-type' == name.lower():\n                break\n        else:\n            request.headers.append(\n                ('Content-Type',\n                 'application/atom+xml;type=entry;charset=utf-8'))\n\n    return request.headers\n\n\ndef _parse_response_for_async_op(response):\n    ''' Extracts the request id from the response headers. '''\n\n    if response is None:\n        return None\n\n    result = AsynchronousOperationResult()\n    if response.headers:\n        for name, value in response.headers:\n            if name.lower() == 'x-ms-request-id':\n                result.request_id = value\n\n    return result\n\n\n
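# A quick sketch (not part of the original module) of how the header helper\n# behaves; HTTPRequest here refers to the request class used by the service\n# clients in this package:\n#\n#     request = HTTPRequest()\n#     request.method = 'POST'\n#     request.body = '<CreateAffinityGroup/>'\n#     request.headers = _update_management_header(request)\n#     # headers now include ('x-ms-version', '2013-06-01'), a Content-Length\n#     # entry, and the default atom+xml Content-Type.\n#\ndef _management_error_handler(http_error):\n    ''' Simple error handler for management service. '''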
\n    return _general_error_handler(http_error)\n\n\ndef _lower(text):\n    return text.lower()\n\n\nclass _XmlSerializer(object):\n\n    @staticmethod\n    def create_storage_service_input_to_xml(service_name, description, label,\n                                            affinity_group, location,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateStorageServiceInput',\n            [('ServiceName', service_name),\n             ('Description', description),\n             ('Label', label, _encode_base64),\n             ('AffinityGroup', affinity_group),\n             ('Location', location),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def update_storage_service_input_to_xml(description, label,\n                                            geo_replication_enabled,\n                                            extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpdateStorageServiceInput',\n            [('Description', description),\n             ('Label', label, _encode_base64),\n             ('GeoReplicationEnabled', geo_replication_enabled, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def regenerate_keys_to_xml(key_type):\n        return _XmlSerializer.doc_from_data('RegenerateKeys',\n                                            [('KeyType', key_type)])\n\n    @staticmethod\n    def update_hosted_service_to_xml(label, description, extended_properties):\n        return _XmlSerializer.doc_from_data('UpdateHostedService',\n                                            [('Label', label, _encode_base64),\n                                             ('Description', description)],\n                                            extended_properties)\n\n    @staticmethod\n    def create_hosted_service_to_xml(service_name, label, description,\n                                     location, affinity_group,\n                                     extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateHostedService',\n            [('ServiceName', service_name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location),\n             ('AffinityGroup', affinity_group)],\n            extended_properties)\n\n    @staticmethod\n    def create_deployment_to_xml(name, package_url, label, configuration,\n                                 start_deployment, treat_warnings_as_error,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'CreateDeployment',\n            [('Name', name),\n             ('PackageUrl', package_url),\n             ('Label', label, _encode_base64),\n             ('Configuration', configuration),\n             ('StartDeployment', start_deployment, _lower),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def swap_deployment_to_xml(production, source_deployment):\n        return _XmlSerializer.doc_from_data(\n            'Swap',\n            [('Production', production),\n             ('SourceDeployment', source_deployment)])\n\n
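    # Illustrative note (sketch): each helper above builds the request body\n    # by feeding (element-name, value[, converter]) tuples to doc_from_data;\n    # e.g. create_affinity_group_to_xml('ag1', 'my label', 'desc', 'West US')\n    # yields a <CreateAffinityGroup> document whose <Label> holds the\n    # base64-encoded label. The argument values used here are hypothetical.\n    @staticmethod\n    def update_deployment_status_to_xml(status):\n        return _XmlSerializer.doc_from_data(\n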
       'UpdateDeploymentStatus',\n            [('Status', status)])\n\n    @staticmethod\n    def change_deployment_to_xml(configuration, treat_warnings_as_error, mode,\n                                 extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'ChangeConfiguration',\n            [('Configuration', configuration),\n             ('TreatWarningsAsError', treat_warnings_as_error, _lower),\n             ('Mode', mode)],\n            extended_properties)\n\n    @staticmethod\n    def upgrade_deployment_to_xml(mode, package_url, configuration, label,\n                                  role_to_upgrade, force, extended_properties):\n        return _XmlSerializer.doc_from_data(\n            'UpgradeDeployment',\n            [('Mode', mode),\n             ('PackageUrl', package_url),\n             ('Configuration', configuration),\n             ('Label', label, _encode_base64),\n             ('RoleToUpgrade', role_to_upgrade),\n             ('Force', force, _lower)],\n            extended_properties)\n\n    @staticmethod\n    def rollback_upgrade_to_xml(mode, force):\n        return _XmlSerializer.doc_from_data(\n            'RollbackUpdateOrUpgrade',\n            [('Mode', mode),\n             ('Force', force, _lower)])\n\n    @staticmethod\n    def walk_upgrade_domain_to_xml(upgrade_domain):\n        return _XmlSerializer.doc_from_data(\n            'WalkUpgradeDomain',\n            [('UpgradeDomain', upgrade_domain)])\n\n    @staticmethod\n    def certificate_file_to_xml(data, certificate_format, password):\n        return _XmlSerializer.doc_from_data(\n            'CertificateFile',\n            [('Data', data),\n             ('CertificateFormat', certificate_format),\n             ('Password', password)])\n\n    @staticmethod\n    def create_affinity_group_to_xml(name, label, description, location):\n        return _XmlSerializer.doc_from_data(\n            'CreateAffinityGroup',\n            [('Name', name),\n             ('Label', label, _encode_base64),\n             ('Description', description),\n             ('Location', location)])\n\n    @staticmethod\n    def update_affinity_group_to_xml(label, description):\n        return _XmlSerializer.doc_from_data(\n            'UpdateAffinityGroup',\n            [('Label', label, _encode_base64),\n             ('Description', description)])\n\n    @staticmethod\n    def subscription_certificate_to_xml(public_key, thumbprint, data):\n        return _XmlSerializer.doc_from_data(\n            'SubscriptionCertificate',\n            [('SubscriptionCertificatePublicKey', public_key),\n             ('SubscriptionCertificateThumbprint', thumbprint),\n             ('SubscriptionCertificateData', data)])\n\n    @staticmethod\n    def os_image_to_xml(label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'OSImage',\n            [('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun,\n                                      logical_disk_size_in_gb, media_link,\n                                      source_media_link):\n        return _XmlSerializer.doc_from_data(\n            'DataVirtualHardDisk',\n            [('HostCaching', host_caching),\n             ('DiskLabel', disk_label),\n             ('DiskName', disk_name),\n             ('Lun', lun),\n             ('LogicalDiskSizeInGB', logical_disk_size_in_gb),\n             
('MediaLink', media_link),\n             ('SourceMediaLink', source_media_link)])\n\n    @staticmethod\n    def disk_to_xml(has_operating_system, label, media_link, name, os):\n        return _XmlSerializer.doc_from_data(\n            'Disk',\n            [('HasOperatingSystem', has_operating_system, _lower),\n             ('Label', label),\n             ('MediaLink', media_link),\n             ('Name', name),\n             ('OS', os)])\n\n    @staticmethod\n    def restart_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'RestartRoleOperation',\n            '<OperationType>RestartRoleOperation</OperationType>')\n\n    @staticmethod\n    def shutdown_role_operation_to_xml(post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRoleOperation'),\n             ('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRoleOperation', xml)\n\n    @staticmethod\n    def shutdown_roles_operation_to_xml(role_names, post_shutdown_action):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'ShutdownRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        xml += _XmlSerializer.data_to_xml(\n             [('PostShutdownAction', post_shutdown_action)])\n        return _XmlSerializer.doc_from_xml('ShutdownRolesOperation', xml)\n\n    @staticmethod\n    def start_role_operation_to_xml():\n        return _XmlSerializer.doc_from_xml(\n            'StartRoleOperation',\n            '<OperationType>StartRoleOperation</OperationType>')\n\n    @staticmethod\n    def start_roles_operation_to_xml(role_names):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'StartRolesOperation')])\n        xml += '<Roles>'\n        for role_name in role_names:\n            xml += _XmlSerializer.data_to_xml([('Name', role_name)])\n        xml += '</Roles>'\n        return _XmlSerializer.doc_from_xml('StartRolesOperation', xml)\n\n    @staticmethod\n    def windows_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('ComputerName', configuration.computer_name),\n             ('AdminPassword', configuration.admin_password),\n             ('ResetPasswordOnFirstLogon',\n              configuration.reset_password_on_first_logon,\n              _lower),\n             ('EnableAutomaticUpdates',\n              configuration.enable_automatic_updates,\n              _lower),\n             ('TimeZone', configuration.time_zone)])\n\n        if configuration.domain_join is not None:\n            xml += '<DomainJoin>'\n            xml += '<Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('Domain', configuration.domain_join.credentials.domain),\n                 ('Username', configuration.domain_join.credentials.username),\n                 ('Password', configuration.domain_join.credentials.password)])\n            xml += '</Credentials>'\n            xml += _XmlSerializer.data_to_xml(\n                [('JoinDomain', configuration.domain_join.join_domain),\n                 ('MachineObjectOU',\n                  configuration.domain_join.machine_object_ou)])\n            xml += '</DomainJoin>'\n        if configuration.stored_certificate_settings is not None:\n            xml += 
'<StoredCertificateSettings>'\n            for cert in configuration.stored_certificate_settings:\n                xml += '<CertificateSetting>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('StoreLocation', cert.store_location),\n                     ('StoreName', cert.store_name),\n                     ('Thumbprint', cert.thumbprint)])\n                xml += '</CertificateSetting>'\n            xml += '</StoredCertificateSettings>'\n        if configuration.win_rm is not None:\n            xml += '<WinRM><Listeners>'\n            for listener in configuration.win_rm.listeners:\n                xml += '<Listener>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Protocol', listener.protocol),\n                     ('CertificateThumbprint', listener.certificate_thumbprint)])\n                xml += '</Listener>'\n            xml += '</Listeners></WinRM>'\n        xml += _XmlSerializer.data_to_xml(\n            [('AdminUsername', configuration.admin_username)])\n        return xml\n\n    @staticmethod\n    def linux_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type),\n             ('HostName', configuration.host_name),\n             ('UserName', configuration.user_name),\n             ('UserPassword', configuration.user_password),\n             ('DisableSshPasswordAuthentication',\n              configuration.disable_ssh_password_authentication,\n              _lower)])\n\n        if configuration.ssh is not None:\n            xml += '<SSH>'\n            xml += '<PublicKeys>'\n            for key in configuration.ssh.public_keys:\n                xml += '<PublicKey>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</PublicKey>'\n            xml += '</PublicKeys>'\n            xml += '<KeyPairs>'\n            for key in configuration.ssh.key_pairs:\n                xml += '<KeyPair>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Fingerprint', key.fingerprint),\n                     ('Path', key.path)])\n                xml += '</KeyPair>'\n            xml += '</KeyPairs>'\n            xml += '</SSH>'\n        return xml\n\n    @staticmethod\n    def network_configuration_to_xml(configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('ConfigurationSetType', configuration.configuration_set_type)])\n        xml += '<InputEndpoints>'\n        for endpoint in configuration.input_endpoints:\n            xml += '<InputEndpoint>'\n            xml += _XmlSerializer.data_to_xml(\n                [('LoadBalancedEndpointSetName',\n                  endpoint.load_balanced_endpoint_set_name),\n                 ('LocalPort', endpoint.local_port),\n                 ('Name', endpoint.name),\n                 ('Port', endpoint.port)])\n\n            if endpoint.load_balancer_probe.path or\\\n                endpoint.load_balancer_probe.port or\\\n                endpoint.load_balancer_probe.protocol:\n                xml += '<LoadBalancerProbe>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('Path', endpoint.load_balancer_probe.path),\n                     ('Port', endpoint.load_balancer_probe.port),\n                     ('Protocol', endpoint.load_balancer_probe.protocol)])\n                xml += '</LoadBalancerProbe>'\n\n            xml += 
_XmlSerializer.data_to_xml(\n                [('Protocol', endpoint.protocol),\n                 ('EnableDirectServerReturn',\n                  endpoint.enable_direct_server_return,\n                  _lower)])\n\n            xml += '</InputEndpoint>'\n        xml += '</InputEndpoints>'\n        xml += '<SubnetNames>'\n        for name in configuration.subnet_names:\n            xml += _XmlSerializer.data_to_xml([('SubnetName', name)])\n        xml += '</SubnetNames>'\n        return xml\n\n    @staticmethod\n    def role_to_xml(availability_set_name, data_virtual_hard_disks,\n                    network_configuration_set, os_virtual_hard_disk, role_name,\n                    role_size, role_type, system_configuration_set):\n        xml = _XmlSerializer.data_to_xml([('RoleName', role_name),\n                                          ('RoleType', role_type)])\n\n        xml += '<ConfigurationSets>'\n\n        if system_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            if isinstance(system_configuration_set, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    system_configuration_set)\n            elif isinstance(system_configuration_set, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    system_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        if network_configuration_set is not None:\n            xml += '<ConfigurationSet>'\n            xml += _XmlSerializer.network_configuration_to_xml(\n                network_configuration_set)\n            xml += '</ConfigurationSet>'\n\n        xml += '</ConfigurationSets>'\n\n        if availability_set_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('AvailabilitySetName', availability_set_name)])\n\n        if data_virtual_hard_disks is not None:\n            xml += '<DataVirtualHardDisks>'\n            for hd in data_virtual_hard_disks:\n                xml += '<DataVirtualHardDisk>'\n                xml += _XmlSerializer.data_to_xml(\n                    [('HostCaching', hd.host_caching),\n                     ('DiskLabel', hd.disk_label),\n                     ('DiskName', hd.disk_name),\n                     ('Lun', hd.lun),\n                     ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),\n                     ('MediaLink', hd.media_link)])\n                xml += '</DataVirtualHardDisk>'\n            xml += '</DataVirtualHardDisks>'\n\n        if os_virtual_hard_disk is not None:\n            xml += '<OSVirtualHardDisk>'\n            xml += _XmlSerializer.data_to_xml(\n                [('HostCaching', os_virtual_hard_disk.host_caching),\n                 ('DiskLabel', os_virtual_hard_disk.disk_label),\n                 ('DiskName', os_virtual_hard_disk.disk_name),\n                 ('MediaLink', os_virtual_hard_disk.media_link),\n                 ('SourceImageName', os_virtual_hard_disk.source_image_name)])\n            xml += '</OSVirtualHardDisk>'\n\n        if role_size is not None:\n            xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])\n\n        return xml\n\n    @staticmethod\n    def add_role_to_xml(role_name, system_configuration_set,\n                        os_virtual_hard_disk, role_type,\n                        network_configuration_set, availability_set_name,\n                        data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            
availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def update_role_to_xml(role_name, os_virtual_hard_disk, role_type,\n                           network_configuration_set, availability_set_name,\n                           data_virtual_hard_disks, role_size):\n        xml = _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            None)\n        return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)\n\n    @staticmethod\n    def capture_role_to_xml(post_capture_action, target_image_name,\n                            target_image_label, provisioning_configuration):\n        xml = _XmlSerializer.data_to_xml(\n            [('OperationType', 'CaptureRoleOperation'),\n             ('PostCaptureAction', post_capture_action)])\n\n        if provisioning_configuration is not None:\n            xml += '<ProvisioningConfiguration>'\n            if isinstance(provisioning_configuration, WindowsConfigurationSet):\n                xml += _XmlSerializer.windows_configuration_to_xml(\n                    provisioning_configuration)\n            elif isinstance(provisioning_configuration, LinuxConfigurationSet):\n                xml += _XmlSerializer.linux_configuration_to_xml(\n                    provisioning_configuration)\n            xml += '</ProvisioningConfiguration>'\n\n        xml += _XmlSerializer.data_to_xml(\n            [('TargetImageLabel', target_image_label),\n             ('TargetImageName', target_image_name)])\n\n        return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)\n\n    @staticmethod\n    def virtual_machine_deployment_to_xml(deployment_name, deployment_slot,\n                                          label, role_name,\n                                          system_configuration_set,\n                                          os_virtual_hard_disk, role_type,\n                                          network_configuration_set,\n                                          availability_set_name,\n                                          data_virtual_hard_disks, role_size,\n                                          virtual_network_name):\n        xml = _XmlSerializer.data_to_xml([('Name', deployment_name),\n                                          ('DeploymentSlot', deployment_slot),\n                                          ('Label', label)])\n        xml += '<RoleList>'\n        xml += '<Role>'\n        xml += _XmlSerializer.role_to_xml(\n            availability_set_name,\n            data_virtual_hard_disks,\n            network_configuration_set,\n            os_virtual_hard_disk,\n            role_name,\n            role_size,\n            role_type,\n            system_configuration_set)\n        xml += '</Role>'\n        xml += '</RoleList>'\n\n        if virtual_network_name is not None:\n            xml += _XmlSerializer.data_to_xml(\n                [('VirtualNetworkName', virtual_network_name)])\n\n        return _XmlSerializer.doc_from_xml('Deployment', xml)\n\n    @staticmethod\n    def create_website_to_xml(webspace_name, website_name, geo_region, plan,\n    
                          host_names, compute_mode, server_farm, site_mode):\n        xml = '<HostNames xmlns:a=\"http://schemas.microsoft.com/2003/10/Serialization/Arrays\">'\n        for host_name in host_names:\n            xml += '<a:string>{0}</a:string>'.format(host_name)\n        xml += '</HostNames>'\n        xml += _XmlSerializer.data_to_xml(\n            [('Name', website_name),\n             ('ComputeMode', compute_mode),\n             ('ServerFarm', server_farm),\n             ('SiteMode', site_mode)])\n        xml += '<WebSpaceToCreate>'\n        xml += _XmlSerializer.data_to_xml(\n            [('GeoRegion', geo_region),\n             ('Name', webspace_name),\n             ('Plan', plan)])\n        xml += '</WebSpaceToCreate>'\n        return _XmlSerializer.doc_from_xml('Site', xml)\n\n    @staticmethod\n    def data_to_xml(data):\n        '''Creates an xml fragment from the specified data.\n           data: Array of tuples, where first: xml element name\n                                        second: xml element text\n                                        third: conversion function\n        '''\n        xml = ''\n        for element in data:\n            name = element[0]\n            val = element[1]\n            if len(element) > 2:\n                converter = element[2]\n            else:\n                converter = None\n\n            if val is not None:\n                if converter is not None:\n                    text = _str(converter(_str(val)))\n                else:\n                    text = _str(val)\n\n                xml += ''.join(['<', name, '>', text, '</', name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_xml(document_element_name, inner_xml):\n        '''Wraps the specified xml in an xml root element with default azure\n        namespaces'''\n        xml = ''.join(['<', document_element_name,\n                      ' xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\"',\n                      ' xmlns=\"http://schemas.microsoft.com/windowsazure\">'])\n        xml += inner_xml\n        xml += ''.join(['</', document_element_name, '>'])\n        return xml\n\n    @staticmethod\n    def doc_from_data(document_element_name, data, extended_properties=None):\n        xml = _XmlSerializer.data_to_xml(data)\n        if extended_properties is not None:\n            xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(\n                extended_properties)\n        return _XmlSerializer.doc_from_xml(document_element_name, xml)\n\n    @staticmethod\n    def extended_properties_dict_to_xml_fragment(extended_properties):\n        xml = ''\n        if extended_properties is not None and len(extended_properties) > 0:\n            xml += '<ExtendedProperties>'\n            for key, val in extended_properties.items():\n                xml += ''.join(['<ExtendedProperty>',\n                                '<Name>',\n                                _str(key),\n                                '</Name>',\n                               '<Value>',\n                               _str(val),\n                               '</Value>',\n                               '</ExtendedProperty>'])\n            xml += '</ExtendedProperties>'\n        return xml\n\n\ndef _parse_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\nclass _ServiceBusManagementXmlSerializer(object):\n\n    @staticmethod\n    def namespace_to_xml(region):\n        '''Converts a service bus namespace description to xml\n\n        The xml 
format:\n<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <content type=\"application/xml\">\n        <NamespaceDescription\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">\n            <Region>West US</Region>\n        </NamespaceDescription>\n    </content>\n</entry>\n        '''\n        body = '<NamespaceDescription xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">'\n        body += ''.join(['<Region>', region, '</Region>'])\n        body += '</NamespaceDescription>'\n\n        return _create_entry(body)\n\n    @staticmethod\n    def xml_to_namespace(xmlstr):\n        '''Converts xml response to service bus namespace\n\n        The xml format for namespace:\n<entry>\n<id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id>\n<title type=\"text\">myunittests</title>\n<updated>2012-08-22T16:48:10Z</updated>\n<content type=\"application/xml\">\n    <NamespaceDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Name>myunittests</Name>\n    <Region>West US</Region>\n    <DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey>\n    <Status>Active</Status>\n    <CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt>\n    <AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint>\n    <ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint>\n    <ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString>\n    <SubscriptionId>00000000000000000000000000000000</SubscriptionId>\n    <Enabled>true</Enabled>\n    </NamespaceDescription>\n</content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        namespace = ServiceBusNamespace()\n\n        mappings = (\n            ('Name', 'name', None),\n            ('Region', 'region', None),\n            ('DefaultKey', 'default_key', None),\n            ('Status', 'status', None),\n            ('CreatedAt', 'created_at', None),\n            ('AcsManagementEndpoint', 'acs_management_endpoint', None),\n            ('ServiceBusEndpoint', 'servicebus_endpoint', None),\n            ('ConnectionString', 'connection_string', None),\n            ('SubscriptionId', 'subscription_id', None),\n            ('Enabled', 'enabled', _parse_bool),\n        )\n\n        for desc in _get_children_from_path(xmldoc,\n                                            'entry',\n                                            'content',\n                                            'NamespaceDescription'):\n            for xml_name, field_name, conversion_func in mappings:\n                node_value = _get_first_child_node_value(desc, xml_name)\n                if node_value is not None:\n                    if conversion_func is not None:\n                        node_value = conversion_func(node_value)\n                    setattr(namespace, field_name, node_value)\n\n        return namespace\n\n    @staticmethod\n    def xml_to_region(xmlstr):\n        '''Converts xml response to service bus region\n\n        The xml format for region:\n<entry>\n<id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>\n<title type=\"text\"></title>\n<updated>2013-04-10T18:25:29Z</updated>\n<content type=\"application/xml\">\n    
<RegionCodeDescription\n        xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    <Code>East Asia</Code>\n    <FullName>East Asia</FullName>\n    </RegionCodeDescription>\n</content>\n</entry>\n          '''\n        xmldoc = minidom.parseString(xmlstr)\n        region = ServiceBusRegion()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'RegionCodeDescription'):\n            node_value = _get_first_child_node_value(desc, 'Code')\n            if node_value is not None:\n                region.code = node_value\n            node_value = _get_first_child_node_value(desc, 'FullName')\n            if node_value is not None:\n                region.fullname = node_value\n\n        return region\n\n    @staticmethod\n    def xml_to_namespace_availability(xmlstr):\n        '''Converts xml response to service bus namespace availability\n\n        The xml format:\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<entry xmlns=\"http://www.w3.org/2005/Atom\">\n    <id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id>\n    <title type=\"text\"></title>\n    <updated>2013-04-16T03:03:37Z</updated>\n    <content type=\"application/xml\">\n        <NamespaceAvailability\n            xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"\n            xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n            <Result>false</Result>\n        </NamespaceAvailability>\n    </content>\n</entry>\n        '''\n        xmldoc = minidom.parseString(xmlstr)\n        availability = AvailabilityResponse()\n\n        for desc in _get_children_from_path(xmldoc, 'entry', 'content',\n                                            'NamespaceAvailability'):\n            node_value = _get_first_child_node_value(desc, 'Result')\n            if node_value is not None:\n                availability.result = _parse_bool(node_value)\n\n        return availability\n\n\nfrom azure.servicemanagement.servicemanagementservice import (\n    ServiceManagementService)\nfrom azure.servicemanagement.servicebusmanagementservice import (\n    ServiceBusManagementService)\nfrom azure.servicemanagement.websitemanagementservice import (\n    WebsiteManagementService)\n"
  },
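For reference, a minimal sketch (not part of the file above) of how the _XmlSerializer helpers compose a request body: data_to_xml emits one element per (name, value[, converter]) tuple and skips None values, while doc_from_data wraps the resulting fragment in a root element carrying the default Azure namespaces.

# Sketch only; assumes the serializer module above is on the import path.
fragment = _XmlSerializer.data_to_xml(
    [('KeyType', 'Primary'),                   # plain element
     ('GeoReplicationEnabled', True, _lower),  # converter: 'True' -> 'true'
     ('Description', None)])                   # None values are skipped
# fragment == '<KeyType>Primary</KeyType>'
#             '<GeoReplicationEnabled>true</GeoReplicationEnabled>'

doc = _XmlSerializer.doc_from_data('RegenerateKeys', [('KeyType', 'Primary')])
# doc == '<RegenerateKeys'
#        ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"'
#        ' xmlns="http://schemas.microsoft.com/windowsazure">'
#        '<KeyType>Primary</KeyType></RegenerateKeys>'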
  {
    "path": "OSPatching/azure/servicemanagement/servicebusmanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _convert_response_to_feeds,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    _ServiceBusManagementXmlSerializer,\n    QueueDescription,\n    TopicDescription,\n    NotificationHubDescription,\n    RelayDescription,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\n\nclass ServiceBusManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceBusManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for service bus ----------------------------------------\n    def get_regions(self):\n        '''\n        Get list of available service bus regions.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Regions/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_region)\n\n    def list_namespaces(self):\n        '''\n        List the service bus namespaces defined on the account.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces/', None),\n            None)\n\n        return _convert_response_to_feeds(\n            response,\n            _ServiceBusManagementXmlSerializer.xml_to_namespace)\n\n    def get_namespace(self, name):\n        '''\n        Get details about a specific namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        response = self._perform_get(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace(\n            response.body)\n\n    def create_namespace(self, name, region):\n        '''\n        Create a new service bus namespace.\n\n        name: Name of the service bus namespace to create.\n        region: Region to create the namespace in.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_put(\n            self._get_path('services/serviceBus/Namespaces', name),\n            _ServiceBusManagementXmlSerializer.namespace_to_xml(region))\n\n    def delete_namespace(self, name):\n        '''\n        Delete a service bus namespace.\n\n        name: Name of the service bus namespace to delete.\n        '''\n        _validate_not_none('name', name)\n\n        return self._perform_delete(\n            self._get_path('services/serviceBus/Namespaces', name),\n            None)\n\n    def check_namespace_availability(self, name):\n        '''\n        
Checks to see if the specified service bus namespace is available, or\n        if it has already been taken.\n\n        name: Name of the service bus namespace to validate.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_path('services/serviceBus/CheckNamespaceAvailability',\n                           None) + '/?namespace=' + _str(name), None)\n\n        return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(\n            response.body)\n\n    def list_queues(self, name):\n        '''\n        Enumerates the queues in the service namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_list_queues_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, QueueDescription)\n\n    def list_topics(self, name):\n        '''\n        Retrieves the topics in the service namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_list_topics_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, TopicDescription)\n\n    def list_notification_hubs(self, name):\n        '''\n        Retrieves the notification hubs in the service namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_list_notification_hubs_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, NotificationHubDescription)\n\n    def list_relays(self, name):\n        '''\n        Retrieves the relays in the service namespace.\n\n        name: Name of the service bus namespace.\n        '''\n        _validate_not_none('name', name)\n\n        response = self._perform_get(\n            self._get_list_relays_path(name),\n            None)\n\n        return _convert_response_to_feeds(response, RelayDescription)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_queues_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Queues'\n\n    def _get_list_topics_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Topics'\n\n    def _get_list_notification_hubs_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/NotificationHubs'\n\n    def _get_list_relays_path(self, namespace_name):\n        return self._get_path('services/serviceBus/Namespaces/',\n                              namespace_name) + '/Relays'\n"
  },
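A brief usage sketch for the ServiceBusManagementService defined above; the subscription id and certificate path are placeholders (both may instead come from the AZURE_MANAGEMENT_SUBSCRIPTIONID and AZURE_MANAGEMENT_CERTFILE environment variables handled in servicemanagementclient.py below).

from azure.servicemanagement import ServiceBusManagementService

# Placeholder credentials; substitute a real subscription id and .pem file.
sbms = ServiceBusManagementService(
    subscription_id='00000000-0000-0000-0000-000000000000',
    cert_file='/path/to/management.pem')

# Feed responses are converted by the XML serializers shown earlier.
for ns in sbms.list_namespaces():
    print('{0} ({1}): {2}'.format(ns.name, ns.region, ns.status))

# Create a namespace only if the name is still free.
if sbms.check_namespace_availability('mynamespace').result:
    sbms.create_namespace('mynamespace', 'West US')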
  {
    "path": "OSPatching/azure/servicemanagement/servicemanagementclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\n\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _get_request_body,\n    _parse_response,\n    _str,\n    _update_request_uri_query,\n    )\nfrom azure.http import (\n    HTTPError,\n    HTTPRequest,\n    )\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.servicemanagement import (\n    AZURE_MANAGEMENT_CERTFILE,\n    AZURE_MANAGEMENT_SUBSCRIPTIONID,\n    _management_error_handler,\n    _parse_response_for_async_op,\n    _update_management_header,\n    )\n\n\nclass _ServiceManagementClient(object):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        self.requestid = None\n        self.subscription_id = subscription_id\n        self.cert_file = cert_file\n        self.host = host\n\n        if not self.cert_file:\n            if AZURE_MANAGEMENT_CERTFILE in os.environ:\n                self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]\n\n        if not self.subscription_id:\n            if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:\n                self.subscription_id = os.environ[\n                    AZURE_MANAGEMENT_SUBSCRIPTIONID]\n\n        if not self.cert_file or not self.subscription_id:\n            raise WindowsAzureError(\n                'You need to provide subscription id and certificate file')\n\n        self._httpclient = _HTTPClient(\n            service_instance=self, cert_file=self.cert_file)\n        self._filter = self._httpclient.perform_request\n\n    def with_filter(self, filter):\n        '''Returns a new service which will process requests with the\n        specified filter.  Filtering operations can include logging, automatic\n        retrying, etc...  The filter is a lambda which receives the HTTPRequest\n        and another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.'''\n        res = type(self)(self.subscription_id, self.cert_file, self.host)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. 
Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    #--Helper functions --------------------------------------------------\n    def _perform_request(self, request):\n        try:\n            resp = self._filter(request)\n        except HTTPError as ex:\n            return _management_error_handler(ex)\n\n        return resp\n\n    def _perform_get(self, path, response_type):\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        return response\n\n    def _perform_put(self, path, body, async=False):\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_post(self, path, body, response_type=None, async=False):\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self.host\n        request.path = path\n        request.body = _get_request_body(body)\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if response_type is not None:\n            return _parse_response(response, response_type)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _perform_delete(self, path, async=False):\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self.host\n        request.path = path\n        request.path, request.query = _update_request_uri_query(request)\n        request.headers = _update_management_header(request)\n        response = self._perform_request(request)\n\n        if async:\n            return _parse_response_for_async_op(response)\n\n        return None\n\n    def _get_path(self, resource, name):\n        path = '/' + self.subscription_id + '/' + resource\n        if name is not None:\n            path += '/' + _str(name)\n        return path\n"
  },
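The with_filter mechanism documented above can be exercised as in the following sketch; the logging filter is hypothetical, and it assumes the HTTP response object exposes a status attribute.

from azure.servicemanagement import ServiceManagementService

def logging_filter(request, next_filter):
    # Pre-process: inspect (or modify) the outgoing HTTPRequest.
    print('>> {0} {1}'.format(request.method, request.path))
    response = next_filter(request)  # hand off to the next handler
    # Post-process: assumes the response carries a numeric status.
    print('<< {0}'.format(response.status))
    return response

# With no arguments, credentials fall back to the environment variables
# described in the client's __init__.
sms = ServiceManagementService().with_filter(logging_filter)
services = sms.list_hosted_services()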
  {
    "path": "OSPatching/azure/servicemanagement/servicemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    MANAGEMENT_HOST,\n    _str,\n    _validate_not_none,\n    )\nfrom azure.servicemanagement import (\n    AffinityGroups,\n    AffinityGroup,\n    AvailabilityResponse,\n    Certificate,\n    Certificates,\n    DataVirtualHardDisk,\n    Deployment,\n    Disk,\n    Disks,\n    Locations,\n    Operation,\n    HostedService,\n    HostedServices,\n    Images,\n    OperatingSystems,\n    OperatingSystemFamilies,\n    OSImage,\n    PersistentVMRole,\n    StorageService,\n    StorageServices,\n    Subscription,\n    SubscriptionCertificate,\n    SubscriptionCertificates,\n    VirtualNetworkSites,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass ServiceManagementService(_ServiceManagementClient):\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(ServiceManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for storage accounts -----------------------------------\n    def list_storage_accounts(self):\n        '''\n        Lists the storage accounts available under the current subscription.\n        '''\n        return self._perform_get(self._get_storage_service_path(),\n                                 StorageServices)\n\n    def get_storage_account_properties(self, service_name):\n        '''\n        Returns system properties for the specified storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(self._get_storage_service_path(service_name),\n                                 StorageService)\n\n    def get_storage_account_keys(self, service_name):\n        '''\n        Returns the primary and secondary access keys for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path(service_name) + '/keys',\n            StorageService)\n\n    def regenerate_storage_account_keys(self, service_name, key_type):\n        '''\n        Regenerates the primary or secondary access key for the specified\n        storage account.\n\n        service_name: Name of the storage service account.\n        key_type:\n            Specifies which key to regenerate. 
Valid values are:\n            Primary, Secondary\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('key_type', key_type)\n        return self._perform_post(\n            self._get_storage_service_path(\n                service_name) + '/keys?action=regenerate',\n            _XmlSerializer.regenerate_keys_to_xml(\n                key_type),\n            StorageService)\n\n    def create_storage_account(self, service_name, description, label,\n                               affinity_group=None, location=None,\n                               geo_replication_enabled=True,\n                               extended_properties=None):\n        '''\n        Creates a new storage account in Windows Azure.\n\n        service_name:\n            A name for the storage account that is unique within Windows Azure.\n            Storage account names must be between 3 and 24 characters in length\n            and use numbers and lower-case letters only.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        affinity_group:\n            The name of an existing affinity group in the specified\n            subscription. You can specify either a location or affinity_group,\n            but not both.\n        location:\n            The location where the storage account is created. You can specify\n            either a location or affinity_group, but not both.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('description', description)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(\n            self._get_storage_service_path(),\n            _XmlSerializer.create_storage_service_input_to_xml(\n                service_name,\n                description,\n                label,\n                affinity_group,\n                location,\n                geo_replication_enabled,\n                extended_properties),\n            async=True)\n\n    def update_storage_account(self, service_name, description=None,\n                               label=None, geo_replication_enabled=None,\n                               extended_properties=None):\n        '''\n        Updates the label, the description, and enables or disables the\n        geo-replication status for a storage account in Windows Azure.\n\n        service_name: Name of the storage service account.\n        description:\n            A description for the storage account. The description may be up\n            to 1024 characters in length.\n        label:\n            A name for the storage account. The name may be up to 100\n            characters in length. The name can be used to identify the storage\n            account for your tracking purposes.\n        geo_replication_enabled:\n            Specifies whether the storage account is created with the\n            geo-replication enabled. If the element is not included in the\n            request body, the default value is true. If set to true, the data\n            in the storage account is replicated across more than one\n            geographic location so as to enable resilience in the face of\n            catastrophic service loss.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. 
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(\n            self._get_storage_service_path(service_name),\n            _XmlSerializer.update_storage_service_input_to_xml(\n                description,\n                label,\n                geo_replication_enabled,\n                extended_properties))\n\n    def delete_storage_account(self, service_name):\n        '''\n        Deletes the specified storage account from Windows Azure.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(\n            self._get_storage_service_path(service_name))\n\n    def check_storage_account_name_availability(self, service_name):\n        '''\n        Checks to see if the specified storage account name is available, or\n        if it has already been taken.\n\n        service_name: Name of the storage service account.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            self._get_storage_service_path() +\n            '/operations/isavailable/' +\n            _str(service_name),\n            AvailabilityResponse)\n\n    #--Operations for hosted services ------------------------------------\n    def list_hosted_services(self):\n        '''\n        Lists the hosted services available under the current subscription.\n        '''\n        return self._perform_get(self._get_hosted_service_path(),\n                                 HostedServices)\n\n    def get_hosted_service_properties(self, service_name, embed_detail=False):\n        '''\n        Retrieves system properties for the specified hosted service. These\n        properties include the service name and service type; the name of the\n        affinity group to which the service belongs, or its location if it is\n        not part of an affinity group; and optionally, information on the\n        service's deployments.\n\n        service_name: Name of the hosted service.\n        embed_detail:\n            When True, the management service returns properties for all\n            deployments of the service, as well as for the service itself.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('embed_detail', embed_detail)\n        return self._perform_get(\n            self._get_hosted_service_path(service_name) +\n            '?embed-detail=' +\n            _str(embed_detail).lower(),\n            HostedService)\n\n    def create_hosted_service(self, service_name, label, description=None,\n                              location=None, affinity_group=None,\n                              extended_properties=None):\n        '''\n        Creates a new hosted service in Windows Azure.\n\n        service_name:\n            A name for the hosted service that is unique within Windows Azure.\n            This name is the DNS prefix name and can be used to access the\n            hosted service.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. The name can be used to identify the hosted service for\n            your tracking purposes.\n        description:\n            A description for the hosted service.
The description can be up to\n            1024 characters in length.\n        location:\n            The location where the hosted service will be created. You can\n            specify either a location or affinity_group, but not both.\n        affinity_group:\n            The name of an existing affinity group associated with this\n            subscription. This name is a GUID and can be retrieved by examining\n            the name element of the response body returned by\n            list_affinity_groups. You can specify either a location or\n            affinity_group, but not both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('label', label)\n        if affinity_group is None and location is None:\n            raise WindowsAzureError(\n                'location or affinity_group must be specified')\n        if affinity_group is not None and location is not None:\n            raise WindowsAzureError(\n                'Only one of location or affinity_group needs to be specified')\n        return self._perform_post(self._get_hosted_service_path(),\n                                  _XmlSerializer.create_hosted_service_to_xml(\n                                      service_name,\n                                      label,\n                                      description,\n                                      location,\n                                      affinity_group,\n                                      extended_properties))\n\n    def update_hosted_service(self, service_name, label=None, description=None,\n                              extended_properties=None):\n        '''\n        Updates the label and/or the description for a hosted service in\n        Windows Azure.\n\n        service_name: Name of the hosted service.\n        label:\n            A name for the hosted service. The name may be up to 100 characters\n            in length. You must specify a value for either Label or\n            Description, or for both. It is recommended that the label be\n            unique within the subscription. The name can be used to\n            identify the hosted service for your tracking purposes.\n        description:\n            A description for the hosted service. The description may be up to\n            1024 characters in length. You must specify a value for either\n            Label or Description, or for both.\n        extended_properties:\n            Dictionary containing name/value pairs of storage account\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter.
The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_put(self._get_hosted_service_path(service_name),\n                                 _XmlSerializer.update_hosted_service_to_xml(\n                                     label,\n                                     description,\n                                     extended_properties))\n\n    def delete_hosted_service(self, service_name):\n        '''\n        Deletes the specified hosted service from Windows Azure.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_delete(self._get_hosted_service_path(service_name))\n\n    def get_deployment_by_slot(self, service_name, deployment_slot):\n        '''\n        Returns configuration information, status, and system properties for\n        a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        return self._perform_get(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            Deployment)\n\n    def get_deployment_by_name(self, service_name, deployment_name):\n        '''\n        Returns configuration information, status, and system properties for a\n        deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_get(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            Deployment)\n\n    def create_deployment(self, service_name, deployment_slot, name,\n                          package_url, label, configuration,\n                          start_deployment=False,\n                          treat_warnings_as_error=False,\n                          extended_properties=None):\n        '''\n        Uploads a new service package and creates a new deployment on staging\n        or production.\n\n        service_name: Name of the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. 
            for your tracking purposes.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        start_deployment:\n            Indicates whether to start the deployment immediately after it is\n            created. If false, the service model is still deployed to the\n            virtual machines but the code is not run immediately. Instead, the\n            service is Suspended until you call Update Deployment Status and\n            set the status to Running, at which time the service will be\n            started. A deployed service still incurs charges, even if it is\n            suspended.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the Create Deployment operation fails if there\n            are validation warnings on the service package.\n        extended_properties:\n            Dictionary containing name/value pairs of deployment\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('name', name)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('label', label)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_slot(\n                service_name, deployment_slot),\n            _XmlSerializer.create_deployment_to_xml(\n                name,\n                package_url,\n                label,\n                configuration,\n                start_deployment,\n                treat_warnings_as_error,\n                extended_properties),\n            async=True)\n\n    def delete_deployment(self, service_name, deployment_name):\n        '''\n        Deletes the specified deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        return self._perform_delete(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name),\n            async=True)\n
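\n    # Hypothetical usage sketch (values are illustrative): upload a package,\n    # then poll the asynchronous request. The request_id attribute on the\n    # result is an assumption of this sketch.\n    #\n    #   result = sms.create_deployment('myservice', 'production', 'v1',\n    #                                  package_url, 'v1', config_b64,\n    #                                  start_deployment=True)\n    #   status = sms.get_operation_status(result.request_id)\n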
\n    def swap_deployment(self, service_name, production, source_deployment):\n        '''\n        Initiates a virtual IP swap between the staging and production\n        deployment environments for a service. If the service is currently\n        running in the staging environment, it will be swapped to the\n        production environment. If it is running in the production\n        environment, it will be swapped to staging.\n\n        service_name: Name of the hosted service.\n        production: The name of the production deployment.\n        source_deployment: The name of the source deployment.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('production', production)\n        _validate_not_none('source_deployment', source_deployment)\n        return self._perform_post(self._get_hosted_service_path(service_name),\n                                  _XmlSerializer.swap_deployment_to_xml(\n                                      production, source_deployment),\n                                  async=True)\n\n    def change_deployment_configuration(self, service_name, deployment_name,\n                                        configuration,\n                                        treat_warnings_as_error=False,\n                                        mode='Auto', extended_properties=None):\n        '''\n        Initiates a change to the deployment configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        treat_warnings_as_error:\n            Indicates whether to treat package validation warnings as errors.\n            If set to true, the operation fails if there are validation\n            warnings on the service package.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        extended_properties:\n            Dictionary containing name/value pairs of deployment\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('configuration', configuration)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=config',\n            _XmlSerializer.change_deployment_to_xml(\n                configuration,\n                treat_warnings_as_error,\n                mode,\n                extended_properties),\n            async=True)\n\n    def update_deployment_status(self, service_name, deployment_name, status):\n        '''\n        Initiates a change in deployment status.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        status:\n            The change to initiate to the deployment status. Possible values\n
            include: Running, Suspended\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('status', status)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=status',\n            _XmlSerializer.update_deployment_status_to_xml(\n                status),\n            async=True)\n\n    def upgrade_deployment(self, service_name, deployment_name, mode,\n                           package_url, configuration, label, force,\n                           role_to_upgrade=None, extended_properties=None):\n        '''\n        Initiates an upgrade.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            If set to Manual, WalkUpgradeDomain must be called to apply the\n            update. If set to Auto, the Windows Azure platform will\n            automatically apply the update to each upgrade domain for the\n            service. Possible values are: Auto, Manual\n        package_url:\n            A URL that refers to the location of the service package in the\n            Blob service. The service package can be located either in a\n            storage account beneath the same subscription or a Shared Access\n            Signature (SAS) URI from any storage account.\n        configuration:\n            The base-64 encoded service configuration file for the deployment.\n        label:\n            A name for the hosted service. The name can be up to 100 characters\n            in length. It is recommended that the label be unique within the\n            subscription. The name can be used to identify the hosted service\n            for your tracking purposes.\n        force:\n            Specifies whether the upgrade should proceed even when it will\n            cause local data to be lost from some role instances. True if the\n            upgrade should proceed; otherwise false if the upgrade should\n            fail.\n        role_to_upgrade: The name of the specific role to upgrade.\n        extended_properties:\n            Dictionary containing name/value pairs of deployment\n            properties. You can have a maximum of 50 extended property\n            name/value pairs. The maximum length of the Name element is 64\n            characters, only alphanumeric characters and underscores are valid\n            in the Name, and the name must start with a letter. The value has\n
            a maximum length of 255 characters.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('package_url', package_url)\n        _validate_not_none('configuration', configuration)\n        _validate_not_none('label', label)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=upgrade',\n            _XmlSerializer.upgrade_deployment_to_xml(\n                mode,\n                package_url,\n                configuration,\n                label,\n                role_to_upgrade,\n                force,\n                extended_properties),\n            async=True)\n\n    def walk_upgrade_domain(self, service_name, deployment_name,\n                            upgrade_domain):\n        '''\n        Specifies the next upgrade domain to be walked during manual in-place\n        upgrade or configuration change.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        upgrade_domain:\n            An integer value that identifies the upgrade domain to walk.\n            Upgrade domains are identified with a zero-based index: the first\n            upgrade domain has an ID of 0, the second has an ID of 1, and so on.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('upgrade_domain', upgrade_domain)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=walkupgradedomain',\n            _XmlSerializer.walk_upgrade_domain_to_xml(\n                upgrade_domain),\n            async=True)\n
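\n    # Hypothetical usage sketch: a manual in-place upgrade that walks each\n    # upgrade domain explicitly (the domain count is illustrative).\n    #\n    #   sms.upgrade_deployment('myservice', 'v1', 'Manual', package_url,\n    #                          config_b64, 'v2', force=False)\n    #   for domain in range(2):\n    #       sms.walk_upgrade_domain('myservice', 'v1', domain)\n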
\n    def rollback_update_or_upgrade(self, service_name, deployment_name, mode,\n                                   force):\n        '''\n        Cancels an in-progress configuration change (update) or upgrade and\n        returns the deployment to its state before the upgrade or\n        configuration change was started.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        mode:\n            Specifies whether the rollback should proceed automatically.\n                auto - The rollback proceeds without further user input.\n                manual - You must call the Walk Upgrade Domain operation to\n                         apply the rollback to each upgrade domain.\n        force:\n            Specifies whether the rollback should proceed even when it will\n            cause local data to be lost from some role instances. True if the\n            rollback should proceed; otherwise false if the rollback should\n            fail.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('mode', mode)\n        _validate_not_none('force', force)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + '/?comp=rollback',\n            _XmlSerializer.rollback_upgrade_to_xml(\n                mode, force),\n            async=True)\n\n    def reboot_role_instance(self, service_name, deployment_name,\n                             role_instance_name):\n        '''\n        Requests a reboot of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reboot',\n            '',\n            async=True)\n\n    def reimage_role_instance(self, service_name, deployment_name,\n                              role_instance_name):\n        '''\n        Requests a reimage of a role instance that is running in a deployment.\n\n        service_name: Name of the hosted service.\n        deployment_name: The name of the deployment.\n        role_instance_name: The name of the role instance.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_instance_name', role_instance_name)\n        return self._perform_post(\n            self._get_deployment_path_using_name(\n                service_name, deployment_name) + \\\n                    '/roleinstances/' + _str(role_instance_name) + \\\n                    '?comp=reimage',\n            '',\n            async=True)\n\n    def check_hosted_service_name_availability(self, service_name):\n        '''\n        Checks to see if the specified hosted service name is available, or if\n        it has already been taken.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id +\n            '/services/hostedservices/operations/isavailable/' +\n            _str(service_name),\n            AvailabilityResponse)\n\n    #--Operations for service certificates -------------------------------\n    def list_service_certificates(self, service_name):\n        '''\n        Lists all of the service certificates associated with the specified\n        hosted service.\n\n        service_name: Name of the hosted service.\n        '''\n        _validate_not_none('service_name', service_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            Certificates)\n\n    def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):\n        '''\n
        Returns the public data for the specified X.509 certificate associated\n        with a hosted service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint),\n            Certificate)\n\n    def add_service_certificate(self, service_name, data, certificate_format,\n                                password):\n        '''\n        Adds a certificate to a hosted service.\n\n        service_name: Name of the hosted service.\n        data: The base-64 encoded form of the pfx file.\n        certificate_format:\n            The service certificate format. The only supported value is pfx.\n        password: The certificate password.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('data', data)\n        _validate_not_none('certificate_format', certificate_format)\n        _validate_not_none('password', password)\n        return self._perform_post(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates',\n            _XmlSerializer.certificate_file_to_xml(\n                data, certificate_format, password),\n            async=True)\n\n    def delete_service_certificate(self, service_name, thumbalgorithm,\n                                   thumbprint):\n        '''\n        Deletes a service certificate from the certificate store of a hosted\n        service.\n\n        service_name: Name of the hosted service.\n        thumbalgorithm: The algorithm for the certificate's thumbprint.\n        thumbprint: The hexadecimal representation of the thumbprint.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('thumbalgorithm', thumbalgorithm)\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/services/hostedservices/' +\n            _str(service_name) + '/certificates/' +\n            _str(thumbalgorithm) + '-' + _str(thumbprint),\n            async=True)\n\n    #--Operations for management certificates ----------------------------\n    def list_management_certificates(self):\n        '''\n        The List Management Certificates operation lists and returns basic\n        information about all of the management certificates associated with\n        the specified subscription. Management certificates, which are also\n        known as subscription certificates, authenticate clients attempting to\n        connect to resources associated with your Windows Azure subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/certificates',\n                                 SubscriptionCertificates)\n
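\n    # Hypothetical usage sketch: enumerate management certificates, then fetch\n    # one by its thumbprint.\n    #\n    #   certs = sms.list_management_certificates()\n    #   cert = sms.get_management_certificate(thumbprint)\n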
\n    def get_management_certificate(self, thumbprint):\n        '''\n        The Get Management Certificate operation retrieves information about\n        the management certificate with the specified thumbprint. Management\n        certificates, which are also known as subscription certificates,\n        authenticate clients attempting to connect to resources associated\n        with your Windows Azure subscription.\n\n        thumbprint: The thumbprint value of the certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_get(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint),\n            SubscriptionCertificate)\n\n    def add_management_certificate(self, public_key, thumbprint, data):\n        '''\n        The Add Management Certificate operation adds a certificate to the\n        list of management certificates. Management certificates, which are\n        also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        public_key:\n            A base64 representation of the management certificate public key.\n        thumbprint:\n            The thumbprint that uniquely identifies the management\n            certificate.\n        data: The certificate's raw data in base-64 encoded .cer format.\n        '''\n        _validate_not_none('public_key', public_key)\n        _validate_not_none('thumbprint', thumbprint)\n        _validate_not_none('data', data)\n        return self._perform_post(\n            '/' + self.subscription_id + '/certificates',\n            _XmlSerializer.subscription_certificate_to_xml(\n                public_key, thumbprint, data))\n\n    def delete_management_certificate(self, thumbprint):\n        '''\n        The Delete Management Certificate operation deletes a certificate from\n        the list of management certificates. Management certificates, which\n        are also known as subscription certificates, authenticate clients\n        attempting to connect to resources associated with your Windows Azure\n        subscription.\n\n        thumbprint:\n            The thumbprint that uniquely identifies the management\n            certificate.\n        '''\n        _validate_not_none('thumbprint', thumbprint)\n        return self._perform_delete(\n            '/' + self.subscription_id + '/certificates/' + _str(thumbprint))\n\n    #--Operations for affinity groups ------------------------------------\n    def list_affinity_groups(self):\n        '''\n        Lists the affinity groups associated with the specified subscription.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups',\n            AffinityGroups)\n\n    def get_affinity_group_properties(self, affinity_group_name):\n        '''\n        Returns the system properties associated with the specified affinity\n        group.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_get(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name),\n            AffinityGroup)\n\n    def create_affinity_group(self, name, label, location, description=None):\n        '''\n        Creates a new affinity group for the specified subscription.\n\n        name: A name for the affinity group that is unique to the subscription.\n        label:\n            A name for the affinity group. The name can be up to 100 characters\n
            in length.\n        location:\n            The data center location where the affinity group will be created.\n            To list available locations, use the list_locations function.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('name', name)\n        _validate_not_none('label', label)\n        _validate_not_none('location', location)\n        return self._perform_post(\n            '/' + self.subscription_id + '/affinitygroups',\n            _XmlSerializer.create_affinity_group_to_xml(name,\n                                                        label,\n                                                        description,\n                                                        location))\n\n    def update_affinity_group(self, affinity_group_name, label,\n                              description=None):\n        '''\n        Updates the label and/or the description for an affinity group for the\n        specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        label:\n            A name for the affinity group. The name can be up to 100 characters\n            in length.\n        description:\n            A description for the affinity group. The description can be up to\n            1024 characters in length.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        _validate_not_none('label', label)\n        return self._perform_put(\n            '/' + self.subscription_id + '/affinitygroups/' +\n            _str(affinity_group_name),\n            _XmlSerializer.update_affinity_group_to_xml(label, description))\n\n    def delete_affinity_group(self, affinity_group_name):\n        '''\n        Deletes an affinity group in the specified subscription.\n\n        affinity_group_name: The name of the affinity group.\n        '''\n        _validate_not_none('affinity_group_name', affinity_group_name)\n        return self._perform_delete('/' + self.subscription_id + \\\n                                    '/affinitygroups/' + \\\n                                    _str(affinity_group_name))\n\n    #--Operations for locations ------------------------------------------\n    def list_locations(self):\n        '''\n        Lists all of the data center locations that are valid for your\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id + '/locations',\n                                 Locations)\n
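\n    # Hypothetical usage sketch: pick a data center location, then group\n    # related services under an affinity group there.\n    #\n    #   locations = sms.list_locations()\n    #   sms.create_affinity_group('mygroup', 'mygroup', 'West US',\n    #                             description='Related services')\n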
\n    #--Operations for tracking asynchronous requests ---------------------\n    def get_operation_status(self, request_id):\n        '''\n        Returns the status of the specified operation. After calling an\n        asynchronous operation, you can call Get Operation Status to determine\n        whether the operation has succeeded, failed, or is still in progress.\n\n        request_id: The request ID for the request you wish to track.\n        '''\n        _validate_not_none('request_id', request_id)\n        return self._perform_get(\n            '/' + self.subscription_id + '/operations/' + _str(request_id),\n            Operation)\n\n    #--Operations for retrieving operating system information ------------\n    def list_operating_systems(self):\n        '''\n        Lists the versions of the guest operating system that are currently\n        available in Windows Azure.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystems',\n            OperatingSystems)\n\n    def list_operating_system_families(self):\n        '''\n        Lists the guest operating system families available in Windows Azure,\n        and also lists the operating system versions available for each family.\n        '''\n        return self._perform_get(\n            '/' + self.subscription_id + '/operatingsystemfamilies',\n            OperatingSystemFamilies)\n\n    #--Operations for retrieving subscription history --------------------\n    def get_subscription(self):\n        '''\n        Returns account and resource allocation information for the specified\n        subscription.\n        '''\n        return self._perform_get('/' + self.subscription_id,\n                                 Subscription)\n\n    #--Operations for virtual machines -----------------------------------\n    def get_role(self, service_name, deployment_name, role_name):\n        '''\n        Retrieves the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_get(\n            self._get_role_path(service_name, deployment_name, role_name),\n            PersistentVMRole)\n
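\n    # Hypothetical usage sketch: provision a Linux VM. The configuration set\n    # and disk constructors are sketched from this package's model classes;\n    # their exact arguments here are assumptions.\n    #\n    #   linux_config = LinuxConfigurationSet('myvm', 'azureuser', password)\n    #   os_hd = OSVirtualHardDisk(image_name, media_link)\n    #   sms.create_virtual_machine_deployment(\n    #       'myservice', 'mydeployment', 'production', 'mylabel', 'myvm',\n    #       linux_config, os_hd, role_size='Small')\n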
\n    def create_virtual_machine_deployment(self, service_name, deployment_name,\n                                          deployment_slot, label, role_name,\n                                          system_config, os_virtual_hard_disk,\n                                          network_config=None,\n                                          availability_set_name=None,\n                                          data_virtual_hard_disks=None,\n                                          role_size=None,\n                                          role_type='PersistentVMRole',\n                                          virtual_network_name=None):\n        '''\n        Provisions a virtual machine based on the supplied configuration.\n\n        service_name: Name of the hosted service.\n        deployment_name:\n            The name for the deployment. The deployment name must be unique\n            among other deployments for the hosted service.\n        deployment_slot:\n            The environment to which the hosted service is deployed. Valid\n            values are: staging, production\n        label:\n            Specifies an identifier for the deployment. The label can be up to\n            100 characters long. The label can be used for tracking purposes.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image.  Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine\n            allocation in the Windows Azure environment. Virtual machines\n            specified in the same availability set are allocated to different\n            nodes to maximize availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. The only supported\n
            value is PersistentVMRole.\n        virtual_network_name:\n            Specifies the name of an existing virtual network to which the\n            deployment will belong.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('deployment_slot', deployment_slot)\n        _validate_not_none('label', label)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_deployment_path_using_name(service_name),\n            _XmlSerializer.virtual_machine_deployment_to_xml(\n                deployment_name,\n                deployment_slot,\n                label,\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size,\n                virtual_network_name),\n            async=True)\n\n    def add_role(self, service_name, deployment_name, role_name, system_config,\n                 os_virtual_hard_disk, network_config=None,\n                 availability_set_name=None, data_virtual_hard_disks=None,\n                 role_size=None, role_type='PersistentVMRole'):\n        '''\n        Adds a virtual machine to an existing deployment.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        system_config:\n            Contains the metadata required to provision a virtual machine from\n            a Windows or Linux OS image.  Use an instance of\n            WindowsConfigurationSet or LinuxConfigurationSet.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. The only supported\n
            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('system_config', system_config)\n        _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)\n        return self._perform_post(\n            self._get_role_path(service_name, deployment_name),\n            _XmlSerializer.add_role_to_xml(\n                role_name,\n                system_config,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def update_role(self, service_name, deployment_name, role_name,\n                    os_virtual_hard_disk=None, network_config=None,\n                    availability_set_name=None, data_virtual_hard_disks=None,\n                    role_size=None, role_type='PersistentVMRole'):\n        '''\n        Updates the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        os_virtual_hard_disk:\n            Contains the parameters Windows Azure uses to create the operating\n            system disk for the virtual machine.\n        network_config:\n            Encapsulates the metadata required to create the virtual network\n            configuration for a virtual machine. If you do not include a\n            network configuration set you will not be able to access the VM\n            through VIPs over the internet. If your virtual machine belongs to\n            a virtual network you cannot specify which subnet address space\n            it resides under.\n        availability_set_name:\n            Specifies the name of an availability set to which to add the\n            virtual machine. This value controls the virtual machine allocation\n            in the Windows Azure environment. Virtual machines specified in the\n            same availability set are allocated to different nodes to maximize\n            availability.\n        data_virtual_hard_disks:\n            Contains the parameters Windows Azure uses to create a data disk\n            for a virtual machine.\n        role_size:\n            The size of the virtual machine to allocate. The default value is\n            Small. Possible values are: ExtraSmall, Small, Medium, Large,\n            ExtraLarge. The specified value must be compatible with the disk\n            selected in the OSVirtualHardDisk values.\n        role_type:\n            The type of the role for the virtual machine. The only supported\n
            value is PersistentVMRole.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_put(\n            self._get_role_path(service_name, deployment_name, role_name),\n            _XmlSerializer.update_role_to_xml(\n                role_name,\n                os_virtual_hard_disk,\n                role_type,\n                network_config,\n                availability_set_name,\n                data_virtual_hard_disks,\n                role_size),\n            async=True)\n\n    def delete_role(self, service_name, deployment_name, role_name):\n        '''\n        Deletes the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_delete(\n            self._get_role_path(service_name, deployment_name, role_name),\n            async=True)\n
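\n    # Hypothetical usage sketch: resize an existing VM by updating its role\n    # (the new size must be compatible with the attached disks).\n    #\n    #   sms.update_role('myservice', 'mydeployment', 'myvm',\n    #                   role_size='Medium')\n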
\n    def capture_role(self, service_name, deployment_name, role_name,\n                     post_capture_action, target_image_name,\n                     target_image_label, provisioning_configuration=None):\n        '''\n        The Capture Role operation captures a virtual machine image to your\n        image gallery. From the captured image, you can create additional\n        customized virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        post_capture_action:\n            Specifies the action after the capture operation completes.\n            Possible values are: Delete, Reprovision.\n        target_image_name:\n            Specifies the image name of the captured virtual machine.\n        target_image_label:\n            Specifies the friendly name of the captured virtual machine.\n        provisioning_configuration:\n            Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_capture_action', post_capture_action)\n        _validate_not_none('target_image_name', target_image_name)\n        _validate_not_none('target_image_label', target_image_label)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.capture_role_to_xml(\n                post_capture_action,\n                target_image_name,\n                target_image_label,\n                provisioning_configuration),\n            async=True)\n\n    def start_role(self, service_name, deployment_name, role_name):\n        '''\n        Starts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.start_role_operation_to_xml(),\n            async=True)\n\n    def start_roles(self, service_name, deployment_name, role_names):\n        '''\n        Starts the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.start_roles_operation_to_xml(role_names),\n            async=True)\n\n    def restart_role(self, service_name, deployment_name, role_name):\n        '''\n        Restarts the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.restart_role_operation_to_xml(),\n            async=True)\n
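\n    # Hypothetical usage sketch: deallocate several VMs at once so their\n    # compute resources stop accruing charges.\n    #\n    #   sms.shutdown_roles('myservice', 'mydeployment', ['vm1', 'vm2'],\n    #                      post_shutdown_action='StoppedDeallocated')\n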
\n    def shutdown_role(self, service_name, deployment_name, role_name,\n                      post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. If a static Virtual Network IP\n                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_role_instance_operations_path(\n                service_name, deployment_name, role_name),\n            _XmlSerializer.shutdown_role_operation_to_xml(post_shutdown_action),\n            async=True)\n\n    def shutdown_roles(self, service_name, deployment_name, role_names,\n                       post_shutdown_action='Stopped'):\n        '''\n        Shuts down the specified virtual machines.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_names: The names of the roles, as an enumerable of strings.\n        post_shutdown_action:\n            Specifies how the Virtual Machine should be shut down. Values are:\n                Stopped\n                    Shuts down the Virtual Machine but retains the compute\n                    resources. You will continue to be billed for the resources\n                    that the stopped machine uses.\n                StoppedDeallocated\n                    Shuts down the Virtual Machine and releases the compute\n                    resources. You are not billed for the compute resources that\n                    this Virtual Machine uses. If a static Virtual Network IP\n
                    address is assigned to the Virtual Machine, it is reserved.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_names', role_names)\n        _validate_not_none('post_shutdown_action', post_shutdown_action)\n        return self._perform_post(\n            self._get_roles_operations_path(service_name, deployment_name),\n            _XmlSerializer.shutdown_roles_operation_to_xml(\n                role_names, post_shutdown_action),\n            async=True)\n\n    #--Operations for virtual machine images -----------------------------\n    def list_os_images(self):\n        '''\n        Retrieves a list of the OS images from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(),\n                                 Images)\n\n    def get_os_image(self, image_name):\n        '''\n        Retrieves an OS image from the image repository.\n        '''\n        return self._perform_get(self._get_image_path(image_name),\n                                 OSImage)\n\n    def add_os_image(self, label, media_link, name, os):\n        '''\n        Adds an OS image that is currently stored in a storage account in your\n        subscription to the image repository.\n\n        label: Specifies the friendly name of the image.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more virtual machines.\n        os:\n            The operating system type of the OS image. Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_image_path(),\n                                  _XmlSerializer.os_image_to_xml(\n                                      label, media_link, name, os),\n                                  async=True)\n
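\n    # Hypothetical usage sketch: register a generalized VHD that was already\n    # uploaded to blob storage as a reusable OS image.\n    #\n    #   sms.add_os_image('My base image',\n    #                    'http://example.blob.core.windows.net/vhds/base.vhd',\n    #                    'mybaseimage', 'Linux')\n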
\n    def update_os_image(self, image_name, label, media_link, name, os):\n        '''\n        Updates an OS image in your image repository.\n\n        image_name: The name of the image to update.\n        label:\n            Specifies the friendly name of the image to be updated. You cannot\n            use this operation to update images provided by the Windows Azure\n            platform.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the image is located. The blob location must\n            belong to a storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the OS image that Windows Azure uses to\n            identify the image when creating one or more VM Roles.\n        os:\n            The operating system type of the OS image. Possible values are:\n            Linux, Windows\n        '''\n        _validate_not_none('image_name', image_name)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_image_path(image_name),\n                                 _XmlSerializer.os_image_to_xml(\n                                     label, media_link, name, os),\n                                 async=True)\n\n    def delete_os_image(self, image_name, delete_vhd=False):\n        '''\n        Deletes the specified OS image from your image repository.\n\n        image_name: The name of the image.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('image_name', image_name)\n        path = self._get_image_path(image_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def get_data_disk(self, service_name, deployment_name, role_name, lun):\n        '''\n        Retrieves the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_get(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            DataVirtualHardDisk)\n
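\n    # Hypothetical usage sketch: attach a new empty 10 GB data disk at LUN 0\n    # with add_data_disk (defined below); the media link is illustrative.\n    #\n    #   sms.add_data_disk('myservice', 'mydeployment', 'myvm', 0,\n    #                     media_link=disk_media_link,\n    #                     logical_disk_size_in_gb=10)\n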
\n    def add_data_disk(self, service_name, deployment_name, role_name, lun,\n                      host_caching=None, media_link=None, disk_label=None,\n                      disk_name=None, logical_disk_size_in_gb=None,\n                      source_media_link=None):\n        '''\n        Adds a data disk to a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by the\n            <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        disk_label:\n            Specifies the description of the data disk. When you attach a disk,\n            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        source_media_link:\n            Specifies the location of a blob in account storage which is\n            mounted as a data disk when the virtual machine is created.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_post(\n            self._get_data_disk_path(service_name, deployment_name, role_name),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                lun,\n                logical_disk_size_in_gb,\n                media_link,\n                source_media_link),\n            async=True)\n\n    def update_data_disk(self, service_name, deployment_name, role_name, lun,\n                         host_caching=None, media_link=None, updated_lun=None,\n                         disk_label=None, disk_name=None,\n                         logical_disk_size_in_gb=None):\n        '''\n        Updates the specified data disk attached to the specified virtual\n        machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through\n            15.\n        host_caching:\n            Specifies the platform caching behavior of data disk blob for\n            read/write efficiency. The default value is ReadOnly. Possible\n            values are: None, ReadOnly, ReadWrite\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the subscription specified by\n            the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        updated_lun:\n            Specifies the Logical Unit Number (LUN) for the disk. The LUN\n            specifies the slot in which the data drive appears when mounted\n            for usage by the virtual machine. Valid LUN values are 0 through 15.\n        disk_label:\n            Specifies the description of the data disk. When you attach a disk,\n
            either by directly referencing a media using the MediaLink element\n            or specifying the target disk size, you can use the DiskLabel\n            element to customize the name property of the target data disk.\n        disk_name:\n            Specifies the name of the disk. Windows Azure uses the specified\n            disk to create the data disk for the machine and populates this\n            field with the disk name.\n        logical_disk_size_in_gb:\n            Specifies the size, in GB, of an empty disk to be attached to the\n            role. The disk can be created as part of disk attach or create VM\n            role call by specifying the value for this property. Windows Azure\n            creates the empty disk based on size preference and attaches the\n            newly created disk to the Role.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        return self._perform_put(\n            self._get_data_disk_path(\n                service_name, deployment_name, role_name, lun),\n            _XmlSerializer.data_virtual_hard_disk_to_xml(\n                host_caching,\n                disk_label,\n                disk_name,\n                updated_lun,\n                logical_disk_size_in_gb,\n                media_link,\n                None),\n            async=True)\n\n    def delete_data_disk(self, service_name, deployment_name, role_name, lun, delete_vhd=False):\n        '''\n        Removes the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        path = self._get_data_disk_path(service_name, deployment_name, role_name, lun)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for disks in the image repository ----------------------\n    def list_disks(self):\n        '''\n        Retrieves a list of the disks in your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(),\n                                 Disks)\n\n    def get_disk(self, disk_name):\n        '''\n        Retrieves a disk from your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(disk_name),\n                                 Disk)\n
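\n    # Hypothetical usage sketch: register an uploaded VHD as a data disk in\n    # the image repository with add_disk (defined below).\n    #\n    #   sms.add_disk(False, 'My data disk',\n    #                'http://example.blob.core.windows.net/vhds/data.vhd',\n    #                'mydatadisk', 'Linux')\n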
\n    def delete_data_disk(self, service_name, deployment_name, role_name, lun, delete_vhd=False):\n        '''\n        Removes the specified data disk from a virtual machine.\n\n        service_name: The name of the service.\n        deployment_name: The name of the deployment.\n        role_name: The name of the role.\n        lun: The Logical Unit Number (LUN) for the disk.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('service_name', service_name)\n        _validate_not_none('deployment_name', deployment_name)\n        _validate_not_none('role_name', role_name)\n        _validate_not_none('lun', lun)\n        path = self._get_data_disk_path(service_name, deployment_name, role_name, lun)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path, async=True)\n\n    #--Operations for virtual machine disks ------------------------------\n    def list_disks(self):\n        '''\n        Retrieves a list of the disks in your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(),\n                                 Disks)\n\n    def get_disk(self, disk_name):\n        '''\n        Retrieves a disk from your image repository.\n        '''\n        return self._perform_get(self._get_disk_path(disk_name),\n                                 Disk)\n\n    def add_disk(self, has_operating_system, label, media_link, name, os):\n        '''\n        Adds a disk to the user image repository. The disk can be an OS disk\n        or a data disk.\n\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. Possible values are: Linux, Windows\n        '''\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_post(self._get_disk_path(),\n                                  _XmlSerializer.disk_to_xml(\n                                      has_operating_system,\n                                      label,\n                                      media_link,\n                                      name,\n                                      os))\n
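\n    # Illustrative sketch (hypothetical names): register an existing VHD blob\n    # as a data disk in the image repository:\n    #\n    #     sms.add_disk(False, 'my data disk',\n    #                  'http://example.blob.core.windows.net/disks/mydisk.vhd',\n    #                  'mydisk', 'Linux')\n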
\n    def update_disk(self, disk_name, has_operating_system, label, media_link,\n                    name, os):\n        '''\n        Updates an existing disk in your image repository.\n\n        disk_name: The name of the disk to update.\n        has_operating_system:\n            Specifies whether the disk contains an operating system. Only a\n            disk with an operating system installed can be mounted as OS Drive.\n        label: Specifies the description of the disk.\n        media_link:\n            Specifies the location of the blob in Windows Azure blob store\n            where the media for the disk is located. The blob location must\n            belong to the storage account in the current subscription specified\n            by the <subscription-id> value in the operation call. Example:\n            http://example.blob.core.windows.net/disks/mydisk.vhd\n        name:\n            Specifies a name for the disk. Windows Azure uses the name to\n            identify the disk when creating virtual machines from the disk.\n        os: The OS type of the disk. Possible values are: Linux, Windows\n        '''\n        _validate_not_none('disk_name', disk_name)\n        _validate_not_none('has_operating_system', has_operating_system)\n        _validate_not_none('label', label)\n        _validate_not_none('media_link', media_link)\n        _validate_not_none('name', name)\n        _validate_not_none('os', os)\n        return self._perform_put(self._get_disk_path(disk_name),\n                                 _XmlSerializer.disk_to_xml(\n                                     has_operating_system,\n                                     label,\n                                     media_link,\n                                     name,\n                                     os))\n\n    def delete_disk(self, disk_name, delete_vhd=False):\n        '''\n        Deletes the specified data or operating system disk from your image\n        repository.\n\n        disk_name: The name of the disk to delete.\n        delete_vhd: Deletes the underlying vhd blob in Azure storage.\n        '''\n        _validate_not_none('disk_name', disk_name)\n        path = self._get_disk_path(disk_name)\n        if delete_vhd:\n            path += '?comp=media'\n        return self._perform_delete(path)\n\n    #--Operations for virtual networks  ------------------------------\n    def list_virtual_network_sites(self):\n        '''\n        Retrieves a list of the virtual networks.\n        '''\n        return self._perform_get(self._get_virtual_network_site_path(), VirtualNetworkSites)\n
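\n    # For orientation (illustrative; assumes the base client's _get_path\n    # helper prefixes '/<subscription-id>/'): the helpers below build paths\n    # such as\n    #\n    #     /<subscription-id>/services/hostedservices/myservice\n    #         /deployments/mydeployment/roles/myrole/DataDisks/1\n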
\n    #--Helper functions --------------------------------------------------\n    def _get_virtual_network_site_path(self):\n        return self._get_path('services/networking/virtualnetwork', None)\n\n    def _get_storage_service_path(self, service_name=None):\n        return self._get_path('services/storageservices', service_name)\n\n    def _get_hosted_service_path(self, service_name=None):\n        return self._get_path('services/hostedservices', service_name)\n\n    def _get_deployment_path_using_slot(self, service_name, slot=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deploymentslots', slot)\n\n    def _get_deployment_path_using_name(self, service_name,\n                                        deployment_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments', deployment_name)\n\n    def _get_role_path(self, service_name, deployment_name, role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roles', role_name)\n\n    def _get_role_instance_operations_path(self, service_name, deployment_name,\n                                           role_name=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roleinstances', role_name) + '/Operations'\n\n    def _get_roles_operations_path(self, service_name, deployment_name):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + deployment_name +\n                              '/roles/Operations', None)\n\n    def _get_data_disk_path(self, service_name, deployment_name, role_name,\n                            lun=None):\n        return self._get_path('services/hostedservices/' + _str(service_name) +\n                              '/deployments/' + _str(deployment_name) +\n                              '/roles/' + _str(role_name) + '/DataDisks', lun)\n\n    def _get_disk_path(self, disk_name=None):\n        return self._get_path('services/disks', disk_name)\n\n    def _get_image_path(self, image_name=None):\n        return self._get_path('services/images', image_name)\n"
  },
  {
    "path": "OSPatching/azure/servicemanagement/sqldatabasemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _parse_service_resources_response,\n    )\nfrom azure.servicemanagement import (\n    Servers,\n    Database,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass SqlDatabaseManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on SQL Database\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(SqlDatabaseManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for sql servers ----------------------------------------\n    def list_servers(self):\n        '''\n        List the SQL servers defined on the account.\n        '''\n        return self._perform_get(self._get_list_servers_path(),\n                                 Servers)\n\n    #--Operations for sql databases ----------------------------------------\n    def list_databases(self, name):\n        '''\n        List the SQL databases defined on the specified server name\n        '''\n        response = self._perform_get(self._get_list_databases_path(name),\n                                     None)\n        return _parse_service_resources_response(response, Database)\n\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_servers_path(self):\n        return self._get_path('services/sqlservers/servers', None)\n\n    def _get_list_databases_path(self, name):\n        # *contentview=generic is mandatory*\n        return self._get_path('services/sqlservers/servers/',\n                              name) + '/databases?contentview=generic' \n    \n"
\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_servers_path(self):\n        return self._get_path('services/sqlservers/servers', None)\n\n    def _get_list_databases_path(self, name):\n        # *contentview=generic is mandatory*\n        return self._get_path('services/sqlservers/servers/',\n                              name) + '/databases?contentview=generic' \n    \n"
  },
  {
    "path": "OSPatching/azure/servicemanagement/websitemanagementservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    MANAGEMENT_HOST,\n    _str,\n    )\nfrom azure.servicemanagement import (\n    WebSpaces,\n    WebSpace,\n    Sites,\n    Site,\n    MetricResponses,\n    MetricDefinitions,\n    PublishData,\n    _XmlSerializer,\n    )\nfrom azure.servicemanagement.servicemanagementclient import (\n    _ServiceManagementClient,\n    )\n\nclass WebsiteManagementService(_ServiceManagementClient):\n    ''' Note that this class is a preliminary work on WebSite\n        management. Since it lack a lot a features, final version\n        can be slightly different from the current one.\n    '''\n\n    def __init__(self, subscription_id=None, cert_file=None,\n                 host=MANAGEMENT_HOST):\n        super(WebsiteManagementService, self).__init__(\n            subscription_id, cert_file, host)\n\n    #--Operations for web sites ----------------------------------------\n    def list_webspaces(self):\n        '''\n        List the webspaces defined on the account.\n        '''\n        return self._perform_get(self._get_list_webspaces_path(),\n                                 WebSpaces)\n\n    def get_webspace(self, webspace_name):\n        '''\n        Get details of a specific webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_webspace_details_path(webspace_name),\n                                 WebSpace)\n\n    def list_sites(self, webspace_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        '''\n        return self._perform_get(self._get_sites_path(webspace_name),\n                                 Sites)\n\n    def get_site(self, webspace_name, website_name):\n        '''\n        List the web sites defined on this webspace.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_sites_details_path(webspace_name,\n                                                              website_name),\n                                 Site)\n\n    def create_site(self, webspace_name, website_name, geo_region, host_names,\n                    plan='VirtualDedicatedPlan', compute_mode='Shared',\n                    server_farm=None, site_mode=None):\n        '''\n        Create a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        geo_region:\n            The geographical region of the webspace that will be created.\n        host_names:\n            An array of fully qualified domain names for website. 
\n    def create_site(self, webspace_name, website_name, geo_region, host_names,\n                    plan='VirtualDedicatedPlan', compute_mode='Shared',\n                    server_farm=None, site_mode=None):\n        '''\n        Create a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        geo_region:\n            The geographical region of the webspace that will be created.\n        host_names:\n            An array of fully qualified domain names for the website. Only one\n            hostname can be specified in the azurewebsites.net domain.\n            The hostname should match the name of the website. Custom domains\n            can only be specified for Shared or Standard websites.\n        plan:\n            This value must be 'VirtualDedicatedPlan'.\n        compute_mode:\n            This value should be 'Shared' for the Free or Paid Shared\n            offerings, or 'Dedicated' for the Standard offering. The default\n            value is 'Shared'. If you set it to 'Dedicated', you must specify\n            a value for the server_farm parameter.\n        server_farm:\n            The name of the Server Farm associated with this website. This is\n            a required value for Standard mode.\n        site_mode:\n            Can be None, 'Limited' or 'Basic'. This value is 'Limited' for the\n            Free offering, and 'Basic' for the Paid Shared offering. Standard\n            mode does not use the site_mode parameter; it uses the compute_mode\n            parameter.\n        '''\n        xml = _XmlSerializer.create_website_to_xml(webspace_name, website_name, geo_region, plan, host_names, compute_mode, server_farm, site_mode)\n        return self._perform_post(\n            self._get_sites_path(webspace_name),\n            xml,\n            Site)\n
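\n    # Illustrative sketch (hypothetical names/region): create a site on the\n    # Free offering with a single azurewebsites.net hostname:\n    #\n    #     wms.create_site('mywebspace', 'mysite', 'West US',\n    #                     ['mysite.azurewebsites.net'],\n    #                     site_mode='Limited')\n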
\n    def delete_site(self, webspace_name, website_name,\n                    delete_empty_server_farm=False, delete_metrics=False):\n        '''\n        Delete a website.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        delete_empty_server_farm:\n            If the site being deleted is the last web site in a server farm,\n            you can delete the server farm by setting this to True.\n        delete_metrics:\n            To also delete the metrics for the site that you are deleting, you\n            can set this to True.\n        '''\n        path = self._get_sites_details_path(webspace_name, website_name)\n        query = ''\n        if delete_empty_server_farm:\n            query += '&deleteEmptyServerFarm=true'\n        if delete_metrics:\n            query += '&deleteMetrics=true'\n        if query:\n            path = path + '?' + query.lstrip('&')\n        return self._perform_delete(path)\n\n    def restart_site(self, webspace_name, website_name):\n        '''\n        Restart a web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_post(\n            self._get_restart_path(webspace_name, website_name),\n            '')\n\n    def get_historical_usage_metrics(self, webspace_name, website_name,\n                                     metrics = None, start_time=None, end_time=None, time_grain=None):\n        '''\n        Get historical usage metrics.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        metrics: Optional. List of metric names. Otherwise, all metrics are returned.\n        start_time: Optional. An ISO8601 date. Otherwise, current hour is used.\n        end_time: Optional. An ISO8601 date. Otherwise, current time is used.\n        time_grain: Optional. A rollup name, such as P1D. Otherwise, the default rollup for the metrics is used.\n        More information and metric names at:\n        http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx\n        '''        \n        metrics = ('names='+','.join(metrics)) if metrics else ''\n        start_time = ('StartTime='+start_time) if start_time else ''\n        end_time = ('EndTime='+end_time) if end_time else ''\n        time_grain = ('TimeGrain='+time_grain) if time_grain else ''\n        parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v))\n        parameters = '?'+parameters if parameters else ''\n        return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters,\n                                 MetricResponses)\n
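\n    # Illustrative example (hypothetical metric names) of the query string\n    # composed above; values are appended as-is, without URL-encoding:\n    #\n    #     ?names=CpuTime,Requests&TimeGrain=P1D\n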
\n    def get_metric_definitions(self, webspace_name, website_name):\n        '''\n        Get metric definitions of metrics available for this web site.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name),\n                                 MetricDefinitions)\n\n    def get_publish_profile_xml(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as a string.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 None).body.decode(\"utf-8\")\n\n    def get_publish_profile(self, webspace_name, website_name):\n        '''\n        Get a site's publish profile as an object.\n\n        webspace_name: The name of the webspace.\n        website_name: The name of the website.\n        '''\n        return self._perform_get(self._get_publishxml_path(webspace_name, website_name),\n                                 PublishData)\n\n    #--Helper functions --------------------------------------------------\n    def _get_list_webspaces_path(self):\n        return self._get_path('services/webspaces', None)\n\n    def _get_webspace_details_path(self, webspace_name):\n        return self._get_path('services/webspaces/', webspace_name)\n\n    def _get_sites_path(self, webspace_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites'\n\n    def _get_sites_details_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name)\n\n    def _get_restart_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/restart/' \n\n    def _get_historical_usage_metrics_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metrics/' \n\n    def _get_metric_definitions_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/metricdefinitions/' \n\n    def _get_publishxml_path(self, webspace_name, website_name):\n        return self._get_path('services/webspaces/',\n                              webspace_name) + '/sites/' + _str(website_name) + '/publishxml/' \n"
  },
  {
    "path": "OSPatching/azure/storage/__init__.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport sys\nimport types\n\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom azure import (WindowsAzureData,\n                   WindowsAzureError,\n                   METADATA_NS,\n                   xml_escape,\n                   _create_entry,\n                   _decode_base64_to_text,\n                   _decode_base64_to_bytes,\n                   _encode_base64,\n                   _fill_data_minidom,\n                   _fill_instance_element,\n                   _get_child_nodes,\n                   _get_child_nodesNS,\n                   _get_children_from_path,\n                   _get_entry_properties,\n                   _general_error_handler,\n                   _list_of,\n                   _parse_response_for_dict,\n                   _sign_string,\n                   _unicode_type,\n                   _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,\n                   )\n\n# x-ms-version for storage service.\nX_MS_VERSION = '2012-02-12'\n\n\nclass EnumResultsBase(object):\n\n    ''' base class for EnumResults. '''\n\n    def __init__(self):\n        self.prefix = u''\n        self.marker = u''\n        self.max_results = 0\n        self.next_marker = u''\n\n\nclass ContainerEnumResults(EnumResultsBase):\n\n    ''' Blob Container list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.containers = _list_of(Container)\n\n    def __iter__(self):\n        return iter(self.containers)\n\n    def __len__(self):\n        return len(self.containers)\n\n    def __getitem__(self, index):\n        return self.containers[index]\n\n\nclass Container(WindowsAzureData):\n\n    ''' Blob container class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.properties = Properties()\n        self.metadata = {}\n\n\nclass Properties(WindowsAzureData):\n\n    ''' Blob container's properties class. '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n\n\nclass RetentionPolicy(WindowsAzureData):\n\n    ''' RetentionPolicy in service properties. '''\n\n    def __init__(self):\n        self.enabled = False\n        self.__dict__['days'] = None\n\n    def get_days(self):\n        # convert days to int value\n        return int(self.__dict__['days'])\n\n    def set_days(self, value):\n        ''' set default days if days is set to empty. '''\n        self.__dict__['days'] = value\n\n    days = property(fget=get_days, fset=set_days)\n\n\nclass Logging(WindowsAzureData):\n\n    ''' Logging class in service properties. 
'''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.delete = False\n        self.read = False\n        self.write = False\n        self.retention_policy = RetentionPolicy()\n\n\nclass Metrics(WindowsAzureData):\n\n    ''' Metrics class in service properties. '''\n\n    def __init__(self):\n        self.version = u'1.0'\n        self.enabled = False\n        self.include_apis = None\n        self.retention_policy = RetentionPolicy()\n\n\nclass StorageServiceProperties(WindowsAzureData):\n\n    ''' Storage Service Properties class. '''\n\n    def __init__(self):\n        self.logging = Logging()\n        self.metrics = Metrics()\n
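\n\n# Illustrative sketch (not part of the original file): enable read logging\n# with a one-week retention before handing the object to a\n# set-service-properties call:\n#\n#     props = StorageServiceProperties()\n#     props.logging.read = True\n#     props.logging.retention_policy.enabled = True\n#     props.logging.retention_policy.days = 7\n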
\n\nclass AccessPolicy(WindowsAzureData):\n\n    ''' Access Policy class in service properties. '''\n\n    def __init__(self, start=u'', expiry=u'', permission=u''):\n        self.start = start\n        self.expiry = expiry\n        self.permission = permission\n\n\nclass SignedIdentifier(WindowsAzureData):\n\n    ''' Signed Identifier class for service properties. '''\n\n    def __init__(self):\n        self.id = u''\n        self.access_policy = AccessPolicy()\n\n\nclass SignedIdentifiers(WindowsAzureData):\n\n    ''' SignedIdentifier list. '''\n\n    def __init__(self):\n        self.signed_identifiers = _list_of(SignedIdentifier)\n\n    def __iter__(self):\n        return iter(self.signed_identifiers)\n\n    def __len__(self):\n        return len(self.signed_identifiers)\n\n    def __getitem__(self, index):\n        return self.signed_identifiers[index]\n\n\nclass BlobEnumResults(EnumResultsBase):\n\n    ''' Blob list.'''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.blobs = _list_of(Blob)\n        self.prefixes = _list_of(BlobPrefix)\n        self.delimiter = ''\n\n    def __iter__(self):\n        return iter(self.blobs)\n\n    def __len__(self):\n        return len(self.blobs)\n\n    def __getitem__(self, index):\n        return self.blobs[index]\n\n\nclass BlobResult(bytes):\n\n    def __new__(cls, blob, properties):\n        return bytes.__new__(cls, blob if blob else b'')\n\n    def __init__(self, blob, properties):\n        self.properties = properties\n\n\nclass Blob(WindowsAzureData):\n\n    ''' Blob class. '''\n\n    def __init__(self):\n        self.name = u''\n        self.snapshot = u''\n        self.url = u''\n        self.properties = BlobProperties()\n        self.metadata = {}\n\n\nclass BlobProperties(WindowsAzureData):\n\n    ''' Blob Properties '''\n\n    def __init__(self):\n        self.last_modified = u''\n        self.etag = u''\n        self.content_length = 0\n        self.content_type = u''\n        self.content_encoding = u''\n        self.content_language = u''\n        self.content_md5 = u''\n        self.xms_blob_sequence_number = 0\n        self.blob_type = u''\n        self.lease_status = u''\n        self.lease_state = u''\n        self.lease_duration = u''\n        self.copy_id = u''\n        self.copy_source = u''\n        self.copy_status = u''\n        self.copy_progress = u''\n        self.copy_completion_time = u''\n        self.copy_status_description = u''\n\n\nclass BlobPrefix(WindowsAzureData):\n\n    ''' BlobPrefix in Blob. '''\n\n    def __init__(self):\n        self.name = ''\n\n\nclass BlobBlock(WindowsAzureData):\n\n    ''' BlobBlock class '''\n\n    def __init__(self, id=None, size=None):\n        self.id = id\n        self.size = size\n\n\nclass BlobBlockList(WindowsAzureData):\n\n    ''' BlobBlockList class '''\n\n    def __init__(self):\n        self.committed_blocks = []\n        self.uncommitted_blocks = []\n\n\nclass PageRange(WindowsAzureData):\n\n    ''' Page Range for page blob. '''\n\n    def __init__(self):\n        self.start = 0\n        self.end = 0\n\n\nclass PageList(object):\n\n    ''' Page list for page blob. '''\n\n    def __init__(self):\n        self.page_ranges = _list_of(PageRange)\n\n    def __iter__(self):\n        return iter(self.page_ranges)\n\n    def __len__(self):\n        return len(self.page_ranges)\n\n    def __getitem__(self, index):\n        return self.page_ranges[index]\n\n\nclass QueueEnumResults(EnumResultsBase):\n\n    ''' Queue list. '''\n\n    def __init__(self):\n        EnumResultsBase.__init__(self)\n        self.queues = _list_of(Queue)\n\n    def __iter__(self):\n        return iter(self.queues)\n\n    def __len__(self):\n        return len(self.queues)\n\n    def __getitem__(self, index):\n        return self.queues[index]\n\n\nclass Queue(WindowsAzureData):\n\n    ''' Queue class '''\n\n    def __init__(self):\n        self.name = u''\n        self.url = u''\n        self.metadata = {}\n\n\nclass QueueMessagesList(WindowsAzureData):\n\n    ''' Queue message list. '''\n\n    def __init__(self):\n        self.queue_messages = _list_of(QueueMessage)\n\n    def __iter__(self):\n        return iter(self.queue_messages)\n\n    def __len__(self):\n        return len(self.queue_messages)\n\n    def __getitem__(self, index):\n        return self.queue_messages[index]\n\n\nclass QueueMessage(WindowsAzureData):\n\n    ''' Queue message class. '''\n\n    def __init__(self):\n        self.message_id = u''\n        self.insertion_time = u''\n        self.expiration_time = u''\n        self.pop_receipt = u''\n        self.time_next_visible = u''\n        self.dequeue_count = u''\n        self.message_text = u''\n\n\nclass Entity(WindowsAzureData):\n\n    ''' Entity class. The attributes of entity will be created dynamically. '''\n    pass\n\n\nclass EntityProperty(WindowsAzureData):\n\n    ''' Entity property. Contains type and value.  '''\n\n    def __init__(self, type=None, value=None):\n        self.type = type\n        self.value = value\n
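\n\n# Illustrative sketch (not part of the original file): EntityProperty lets a\n# caller pin an explicit Edm type instead of relying on the automatic\n# Python-type mapping defined further below:\n#\n#     entity = Entity()\n#     entity.PartitionKey = 'p1'\n#     entity.RowKey = 'r1'\n#     entity.big_value = EntityProperty('Edm.Int64', 2 ** 40)\n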
\n\nclass Table(WindowsAzureData):\n\n    ''' Only for intellisense and telling the user the return type. '''\n    pass\n\n\ndef _parse_blob_enum_results_list(response):\n    respbody = response.body\n    return_obj = BlobEnumResults()\n    doc = minidom.parseString(respbody)\n\n    for enum_results in _get_child_nodes(doc, 'EnumerationResults'):\n        for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'):\n            return_obj.blobs.append(_fill_instance_element(child, Blob))\n\n        for child in _get_children_from_path(enum_results,\n                                             'Blobs',\n                                             'BlobPrefix'):\n            return_obj.prefixes.append(\n                _fill_instance_element(child, BlobPrefix))\n\n        for name, value in vars(return_obj).items():\n            if name == 'blobs' or name == 'prefixes':\n                continue\n            value = _fill_data_minidom(enum_results, name, value)\n            if value is not None:\n                setattr(return_obj, name, value)\n\n    return return_obj\n\n\ndef _update_storage_header(request):\n    ''' add additional headers for storage request. '''\n    if request.body:\n        assert isinstance(request.body, bytes)\n\n    # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.\n    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:\n        request.headers.append(('Content-Length', str(len(request.body))))\n\n    # append additional headers based on the service\n    request.headers.append(('x-ms-version', X_MS_VERSION))\n\n    # append x-ms-meta name, values to header\n    for name, value in request.headers:\n        if 'x-ms-meta-name-values' in name and value:\n            for meta_name, meta_value in value.items():\n                request.headers.append(('x-ms-meta-' + meta_name, meta_value))\n            request.headers.remove((name, value))\n            break\n    return request\n\n\ndef _update_storage_blob_header(request, account_name, account_key):\n    ''' add additional headers for storage blob request. '''\n\n    request = _update_storage_header(request)\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(\n        ('Content-Type', 'application/octet-stream Charset=UTF-8'))\n    request.headers.append(('Authorization',\n                            _sign_storage_blob_request(request,\n                                                       account_name,\n                                                       account_key)))\n\n    return request.headers\n\n\ndef _update_storage_queue_header(request, account_name, account_key):\n    ''' add additional headers for storage queue request. '''\n    return _update_storage_blob_header(request, account_name, account_key)\n\n\ndef _update_storage_table_header(request):\n    ''' add additional headers for storage table request. 
'''\n\n    request = _update_storage_header(request)\n    for name, _ in request.headers:\n        if name.lower() == 'content-type':\n            break\n    else:\n        request.headers.append(('Content-Type', 'application/atom+xml'))\n    request.headers.append(('DataServiceVersion', '2.0;NetFx'))\n    request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))\n    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n    request.headers.append(('x-ms-date', current_time))\n    request.headers.append(('Date', current_time))\n    return request.headers\n\n\ndef _sign_storage_blob_request(request, account_name, account_key):\n    '''\n    Returns the signed string for blob request which is used to set\n    Authorization header. This is also used to sign queue request.\n    '''\n\n    uri_path = request.path.split('?')[0]\n\n    # method to sign\n    string_to_sign = request.method + '\\n'\n\n    # get headers to sign\n    headers_to_sign = [\n        'content-encoding', 'content-language', 'content-length',\n        'content-md5', 'content-type', 'date', 'if-modified-since',\n        'if-match', 'if-none-match', 'if-unmodified-since', 'range']\n\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get x-ms header to sign\n    x_ms_headers = []\n    for name, value in request.headers:\n        if 'x-ms' in name:\n            x_ms_headers.append((name.lower(), value))\n    x_ms_headers.sort()\n    for name, value in x_ms_headers:\n        if value:\n            string_to_sign += ''.join([name, ':', value, '\\n'])\n\n    # get account_name and uri path to sign\n    string_to_sign += '/' + account_name + uri_path\n\n    # get query string to sign if it is not table service\n    query_to_sign = request.query\n    query_to_sign.sort()\n\n    current_name = ''\n    for name, value in query_to_sign:\n        if value:\n            if current_name != name:\n                string_to_sign += '\\n' + name + ':' + value\n            else:\n                string_to_sign += '\\n' + ',' + value\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _sign_storage_table_request(request, account_name, account_key):\n    uri_path = request.path.split('?')[0]\n\n    string_to_sign = request.method + '\\n'\n    headers_to_sign = ['content-md5', 'content-type', 'date']\n    request_header_dict = dict((name.lower(), value)\n                               for name, value in request.headers if value)\n    string_to_sign += '\\n'.join(request_header_dict.get(x, '')\n                                for x in headers_to_sign) + '\\n'\n\n    # get account_name and uri path to sign\n    string_to_sign += ''.join(['/', account_name, uri_path])\n\n    for name, value in request.query:\n        if name == 'comp' and uri_path == '/':\n            string_to_sign += '?comp=' + value\n            break\n\n    # sign the request\n    auth_string = 'SharedKey ' + account_name + ':' + \\\n        _sign_string(account_key, string_to_sign)\n    return auth_string\n\n\ndef _to_python_bool(value):\n    if value.lower() == 'true':\n        return True\n    return False\n\n\ndef _to_entity_int(data):\n    int_max = (2 << 30) - 1\n    if data > (int_max) or data < (int_max + 1) * 
(-1):\n        return 'Edm.Int64', str(data)\n    else:\n        return 'Edm.Int32', str(data)\n\n\ndef _to_entity_bool(value):\n    if value:\n        return 'Edm.Boolean', 'true'\n    return 'Edm.Boolean', 'false'\n\n\ndef _to_entity_datetime(value):\n    return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')\n\n\ndef _to_entity_float(value):\n    return 'Edm.Double', str(value)\n\n\ndef _to_entity_property(value):\n    if value.type == 'Edm.Binary':\n        return value.type, _encode_base64(value.value)\n\n    return value.type, str(value.value)\n\n\ndef _to_entity_none(value):\n    return None, None\n\n\ndef _to_entity_str(value):\n    return 'Edm.String', value\n\n\n# Tables of conversions to and from entity types.  We support specific\n# datatypes, and beyond that the user can use an EntityProperty to get\n# custom data type support.\n\ndef _from_entity_binary(value):\n    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))\n\n\ndef _from_entity_int(value):\n    return int(value)\n\n\ndef _from_entity_datetime(value):\n    format = '%Y-%m-%dT%H:%M:%S'\n    if '.' in value:\n        format = format + '.%f'\n    if value.endswith('Z'):\n        format = format + 'Z'\n    return datetime.strptime(value, format)\n\n_ENTITY_TO_PYTHON_CONVERSIONS = {\n    'Edm.Binary': _from_entity_binary,\n    'Edm.Int32': _from_entity_int,\n    'Edm.Int64': _from_entity_int,\n    'Edm.Double': float,\n    'Edm.Boolean': _to_python_bool,\n    'Edm.DateTime': _from_entity_datetime,\n}\n\n# Conversion from Python type to a function which returns a tuple of the\n# type string and content string.\n_PYTHON_TO_ENTITY_CONVERSIONS = {\n    int: _to_entity_int,\n    bool: _to_entity_bool,\n    datetime: _to_entity_datetime,\n    float: _to_entity_float,\n    EntityProperty: _to_entity_property,\n    str: _to_entity_str,\n}\n\nif sys.version_info < (3,):\n    _PYTHON_TO_ENTITY_CONVERSIONS.update({\n        long: _to_entity_int,\n        types.NoneType: _to_entity_none,\n        unicode: _to_entity_str,\n    })\n\n\ndef _convert_entity_to_xml(source):\n    ''' Converts an entity object to xml to send.\n\n    The entity format is:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n\n    # construct the entity body included in <m:properties> and </m:properties>\n    entity_body = '<m:properties xml:space=\"preserve\">{properties}</m:properties>'\n\n    if 
isinstance(source, WindowsAzureData):\n        source = vars(source)\n\n    properties_str = ''\n\n    # set properties type for types we know if value has no type info.\n    # if value has type info, then set the type to value.type\n    for name, value in source.items():\n        mtype = ''\n        conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))\n        if conv is None and sys.version_info >= (3,) and value is None:\n            conv = _to_entity_none\n        if conv is None:\n            raise WindowsAzureError(\n                _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(\n                    type(value).__name__))\n\n        mtype, value = conv(value)\n\n        # form the property node\n        properties_str += ''.join(['<d:', name])\n        if value is None:\n            properties_str += ' m:null=\"true\" />'\n        else:\n            if mtype:\n                properties_str += ''.join([' m:type=\"', mtype, '\"'])\n            properties_str += ''.join(['>',\n                                      xml_escape(value), '</d:', name, '>'])\n\n    if sys.version_info < (3,):\n        if isinstance(properties_str, unicode):\n            properties_str = properties_str.encode('utf-8')\n\n    # generate the entity_body\n    entity_body = entity_body.format(properties=properties_str)\n    xmlstr = _create_entry(entity_body)\n    return xmlstr\n\n\ndef _convert_table_to_xml(table_name):\n    '''\n    Create xml to send for a given table name. The xml format for a table is\n    the same as for an entity; the only difference is that a table has a\n    single property, 'TableName', so we just call _convert_entity_to_xml.\n\n    table_name: the name of the table\n    '''\n    return _convert_entity_to_xml({'TableName': table_name})\n\n\ndef _convert_block_list_to_xml(block_id_list):\n    '''\n    Convert a block list to xml to send.\n\n    block_id_list:\n        a str list containing the block ids that are used in put_block_list.\n    Each block id is emitted as a <Latest> element, so the most recently\n    uploaded version of each block is committed.\n    '''\n    if block_id_list is None:\n        return ''\n    xml = '<?xml version=\"1.0\" encoding=\"utf-8\"?><BlockList>'\n    for value in block_id_list:\n        xml += '<Latest>{0}</Latest>'.format(_encode_base64(value))\n\n    return xml + '</BlockList>'\n\n\ndef _create_blob_result(response):\n    blob_properties = _parse_response_for_dict(response)\n    return BlobResult(response.body, blob_properties)\n\n\ndef _convert_response_to_block_list(response):\n    '''\n    Converts xml response to block list class.\n    '''\n    blob_block_list = BlobBlockList()\n\n    xmldoc = minidom.parseString(response.body)\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'CommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.committed_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    for xml_block in _get_children_from_path(xmldoc,\n                                             'BlockList',\n                                             'UncommittedBlocks',\n                                             'Block'):\n        xml_block_id = _decode_base64_to_text(\n            _get_child_nodes(xml_block, 
'Name')[0].firstChild.nodeValue)\n        xml_block_size = int(\n            _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)\n        blob_block_list.uncommitted_blocks.append(\n            BlobBlock(xml_block_id, xml_block_size))\n\n    return blob_block_list\n\n\ndef _remove_prefix(name):\n    colon = name.find(':')\n    if colon != -1:\n        return name[colon + 1:]\n    return name\n\n\ndef _convert_response_to_entity(response):\n    if response is None:\n        return response\n    return _convert_xml_to_entity(response.body)\n\n\ndef _convert_xml_to_entity(xmlstr):\n    ''' Convert xml response to entity.\n\n    The format of entity:\n    <entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\">\n      <title />\n      <updated>2008-09-18T23:46:19.3857256Z</updated>\n      <author>\n        <name />\n      </author>\n      <id />\n      <content type=\"application/xml\">\n        <m:properties>\n          <d:Address>Mountain View</d:Address>\n          <d:Age m:type=\"Edm.Int32\">23</d:Age>\n          <d:AmountDue m:type=\"Edm.Double\">200.23</d:AmountDue>\n          <d:BinaryData m:type=\"Edm.Binary\" m:null=\"true\" />\n          <d:CustomerCode m:type=\"Edm.Guid\">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>\n          <d:CustomerSince m:type=\"Edm.DateTime\">2008-07-10T00:00:00</d:CustomerSince>\n          <d:IsActive m:type=\"Edm.Boolean\">true</d:IsActive>\n          <d:NumOfOrders m:type=\"Edm.Int64\">255</d:NumOfOrders>\n          <d:PartitionKey>mypartitionkey</d:PartitionKey>\n          <d:RowKey>myrowkey1</d:RowKey>\n          <d:Timestamp m:type=\"Edm.DateTime\">0001-01-01T00:00:00</d:Timestamp>\n        </m:properties>\n      </content>\n    </entry>\n    '''\n    xmldoc = minidom.parseString(xmlstr)\n\n    xml_properties = None\n    for entry in _get_child_nodes(xmldoc, 'entry'):\n        for content in _get_child_nodes(entry, 'content'):\n            # TODO: Namespace\n            xml_properties = _get_child_nodesNS(\n                content, METADATA_NS, 'properties')\n\n    if not xml_properties:\n        return None\n\n    entity = Entity()\n    # extract each property node and get the type from attribute and node value\n    for xml_property in xml_properties[0].childNodes:\n        name = _remove_prefix(xml_property.nodeName)\n        # exclude the Timestamp since it is auto added by azure when\n        # inserting entity. 
We don't want this to mix with real properties\n        if name in ['Timestamp']:\n            continue\n\n        if xml_property.firstChild:\n            value = xml_property.firstChild.nodeValue\n        else:\n            value = ''\n\n        isnull = xml_property.getAttributeNS(METADATA_NS, 'null')\n        mtype = xml_property.getAttributeNS(METADATA_NS, 'type')\n\n        # if not isnull and no type info, then it is a string and we just\n        # need the str type to hold the property.\n        if not isnull and not mtype:\n            _set_entity_attr(entity, name, value)\n        elif isnull == 'true':\n            if mtype:\n                property = EntityProperty(mtype, None)\n            else:\n                property = EntityProperty('Edm.String', None)\n            # a null value is still surfaced as a typed property\n            _set_entity_attr(entity, name, property)\n        else:  # need an object to hold the property\n            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)\n            if conv is not None:\n                property = conv(value)\n            else:\n                property = EntityProperty(mtype, value)\n            _set_entity_attr(entity, name, property)\n\n    # extract selected entry properties (e.g. etag) from the feed entry and\n    # set them on the entity.\n    for name, value in _get_entry_properties(xmlstr, True).items():\n        if name in ['etag']:\n            _set_entity_attr(entity, name, value)\n\n    return entity\n\n\ndef _set_entity_attr(entity, name, value):\n    try:\n        setattr(entity, name, value)\n    except UnicodeEncodeError:\n        # Python 2 doesn't support unicode attribute names, so we'll\n        # add them and access them directly through the dictionary\n        entity.__dict__[name] = value\n\n\ndef _convert_xml_to_table(xmlstr):\n    ''' Converts the xml response to table class.\n    Simply calls _convert_xml_to_entity, extracts the table name, and adds\n    the updated and author info.\n    '''\n    table = Table()\n    entity = _convert_xml_to_entity(xmlstr)\n    setattr(table, 'name', entity.TableName)\n    for name, value in _get_entry_properties(xmlstr, False).items():\n        setattr(table, name, value)\n    return table\n\n\ndef _storage_error_handler(http_error):\n    ''' Simple error handler for storage service. '''\n    return _general_error_handler(http_error)\n\n# make these available just from storage.\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.queueservice import QueueService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.cloudstorageaccount import CloudStorageAccount\nfrom azure.storage.sharedaccesssignature import (\n    SharedAccessSignature,\n    SharedAccessPolicy,\n    Permission,\n    WebResource,\n    )\n"
  },
  {
    "path": "OSPatching/azure/storage/blobservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    BLOB_SERVICE_HOST_BASE,\n    DEV_BLOB_HOST,\n    _ERROR_VALUE_NEGATIVE,\n    _ERROR_PAGE_BLOB_SIZE_ALIGNMENT,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _encode_base64,\n    _get_request_body,\n    _get_request_body_bytes_only,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _parse_simple_list,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_type_bytes,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.storage import (\n    Container,\n    ContainerEnumResults,\n    PageList,\n    PageRange,\n    SignedIdentifiers,\n    StorageServiceProperties,\n    _convert_block_list_to_xml,\n    _convert_response_to_block_list,\n    _create_blob_result,\n    _parse_blob_enum_results_list,\n    _update_storage_blob_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\nfrom os import path\nimport sys\nif sys.version_info >= (3,):\n    from io import BytesIO\nelse:\n    from cStringIO import StringIO as BytesIO\n\n# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT\n_PAGE_SIZE = 512\n\nclass BlobService(_StorageClient):\n\n    '''\n    This is the main class managing Blob resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to https.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024\n        self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024\n        super(BlobService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def make_blob_url(self, container_name, blob_name, account_name=None,\n                      protocol=None, host_base=None):\n        '''\n        Creates the url to access a blob.\n\n        container_name: Name of container.\n        blob_name: Name of blob.\n        account_name:\n            Name of the storage account. If not specified, uses the account\n            specified when BlobService was initialized.\n        protocol:\n            Protocol to use: 'http' or 'https'. 
\n    def make_blob_url(self, container_name, blob_name, account_name=None,\n                      protocol=None, host_base=None):\n        '''\n        Creates the url to access a blob.\n\n        container_name: Name of container.\n        blob_name: Name of blob.\n        account_name:\n            Name of the storage account. If not specified, uses the account\n            specified when BlobService was initialized.\n        protocol:\n            Protocol to use: 'http' or 'https'. If not specified, uses the\n            protocol specified when BlobService was initialized.\n        host_base:\n            Live host base url.  If not specified, uses the host base specified\n            when BlobService was initialized.\n        '''\n        if not account_name:\n            account_name = self.account_name\n        if not protocol:\n            protocol = self.protocol\n        if not host_base:\n            host_base = self.host_base\n\n        return '{0}://{1}{2}/{3}/{4}'.format(protocol,\n                                             account_name,\n                                             host_base,\n                                             container_name,\n                                             blob_name)\n\n    def list_containers(self, prefix=None, marker=None, maxresults=None,\n                        include=None):\n        '''\n        The List Containers operation returns a list of the containers under\n        the specified account.\n\n        prefix:\n            Optional. Filters the results to return only containers whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list to\n            be returned with the next list operation.\n        maxresults:\n            Optional. Specifies the maximum number of containers to return.\n        include:\n            Optional. Include this parameter to specify that the container's\n            metadata be returned as part of the response body. Set this\n            parameter to the string 'metadata' to get the container's metadata.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(response,\n                                        ContainerEnumResults,\n                                        \"Containers\",\n                                        Container)\n
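\n    # Illustrative sketch (assuming `bs` is a BlobService instance): page\n    # through containers 100 at a time using the opaque next_marker:\n    #\n    #     containers = bs.list_containers(maxresults=100)\n    #     while containers.next_marker:\n    #         containers = bs.list_containers(marker=containers.next_marker,\n    #                                         maxresults=100)\n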
\n    def create_container(self, container_name, x_ms_meta_name_values=None,\n                         x_ms_blob_public_access=None, fail_on_exist=False):\n        '''\n        Creates a new container under the specified account. If the container\n        with the same name already exists, the operation fails.\n\n        container_name: Name of container to create.\n        x_ms_meta_name_values:\n            Optional. A dict with name_value pairs to associate with the\n            container as metadata. Example:{'Category':'test'}\n        x_ms_blob_public_access:\n            Optional. Possible values include: container, blob\n        fail_on_exist:\n            Specify whether to throw an exception when the container exists.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_container_properties(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata and system properties for the\n        specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_properties only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n
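\n    # Illustrative sketch (hypothetical names): create a publicly readable\n    # container with one metadata pair, without failing if it already exists:\n    #\n    #     bs.create_container('mycontainer',\n    #                         x_ms_meta_name_values={'category': 'test'},\n    #                         x_ms_blob_public_access='container')\n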
\n    def get_container_metadata(self, container_name, x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata for the specified container. The\n        metadata will be in the returned dictionary under keys of the form\n        'x-ms-meta-(name)'.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_container_metadata(self, container_name,\n                               x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets one or more user-defined name-value pairs for the specified\n        container.\n\n        container_name: Name of existing container.\n        x_ms_meta_name_values:\n            A dict containing name, value for metadata.\n            Example: {'category':'test'}\n        x_ms_lease_id:\n            If specified, set_container_metadata only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_container_acl(self, container_name, x_ms_lease_id=None):\n        '''\n        Gets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        x_ms_lease_id:\n            If specified, get_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, SignedIdentifiers)\n
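\n    # Illustrative sketch (hypothetical name): grant anonymous read access at\n    # the container level:\n    #\n    #     bs.set_container_acl('mycontainer',\n    #                          x_ms_blob_public_access='container')\n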
x_ms_lease_id=None):\n        '''\n        Sets the permissions for the specified container.\n\n        container_name: Name of existing container.\n        signed_identifiers: SignedIdentifiers instance\n        x_ms_blob_public_access:\n            Optional. Possible values include: container, blob\n        x_ms_lease_id:\n            If specified, set_container_acl only succeeds if the\n            container's lease is active and matches this ID.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=acl'\n        request.headers = [\n            ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n        ]\n        request.body = _get_request_body(\n            _convert_class_to_xml(signed_identifiers))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_container(self, container_name, fail_not_exist=False,\n                         x_ms_lease_id=None):\n        '''\n        Marks the specified container for deletion.\n\n        container_name: Name of container to delete.\n        fail_not_exist:\n            Specify whether to throw an exception when the container doesn't\n            exist.\n        x_ms_lease_id: Required if the container has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '?restype=container'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n
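    # Lease usage sketch (illustrative): delete_container above requires the\n    # lease ID when the container holds an active lease; a typical\n    # acquire/release round trip with the method below might look like:\n    #\n    #   res = blob_service.lease_container('mycontainer', 'acquire',\n    #                                      x_ms_lease_duration=15)\n    #   lease_id = res['x-ms-lease-id']\n    #   blob_service.lease_container('mycontainer', 'release',\n    #                                x_ms_lease_id=lease_id)\n\n    def lease_container(self, container_name, x_ms_lease_action,\n                        x_ms_lease_id=None, x_ms_lease_duration=60,\n                        x_ms_lease_break_period=None,\n                        x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a lock on a container for delete operations.\n        The lock duration can be 15 to 60 seconds, or can be infinite.\n\n        container_name: Name of existing container.\n        x_ms_lease_action:\n            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the container has an active lease.\n        x_ms_lease_duration:\n            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n            between 15 and 60 seconds. 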
A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration,\n            in seconds, that the lease should continue before it is broken,\n            between 0 and 60 seconds. This break period is only used if it is\n            shorter than the time remaining on the lease. If longer, the time\n            remaining on the lease is used. A new lease will not be available\n            before the break period has expired, but the lease may be held for\n            longer than the break period. If this header does not appear with\n            a break operation, a fixed-duration lease breaks after the\n            remaining lease period elapses, and an infinite lease breaks\n            immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=lease'\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n            ('x-ms-lease-duration',\n             _str_or_none(\n                 x_ms_lease_duration if x_ms_lease_action == 'acquire'\\\n                     else None)),\n            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n\n    def list_blobs(self, container_name, prefix=None, marker=None,\n                   maxresults=None, include=None, delimiter=None):\n        '''\n        Returns the list of blobs under the specified container.\n\n        container_name: Name of existing container.\n        prefix:\n            Optional. Filters the results to return only blobs whose names\n            begin with the specified prefix.\n        marker:\n            Optional. A string value that identifies the portion of the list\n            to be returned with the next list operation. The operation returns\n            a marker value within the response body if the list returned was\n            not complete. The marker value may then be used in a subsequent\n            call to request the next set of list items. The marker value is\n            opaque to the client.\n        maxresults:\n            Optional. Specifies the maximum number of blobs to return,\n            including all BlobPrefix elements. If the request does not specify\n            maxresults or specifies a value greater than 5,000, the server will\n            return up to 5,000 items. 
Setting maxresults to a value less than\n            or equal to zero results in error response code 400 (Bad Request).\n        include:\n            Optional. Specifies one or more datasets to include in the\n            response. To specify more than one of these options on the URI,\n            you must separate each option with a comma. Valid values are:\n                snapshots:\n                    Specifies that snapshots should be included in the\n                    enumeration. Snapshots are listed from oldest to newest in\n                    the response.\n                metadata:\n                    Specifies that blob metadata be returned in the response.\n                uncommittedblobs:\n                    Specifies that blobs for which blocks have been uploaded,\n                    but which have not been committed using Put Block List\n                    (REST API), be included in the response.\n                copy:\n                    Version 2012-02-12 and newer. Specifies that metadata\n                    related to any current or previous Copy Blob operation\n                    should be included in the response.\n        delimiter:\n            Optional. When the request includes this parameter, the operation\n            returns a BlobPrefix element in the response body that acts as a\n            placeholder for all blobs whose names begin with the same\n            substring up to the appearance of the delimiter character. The\n            delimiter may be a single character or a string.\n        '''\n        _validate_not_none('container_name', container_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '?restype=container&comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('delimiter', _str_or_none(delimiter)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_blob_enum_results_list(response)\n\n
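    # Usage sketch (illustrative): walking every blob in a container by\n    # following the opaque marker across pages; assumes the enumeration result\n    # returned above is iterable and exposes a next_marker attribute.\n    #\n    #   marker = None\n    #   while True:\n    #       batch = blob_service.list_blobs('mycontainer', marker=marker)\n    #       for blob in batch:\n    #           print(blob.name)\n    #       marker = batch.next_marker\n    #       if not marker:\n    #           break\n\n    def set_blob_service_properties(self, storage_service_properties,\n                                    timeout=None):\n        '''\n        Sets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics. You can also use this operation to\n        set the default request version for all incoming requests that do not\n        have a version specified.\n\n        storage_service_properties: a StorageServiceProperties object.\n        timeout: Optional. 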
The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_blob_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Blob service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def get_blob_properties(self, container_name, blob_name,\n                            x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata, standard HTTP properties, and\n        system properties for the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'HEAD'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name)\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n
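    # Usage sketch (illustrative): reading a blob's size from the header dict\n    # returned by the HEAD request above.\n    #\n    #   props = blob_service.get_blob_properties('mycontainer', 'myblob.txt')\n    #   size = int(props['content-length'])\n\n    def set_blob_properties(self, container_name, blob_name,\n                            x_ms_blob_cache_control=None,\n                            x_ms_blob_content_type=None,\n                            x_ms_blob_content_md5=None,\n                            x_ms_blob_content_encoding=None,\n                            x_ms_blob_content_language=None,\n                            x_ms_lease_id=None):\n        '''\n        Sets system properties on the blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_blob_cache_control:\n            Optional. Modifies the cache control string for the blob.\n        x_ms_blob_content_type: Optional. 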
Sets the blob's content type.\n        x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.\n        x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.\n        x_ms_blob_content_language: Optional. Sets the blob's content language.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=properties'\n        request.headers = [\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_blob(self, container_name, blob_name, blob, x_ms_blob_type,\n                 content_encoding=None, content_language=None,\n                 content_md5=None, cache_control=None,\n                 x_ms_blob_content_type=None, x_ms_blob_content_encoding=None,\n                 x_ms_blob_content_language=None, x_ms_blob_content_md5=None,\n                 x_ms_blob_cache_control=None, x_ms_meta_name_values=None,\n                 x_ms_lease_id=None, x_ms_blob_content_length=None,\n                 x_ms_blob_sequence_number=None):\n        '''\n        Creates a new block blob or page blob, or updates the content of an\n        existing block blob.\n\n        See put_block_blob_from_* and put_page_blob_from_* for high level\n        functions that handle the creation and upload of large blobs with\n        automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob:\n            For BlockBlob:\n                Content of blob as bytes (size < 64MB). For larger size, you\n                must call put_block and put_block_list to set content of blob.\n            For PageBlob:\n                Use None and call put_page to set content of blob.\n        x_ms_blob_type: Required. Could be BlockBlob or PageBlob.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. 
When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_content_length:\n            Required for page blobs. This header specifies the maximum size\n            for the page blob, up to 1 TB. The page blob size must be aligned\n            to a 512-byte boundary.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_blob_type', x_ms_blob_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name)\n        request.headers = [\n            ('x-ms-blob-type', _str_or_none(x_ms_blob_type)),\n            ('Content-Encoding', _str_or_none(content_encoding)),\n            ('Content-Language', _str_or_none(content_language)),\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('Cache-Control', _str_or_none(cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-blob-content-length',\n             _str_or_none(x_ms_blob_content_length)),\n            ('x-ms-blob-sequence-number',\n             _str_or_none(x_ms_blob_sequence_number))\n        ]\n        request.body = _get_request_body_bytes_only('blob', blob)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n
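    # Usage sketch (illustrative): creating a small block blob directly with\n    # put_blob above, and an empty page blob sized for later put_page calls.\n    #\n    #   blob_service.put_blob('mycontainer', 'small.txt', b'hello', 'BlockBlob')\n    #   blob_service.put_blob('mycontainer', 'disk.vhd', None, 'PageBlob',\n    #                         x_ms_blob_content_length=512 * 1024)\n\n    def put_block_blob_from_path(self, container_name, blob_name, file_path,\n                                 content_encoding=None, content_language=None,\n                                 content_md5=None, cache_control=None,\n                                 x_ms_blob_content_type=None,\n                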
                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from a file path, or updates the content of an\n        existing block blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. 
Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n\n        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n                                          stream,\n                                          count,\n                                          content_encoding,\n                                          content_language,\n                                          content_md5,\n                                          cache_control,\n                                          x_ms_blob_content_type,\n                                          x_ms_blob_content_encoding,\n                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n                                          x_ms_blob_cache_control,\n                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n                                          progress_callback)\n\n    def put_block_blob_from_file(self, container_name, blob_name, stream,\n                                 count=None, content_encoding=None,\n                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from a file/stream, or updates the content of\n        an existing block blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        stream: Opened file/stream to upload as the blob content.\n        count:\n            Number of bytes to read from the stream. This is optional, but\n            should be supplied for optimal performance.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. 
When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n\n        if count and count < self._BLOB_MAX_DATA_SIZE:\n            # Small enough for a single request: upload in one put_blob call.\n            if progress_callback:\n                progress_callback(0, count)\n\n            data = stream.read(count)\n            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            if progress_callback:\n                progress_callback(count, count)\n        else:\n            if progress_callback:\n                progress_callback(0, count)\n\n            # Create the blob, then upload the content in chunks as blocks\n            # and commit them with put_block_list.\n            self.put_blob(container_name,\n                          blob_name,\n                          None,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            remain_bytes = count\n            block_ids = []\n            block_index = 0\n            index = 0\n            while True:\n                request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\\\n                    if remain_bytes is None else min(\n                        remain_bytes,\n                        self._BLOB_MAX_CHUNK_DATA_SIZE)\n                data = stream.read(request_count)\n                if data:\n                    length = len(data)\n                    index += length\n   
                 remain_bytes = remain_bytes - \\\n                        length if remain_bytes else None\n                    block_id = '{0:08d}'.format(block_index)\n                    self.put_block(container_name, blob_name,\n                                   data, block_id, x_ms_lease_id=x_ms_lease_id)\n                    block_ids.append(block_id)\n                    block_index += 1\n                    if progress_callback:\n                        progress_callback(index, count)\n                else:\n                    break\n\n            self.put_block_list(container_name, blob_name, block_ids,\n                                content_md5, x_ms_blob_cache_control,\n                                x_ms_blob_content_type,\n                                x_ms_blob_content_encoding,\n                                x_ms_blob_content_language,\n                                x_ms_blob_content_md5,\n                                x_ms_meta_name_values,\n                                x_ms_lease_id)\n\n    def put_block_blob_from_bytes(self, container_name, blob_name, blob,\n                                  index=0, count=None, content_encoding=None,\n                                  content_language=None, content_md5=None,\n                                  cache_control=None,\n                                  x_ms_blob_content_type=None,\n                                  x_ms_blob_content_encoding=None,\n                                  x_ms_blob_content_language=None,\n                                  x_ms_blob_content_md5=None,\n                                  x_ms_blob_cache_control=None,\n                                  x_ms_meta_name_values=None,\n                                  x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from an array of bytes, or updates the content\n        of an existing block blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of bytes.\n        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or negative value to upload\n            all bytes starting from index.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. 
Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('blob', blob)\n        _validate_not_none('index', index)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n        if count is None or count < 0:\n            count = len(blob) - index\n\n        if count < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, count)\n\n            data = blob[index: index + count]\n            self.put_blob(container_name,\n                          blob_name,\n                          data,\n                          'BlockBlob',\n                          content_encoding,\n                          content_language,\n                          content_md5,\n                          cache_control,\n                          x_ms_blob_content_type,\n                          x_ms_blob_content_encoding,\n                          x_ms_blob_content_language,\n                          x_ms_blob_content_md5,\n                          x_ms_blob_cache_control,\n                          x_ms_meta_name_values,\n                          x_ms_lease_id)\n\n            if progress_callback:\n                progress_callback(count, count)\n        else:\n            stream = BytesIO(blob)\n            stream.seek(index)\n\n            self.put_block_blob_from_file(container_name,\n                                          blob_name,\n                                          stream,\n                                          count,\n                                          content_encoding,\n                                          content_language,\n                                          content_md5,\n                                          cache_control,\n                                          x_ms_blob_content_type,\n                                          x_ms_blob_content_encoding,\n                                          x_ms_blob_content_language,\n                                          x_ms_blob_content_md5,\n                                          x_ms_blob_cache_control,\n                                          x_ms_meta_name_values,\n                                          x_ms_lease_id,\n                                          progress_callback)\n\n
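    # Usage sketch (illustrative): uploading an in-memory payload with the\n    # method above; chunking is automatic for payloads at or over the\n    # single-request limit. For str content, put_block_blob_from_text below\n    # encodes the text and delegates to the bytes-based upload.\n    #\n    #   blob_service.put_block_blob_from_bytes('mycontainer', 'data.bin',\n    #                                          b'binary payload')\n\n    def put_block_blob_from_text(self, container_name, blob_name, text,\n                                 text_encoding='utf-8',\n                                 content_encoding=None, content_language=None,\n                                 content_md5=None, cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 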
x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None, progress_callback=None):\n        '''\n        Creates a new block blob from str/unicode, or updates the content of an\n        existing block blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        text: Text to upload to the blob.\n        text_encoding: Encoding to use to convert the text to bytes.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. 
Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text', text)\n\n        if not isinstance(text, bytes):\n            _validate_not_none('text_encoding', text_encoding)\n            text = text.encode(text_encoding)\n\n        self.put_block_blob_from_bytes(container_name,\n                                       blob_name,\n                                       text,\n                                       0,\n                                       len(text),\n                                       content_encoding,\n                                       content_language,\n                                       content_md5,\n                                       cache_control,\n                                       x_ms_blob_content_type,\n                                       x_ms_blob_content_encoding,\n                                       x_ms_blob_content_language,\n                                       x_ms_blob_content_md5,\n                                       x_ms_blob_cache_control,\n                                       x_ms_meta_name_values,\n                                       x_ms_lease_id,\n                                       progress_callback)\n\n    def put_page_blob_from_path(self, container_name, blob_name, file_path,\n                                content_encoding=None, content_language=None,\n                                content_md5=None, cache_control=None,\n                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n                                x_ms_blob_content_language=None,\n                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n                                x_ms_meta_name_values=None,\n                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n                                progress_callback=None):\n        '''\n        Creates a new page blob from a file path, or updates the content of an\n        existing page blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        file_path: Path of the file to upload as the blob content.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. 
When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n\n        count = path.getsize(file_path)\n        with open(file_path, 'rb') as stream:\n            self.put_page_blob_from_file(container_name,\n                                         blob_name,\n                                         stream,\n                                         count,\n                                         content_encoding,\n                                         content_language,\n                                         content_md5,\n                                         cache_control,\n                                         x_ms_blob_content_type,\n                                         x_ms_blob_content_encoding,\n                                         x_ms_blob_content_language,\n                                         x_ms_blob_content_md5,\n                                         x_ms_blob_cache_control,\n                                         x_ms_meta_name_values,\n                                         x_ms_lease_id,\n                                         x_ms_blob_sequence_number,\n                                         progress_callback)\n\n
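    # Usage sketch (illustrative): uploading a local file as a page blob with\n    # the method above; the file size must be a multiple of 512 bytes, per the\n    # page-alignment check enforced below.\n    #\n    #   blob_service.put_page_blob_from_path('mycontainer', 'disk.vhd',\n    #                                        '/tmp/disk.vhd')\n\n    def put_page_blob_from_file(self, container_name, blob_name, stream, count,\n                                content_encoding=None, content_language=None,\n                                content_md5=None, cache_control=None,\n                                x_ms_blob_content_type=None,\n                                x_ms_blob_content_encoding=None,\n                                x_ms_blob_content_language=None,\n                                x_ms_blob_content_md5=None,\n                                x_ms_blob_cache_control=None,\n                                x_ms_meta_name_values=None,\n                                x_ms_lease_id=None,\n                                x_ms_blob_sequence_number=None,\n                                progress_callback=None):\n        '''\n        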
Creates a new page blob from a file/stream, or updates the content of an\n        existing page blob, with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        stream: Opened file/stream to upload as the blob content.\n        count:\n            Number of bytes to read from the stream. This is required; a page\n            blob cannot be created if the count is unknown.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. 
The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n        _validate_not_none('count', count)\n\n        if count < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))\n\n        if count % _PAGE_SIZE != 0:\n            raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))\n\n        if progress_callback:\n            progress_callback(0, count)\n\n        # Create an empty page blob of the final size, then fill it with\n        # put_page calls in chunks.\n        self.put_blob(container_name,\n                      blob_name,\n                      b'',\n                      'PageBlob',\n                      content_encoding,\n                      content_language,\n                      content_md5,\n                      cache_control,\n                      x_ms_blob_content_type,\n                      x_ms_blob_content_encoding,\n                      x_ms_blob_content_language,\n                      x_ms_blob_content_md5,\n                      x_ms_blob_cache_control,\n                      x_ms_meta_name_values,\n                      x_ms_lease_id,\n                      count,\n                      x_ms_blob_sequence_number)\n\n        remain_bytes = count\n        page_start = 0\n        while True:\n            request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE)\n            data = stream.read(request_count)\n            if data:\n                length = len(data)\n                remain_bytes = remain_bytes - length\n                page_end = page_start + length - 1\n                self.put_page(container_name,\n                              blob_name,\n                              data,\n                              'bytes={0}-{1}'.format(page_start, page_end),\n                              'update',\n                              x_ms_lease_id=x_ms_lease_id)\n                page_start = page_start + length\n\n                if progress_callback:\n                    progress_callback(page_start, count)\n            else:\n                break\n\n    def put_page_blob_from_bytes(self, container_name, blob_name, blob,\n                                 index=0, count=None, content_encoding=None,\n                                 content_language=None, content_md5=None,\n                                 cache_control=None,\n                                 x_ms_blob_content_type=None,\n                                 x_ms_blob_content_encoding=None,\n                                 x_ms_blob_content_language=None,\n                                 x_ms_blob_content_md5=None,\n                                 x_ms_blob_cache_control=None,\n                                 x_ms_meta_name_values=None,\n                                 x_ms_lease_id=None,\n                                 x_ms_blob_sequence_number=None,\n                                 progress_callback=None):\n        '''\n        Creates a new page blob from an array of bytes, or updates the content\n        of an existing page blob, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of blob to create or update.\n        blob: Content of blob as an array of 
bytes.\n        index: Start index in the array of bytes.\n        count:\n            Number of bytes to upload. Set to None or negative value to upload\n            all bytes starting from index.\n        content_encoding:\n            Optional. Specifies which content encodings have been applied to\n            the blob. This value is returned to the client when the Get Blob\n            (REST API) operation is performed on the blob resource. The client\n            can use this value when returned to decode the blob content.\n        content_language:\n            Optional. Specifies the natural languages used by this resource.\n        content_md5:\n            Optional. An MD5 hash of the blob content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            with the one that was sent. If the two hashes do not match, the\n            operation will fail with error code 400 (Bad Request).\n        cache_control:\n            Optional. The Blob service stores this value but does not use or\n            modify it.\n        x_ms_blob_content_type: Optional. Set the blob's content type.\n        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.\n        x_ms_blob_content_language: Optional. Set the blob's content language.\n        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.\n        x_ms_blob_cache_control: Optional. Sets the blob's cache control.\n        x_ms_meta_name_values: A dict containing name, value for metadata.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_blob_sequence_number:\n            Optional. Set for page blobs only. The sequence number is a\n            user-controlled value that you can use to track requests. The\n            value of the sequence number must be between 0 and 2^63 - 1. 
The\n            default value is 0.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob, or None if the total size is unknown.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('blob', blob)\n        _validate_type_bytes('blob', blob)\n\n        if index < 0:\n            raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))\n\n        if count is None or count < 0:\n            count = len(blob) - index\n\n        stream = BytesIO(blob)\n        stream.seek(index)\n\n        self.put_page_blob_from_file(container_name,\n                                     blob_name,\n                                     stream,\n                                     count,\n                                     content_encoding,\n                                     content_language,\n                                     content_md5,\n                                     cache_control,\n                                     x_ms_blob_content_type,\n                                     x_ms_blob_content_encoding,\n                                     x_ms_blob_content_language,\n                                     x_ms_blob_content_md5,\n                                     x_ms_blob_cache_control,\n                                     x_ms_meta_name_values,\n                                     x_ms_lease_id,\n                                     x_ms_blob_sequence_number,\n                                     progress_callback)\n\n    def get_blob(self, container_name, blob_name, snapshot=None,\n                 x_ms_range=None, x_ms_lease_id=None,\n                 x_ms_range_get_content_md5=None):\n        '''\n        Reads or downloads a blob from the system, including its metadata and\n        properties.\n\n        See get_blob_to_* for high level functions that handle the download\n        of large blobs with automatic chunking and progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_range:\n            Optional. Return only the bytes of the blob in the specified range.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_range_get_content_md5:\n            Optional. 
When this header is set to true and specified together\n            with the Range header, the service returns the MD5 hash for the\n            range, as long as the range is less than or equal to 4 MB in size.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name)\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-range-get-content-md5',\n             _str_or_none(x_ms_range_get_content_md5))\n        ]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request, None)\n\n        return _create_blob_result(response)\n\n    def get_blob_to_path(self, container_name, blob_name, file_path,\n                         open_mode='wb', snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob to a file path, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        file_path: Path of file to write to.\n        open_mode: Mode to use when opening the file.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('file_path', file_path)\n        _validate_not_none('open_mode', open_mode)\n\n        with open(file_path, open_mode) as stream:\n            self.get_blob_to_file(container_name,\n                                  blob_name,\n                                  stream,\n                                  snapshot,\n                                  x_ms_lease_id,\n                                  progress_callback)\n\n
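    # Usage sketch (illustrative): downloading a blob straight to a local file\n    # with the method above; chunking is handled automatically.\n    #\n    #   blob_service.get_blob_to_path('mycontainer', 'big.bin', '/tmp/big.bin')\n\n    def get_blob_to_file(self, container_name, blob_name, stream,\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob to a file/stream, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        stream: Opened file/stream to write to.\n        snapshot:\n            Optional. 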
The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('stream', stream)\n\n        props = self.get_blob_properties(container_name, blob_name)\n        blob_size = int(props['content-length'])\n\n        if blob_size < self._BLOB_MAX_DATA_SIZE:\n            if progress_callback:\n                progress_callback(0, blob_size)\n\n            data = self.get_blob(container_name,\n                                 blob_name,\n                                 snapshot,\n                                 x_ms_lease_id=x_ms_lease_id)\n\n            stream.write(data)\n\n            if progress_callback:\n                progress_callback(blob_size, blob_size)\n        else:\n            if progress_callback:\n                progress_callback(0, blob_size)\n\n            index = 0\n            while index < blob_size:\n                chunk_range = 'bytes={0}-{1}'.format(\n                    index,\n                    index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1)\n                # Pass the snapshot and lease id on each chunked read too, so\n                # the chunks come from the same blob version as the\n                # single-request path above.\n                data = self.get_blob(\n                    container_name, blob_name, snapshot,\n                    x_ms_range=chunk_range, x_ms_lease_id=x_ms_lease_id)\n                length = len(data)\n                index += length\n                if length > 0:\n                    stream.write(data)\n                    if progress_callback:\n                        progress_callback(index, blob_size)\n                    if length < self._BLOB_MAX_CHUNK_DATA_SIZE:\n                        break\n                else:\n                    break\n\n    def get_blob_to_bytes(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None, progress_callback=None):\n        '''\n        Downloads a blob as an array of bytes, with automatic chunking and\n        progress notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. 
The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n\n        stream = BytesIO()\n        self.get_blob_to_file(container_name,\n                              blob_name,\n                              stream,\n                              snapshot,\n                              x_ms_lease_id,\n                              progress_callback)\n\n        return stream.getvalue()\n\n    def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8',\n                         snapshot=None, x_ms_lease_id=None,\n                         progress_callback=None):\n        '''\n        Downloads a blob as unicode text, with automatic chunking and progress\n        notifications.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        text_encoding: Encoding to use when decoding the blob data.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        progress_callback:\n            Callback for progress with signature function(current, total) where\n            current is the number of bytes transferred so far, and total is the\n            size of the blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('text_encoding', text_encoding)\n\n        result = self.get_blob_to_bytes(container_name,\n                                        blob_name,\n                                        snapshot,\n                                        x_ms_lease_id,\n                                        progress_callback)\n\n        return result.decode(text_encoding)\n\n    def get_blob_metadata(self, container_name, blob_name, snapshot=None,\n                          x_ms_lease_id=None):\n        '''\n        Returns all user-defined metadata for the specified blob or snapshot.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. 
The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])\n\n    def set_blob_metadata(self, container_name, blob_name,\n                          x_ms_meta_name_values=None, x_ms_lease_id=None):\n        '''\n        Sets user-defined metadata for the specified blob as one or more\n        name-value pairs.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_meta_name_values: Dict containing name and value pairs.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def lease_blob(self, container_name, blob_name, x_ms_lease_action,\n                   x_ms_lease_id=None, x_ms_lease_duration=60,\n                   x_ms_lease_break_period=None, x_ms_proposed_lease_id=None):\n        '''\n        Establishes and manages a one-minute lock on a blob for write\n        operations.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_lease_action:\n            Required. Possible values: acquire|renew|release|break|change\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_lease_duration:\n            Specifies the duration of the lease, in seconds, or negative one\n            (-1) for a lease that never expires. A non-infinite lease can be\n            between 15 and 60 seconds. A lease duration cannot be changed\n            using renew or change. For backwards compatibility, the default is\n            60, and the value is only used on an acquire operation.\n        x_ms_lease_break_period:\n            Optional. For a break operation, this is the proposed duration of\n            seconds that the lease should continue before it is broken, between\n            0 and 60 seconds. 
This break period is only used if it is shorter\n            than the time remaining on the lease. If longer, the time remaining\n            on the lease is used. A new lease will not be available before the\n            break period has expired, but the lease may be held for longer than\n            the break period. If this header does not appear with a break\n            operation, a fixed-duration lease breaks after the remaining lease\n            period elapses, and an infinite lease breaks immediately.\n        x_ms_proposed_lease_id:\n            Optional for acquire, required for change. Proposed lease ID, in a\n            GUID string format.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_lease_action', x_ms_lease_action)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=lease'\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),\n            ('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\\\n                if x_ms_lease_action == 'acquire' else None)),\n            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),\n            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-lease-id', 'x-ms-lease-time'])\n\n    def snapshot_blob(self, container_name, blob_name,\n                      x_ms_meta_name_values=None, if_modified_since=None,\n                      if_unmodified_since=None, if_match=None,\n                      if_none_match=None, x_ms_lease_id=None):\n        '''\n        Creates a read-only snapshot of a blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_meta_name_values: Optional. Dict containing name and value pairs.\n        if_modified_since: Optional. DateTime string.\n        if_unmodified_since: Optional. DateTime string.\n        if_match:\n            Optional. Snapshot the blob only if its ETag value matches the\n            value specified.\n        if_none_match: Optional. 
An ETag value.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=snapshot'\n        request.headers = [\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-snapshot', 'etag', 'last-modified'])\n\n    def copy_blob(self, container_name, blob_name, x_ms_copy_source,\n                  x_ms_meta_name_values=None,\n                  x_ms_source_if_modified_since=None,\n                  x_ms_source_if_unmodified_since=None,\n                  x_ms_source_if_match=None, x_ms_source_if_none_match=None,\n                  if_modified_since=None, if_unmodified_since=None,\n                  if_match=None, if_none_match=None, x_ms_lease_id=None,\n                  x_ms_source_lease_id=None):\n        '''\n        Copies a blob to a destination within the storage account.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        x_ms_copy_source:\n            URL up to 2 KB in length that specifies a blob. A source blob in\n            the same account can be private, but a blob in another account\n            must be public or accept credentials included in this URL, such as\n            a Shared Access Signature. Examples:\n            https://myaccount.blob.core.windows.net/mycontainer/myblob\n            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>\n        x_ms_meta_name_values: Optional. Dict containing name and value pairs.\n        x_ms_source_if_modified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            copy the blob only if the source blob has been modified since the\n            specified date/time.\n        x_ms_source_if_unmodified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            copy the blob only if the source blob has not been modified since\n            the specified date/time.\n        x_ms_source_if_match:\n            Optional. An ETag value. Specify this conditional header to copy\n            the source blob only if its ETag matches the value specified.\n        x_ms_source_if_none_match:\n            Optional. An ETag value. Specify this conditional header to copy\n            the source blob only if its ETag does not match the value\n            specified.\n        if_modified_since: Optional. DateTime string.\n        if_unmodified_since: Optional. DateTime string.\n        if_match:\n            Optional. 
Copy the blob only if its ETag value matches the\n            value specified.\n        if_none_match: Optional. An ETag value.\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_source_lease_id:\n            Optional. Specify this to perform the Copy Blob operation only if\n            the lease ID given matches the active lease ID of the source blob.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_source', x_ms_copy_source)\n\n        if x_ms_copy_source.startswith('/'):\n            # Backwards compatibility for earlier versions of the SDK where\n            # the copy source can be in the following formats:\n            # - Blob in named container:\n            #     /accountName/containerName/blobName\n            # - Snapshot in named container:\n            #     /accountName/containerName/blobName?snapshot=<DateTime>\n            # - Blob in root container:\n            #     /accountName/blobName\n            # - Snapshot in root container:\n            #     /accountName/blobName?snapshot=<DateTime>\n            account, _, source =\\\n                x_ms_copy_source.partition('/')[2].partition('/')\n            x_ms_copy_source = self.protocol + '://' + \\\n                account + self.host_base + '/' + source\n\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [\n            ('x-ms-copy-source', _str_or_none(x_ms_copy_source)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-source-if-modified-since',\n             _str_or_none(x_ms_source_if_modified_since)),\n            ('x-ms-source-if-unmodified-since',\n             _str_or_none(x_ms_source_if_unmodified_since)),\n            ('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),\n            ('x-ms-source-if-none-match',\n             _str_or_none(x_ms_source_if_none_match)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,\n                        x_ms_lease_id=None):\n        '''\n        Aborts a pending copy_blob operation, and leaves a destination blob\n        with zero length and full metadata.\n\n        container_name: Name of destination container.\n        blob_name: Name of destination blob.\n        x_ms_copy_id:\n            Copy identifier provided in the x-ms-copy-id of the original\n            copy_blob operation.\n        x_ms_lease_id:\n            Required if the destination blob has an active infinite lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('x_ms_copy_id', x_ms_copy_id)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + \\\n            _str(blob_name) + '?comp=copy&copyid=' + \\\n            _str(x_ms_copy_id)\n        request.headers = [\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-copy-action', 'abort'),\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def delete_blob(self, container_name, blob_name, snapshot=None,\n                    x_ms_lease_id=None):\n        '''\n        Marks the specified blob or snapshot for deletion. The blob is later\n        deleted during garbage collection.\n\n        To mark a specific snapshot for deletion provide the date/time of the\n        snapshot via the snapshot parameter.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to delete.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block(self, container_name, blob_name, block, blockid,\n                  content_md5=None, x_ms_lease_id=None):\n        '''\n        Creates a new block to be committed as part of a blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block: Content of the block.\n        blockid:\n            Required. A value that identifies the block. The string must be\n            less than or equal to 64 bytes in size.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the block during transport. 
When this\n            header is specified, the storage service checks the hash that has\n            arrived against the one that was sent.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block', block)\n        _validate_not_none('blockid', blockid)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=block'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('blockid', _encode_base64(_str_or_none(blockid)))]\n        request.body = _get_request_body_bytes_only('block', block)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_block_list(self, container_name, blob_name, block_list,\n                       content_md5=None, x_ms_blob_cache_control=None,\n                       x_ms_blob_content_type=None,\n                       x_ms_blob_content_encoding=None,\n                       x_ms_blob_content_language=None,\n                       x_ms_blob_content_md5=None, x_ms_meta_name_values=None,\n                       x_ms_lease_id=None):\n        '''\n        Writes a blob by specifying the list of block IDs that make up the\n        blob. In order to be written as part of a blob, a block must have been\n        successfully written to the server in a prior Put Block (REST API)\n        operation.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        block_list: A str list containing the block ids.\n        content_md5:\n            Optional. An MD5 hash of the block content. This hash is used to\n            verify the integrity of the blob during transport. When this header\n            is specified, the storage service checks the hash that has arrived\n            against the one that was sent.\n        x_ms_blob_cache_control:\n            Optional. Sets the blob's cache control. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_type:\n            Optional. Sets the blob's content type. If specified, this property\n            is stored with the blob and returned with a read request.\n        x_ms_blob_content_encoding:\n            Optional. Sets the blob's content encoding. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_language:\n            Optional. Sets the blob's content language. If specified, this\n            property is stored with the blob and returned with a read request.\n        x_ms_blob_content_md5:\n            Optional. An MD5 hash of the blob content. Note that this hash is\n            not validated, as the hashes for the individual blocks were\n            validated when each was uploaded.\n        x_ms_meta_name_values: Optional. 
Dict containing name and value pairs.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('block_list', block_list)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),\n            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),\n            ('x-ms-blob-content-encoding',\n             _str_or_none(x_ms_blob_content_encoding)),\n            ('x-ms-blob-content-language',\n             _str_or_none(x_ms_blob_content_language)),\n            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),\n            ('x-ms-meta-name-values', x_ms_meta_name_values),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.body = _get_request_body(\n            _convert_block_list_to_xml(block_list))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_block_list(self, container_name, blob_name, snapshot=None,\n                       blocklisttype=None, x_ms_lease_id=None):\n        '''\n        Retrieves the list of blocks that have been uploaded as part of a\n        block blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. Datetime to determine the time to retrieve the blocks.\n        blocklisttype:\n            Specifies whether to return the list of committed blocks, the list\n            of uncommitted blocks, or both lists together. 
Valid values are:\n            committed, uncommitted, or all.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'\n        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]\n        request.query = [\n            ('snapshot', _str_or_none(snapshot)),\n            ('blocklisttype', _str_or_none(blocklisttype))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _convert_response_to_block_list(response)\n\n    def put_page(self, container_name, blob_name, page, x_ms_range,\n                 x_ms_page_write, timeout=None, content_md5=None,\n                 x_ms_lease_id=None, x_ms_if_sequence_number_lte=None,\n                 x_ms_if_sequence_number_lt=None,\n                 x_ms_if_sequence_number_eq=None,\n                 if_modified_since=None, if_unmodified_since=None,\n                 if_match=None, if_none_match=None):\n        '''\n        Writes a range of pages to a page blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        page: Content of the page.\n        x_ms_range:\n            Required. Specifies the range of bytes to be written as a page.\n            Both the start and end of the range must be specified. Must be in\n            format: bytes=startByte-endByte. Given that pages must be aligned\n            with 512-byte boundaries, the start offset must be a modulus of\n            512 and the end offset must be a modulus of 512-1. Examples of\n            valid byte ranges are 0-511, 512-1023, etc.\n        x_ms_page_write:\n            Required. You may specify one of the following options:\n                update (lower case):\n                    Writes the bytes specified by the request body into the\n                    specified range. The Range and Content-Length headers must\n                    match to perform the update.\n                clear (lower case):\n                    Clears the specified range and releases the space used in\n                    storage for that range. To clear a range, set the\n                    Content-Length header to zero, and the Range header to a\n                    value that indicates the range to clear, up to maximum\n                    blob size.\n        timeout: the timeout parameter is expressed in seconds.\n        content_md5:\n            Optional. An MD5 hash of the page content. This hash is used to\n            verify the integrity of the page during transport. When this header\n            is specified, the storage service compares the hash of the content\n            that has arrived with the header value that was sent. If the two\n            hashes do not match, the operation will fail with error code 400\n            (Bad Request).\n        x_ms_lease_id: Required if the blob has an active lease.\n        x_ms_if_sequence_number_lte:\n            Optional. 
If the blob's sequence number is less than or equal to\n            the specified value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_lt:\n            Optional. If the blob's sequence number is less than the specified\n            value, the request proceeds; otherwise it fails.\n        x_ms_if_sequence_number_eq:\n            Optional. If the blob's sequence number is equal to the specified\n            value, the request proceeds; otherwise it fails.\n        if_modified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has been modified since the\n            specified date/time. If the blob has not been modified, the Blob\n            service fails.\n        if_unmodified_since:\n            Optional. A DateTime value. Specify this conditional header to\n            write the page only if the blob has not been modified since the\n            specified date/time. If the blob has been modified, the Blob\n            service fails.\n        if_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value matches the\n            value specified. If the values do not match, the Blob service fails.\n        if_none_match:\n            Optional. An ETag value. Specify an ETag value for this conditional\n            header to write the page only if the blob's ETag value does not\n            match the value specified. If the values are identical, the Blob\n            service fails.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        _validate_not_none('page', page)\n        _validate_not_none('x_ms_range', x_ms_range)\n        _validate_not_none('x_ms_page_write', x_ms_page_write)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=page'\n        request.headers = [\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('Content-MD5', _str_or_none(content_md5)),\n            ('x-ms-page-write', _str_or_none(x_ms_page_write)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),\n            ('x-ms-if-sequence-number-le',\n             _str_or_none(x_ms_if_sequence_number_lte)),\n            ('x-ms-if-sequence-number-lt',\n             _str_or_none(x_ms_if_sequence_number_lt)),\n            ('x-ms-if-sequence-number-eq',\n             _str_or_none(x_ms_if_sequence_number_eq)),\n            ('If-Modified-Since', _str_or_none(if_modified_since)),\n            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),\n            ('If-Match', _str_or_none(if_match)),\n            ('If-None-Match', _str_or_none(if_none_match))\n        ]\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body_bytes_only('page', page)\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_page_ranges(self, container_name, blob_name, snapshot=None,\n                        range=None, x_ms_range=None, x_ms_lease_id=None):\n        '''\n        Retrieves the page ranges for a 
blob.\n\n        container_name: Name of existing container.\n        blob_name: Name of existing blob.\n        snapshot:\n            Optional. The snapshot parameter is an opaque DateTime value that,\n            when present, specifies the blob snapshot to retrieve information\n            from.\n        range:\n            Optional. Specifies the range of bytes over which to list ranges,\n            inclusively. If omitted, then all ranges for the blob are returned.\n        x_ms_range:\n            Optional. Specifies the range of bytes over which to list ranges,\n            inclusively. Both the start and end of the range must be specified.\n            Must be in format: bytes=startByte-endByte. Given that pages must\n            be aligned with 512-byte boundaries, the start offset must be a\n            modulus of 512 and the end offset must be a modulus of 512-1.\n            Examples of valid byte ranges are 0-511, 512-1023, etc.\n        x_ms_lease_id: Required if the blob has an active lease.\n        '''\n        _validate_not_none('container_name', container_name)\n        _validate_not_none('blob_name', blob_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(container_name) + '/' + _str(blob_name) + '?comp=pagelist'\n        request.headers = [\n            ('Range', _str_or_none(range)),\n            ('x-ms-range', _str_or_none(x_ms_range)),\n            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))\n        ]\n        request.query = [('snapshot', _str_or_none(snapshot))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_blob_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_simple_list(response, PageList, PageRange, \"page_ranges\")\n"
  },
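  {
    "path": "OSPatching/azure/storage/examples/blobservice_usage_sketch.py",
    "content": "# Illustrative usage sketch for the vendored BlobService client above.\n# This is a hypothetical example file, not part of the Azure SDK: the\n# account name, key, container and blob names below are placeholders.\n# It shows the high-level download helpers (get_blob_to_path,\n# get_blob_to_bytes) that wrap get_blob with automatic chunking and\n# progress callbacks.\nfrom azure.storage.blobservice import BlobService\n\n\ndef report_progress(current, total):\n    # current/total are byte counts; total may be None when the size is\n    # unknown.\n    if total:\n        print('downloaded {0} of {1} bytes'.format(current, total))\n\n\nif __name__ == '__main__':\n    # Placeholder credentials -- substitute a real account name and key.\n    blob_service = BlobService('myaccount', 'mykey')\n\n    # Download a blob to a local file with automatic chunking and\n    # progress notifications.\n    blob_service.get_blob_to_path('mycontainer', 'myblob',\n                                  '/tmp/myblob.bin',\n                                  progress_callback=report_progress)\n\n    # Or fetch the same blob into memory as bytes.\n    data = blob_service.get_blob_to_bytes('mycontainer', 'myblob')\n    print('fetched {0} bytes'.format(len(data)))\n"
  },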
  {
    "path": "OSPatching/azure/storage/cloudstorageaccount.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.tableservice import TableService\nfrom azure.storage.queueservice import QueueService\n\n\nclass CloudStorageAccount(object):\n\n    \"\"\"\n    Provides a factory for creating the blob, queue, and table services\n    with a common account name and account key.  Users can either use the\n    factory or can construct the appropriate service directly.\n    \"\"\"\n\n    def __init__(self, account_name=None, account_key=None):\n        self.account_name = account_name\n        self.account_key = account_key\n\n    def create_blob_service(self):\n        return BlobService(self.account_name, self.account_key)\n\n    def create_table_service(self):\n        return TableService(self.account_name, self.account_key)\n\n    def create_queue_service(self):\n        return QueueService(self.account_name, self.account_key)\n"
  },
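  {
    "path": "OSPatching/azure/storage/examples/cloudstorageaccount_usage_sketch.py",
    "content": "# Illustrative usage sketch for the CloudStorageAccount factory above.\n# This is a hypothetical example file, not part of the Azure SDK; the\n# account name and key are placeholders. The factory simply constructs\n# the blob, table and queue services with one shared set of\n# credentials, so either form below is equivalent.\nfrom azure.storage.blobservice import BlobService\nfrom azure.storage.cloudstorageaccount import CloudStorageAccount\n\nif __name__ == '__main__':\n    account = CloudStorageAccount('myaccount', 'mykey')\n\n    # Create all three services from the same credentials.\n    blob_service = account.create_blob_service()\n    table_service = account.create_table_service()\n    queue_service = account.create_queue_service()\n\n    # Equivalent to constructing a service directly.\n    same_blob_service = BlobService('myaccount', 'mykey')\n"
  },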
  {
    "path": "OSPatching/azure/storage/queueservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureConflictError,\n    WindowsAzureError,\n    DEV_QUEUE_HOST,\n    QUEUE_SERVICE_HOST_BASE,\n    xml_escape,\n    _convert_class_to_xml,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_enum_results_list,\n    _parse_response,\n    _parse_response_for_dict_filter,\n    _parse_response_for_dict_prefix,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    _ERROR_CONFLICT,\n    )\nfrom azure.http import (\n    HTTPRequest,\n    HTTP_RESPONSE_NO_CONTENT,\n    )\nfrom azure.storage import (\n    Queue,\n    QueueEnumResults,\n    QueueMessagesList,\n    StorageServiceProperties,\n    _update_storage_queue_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass QueueService(_StorageClient):\n\n    '''\n    This is the main class managing queue resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        super(QueueService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def get_queue_service_properties(self, timeout=None):\n        '''\n        Gets the properties of a storage account's Queue Service, including\n        Windows Azure Storage Analytics.\n\n        timeout: Optional. 
The timeout parameter is expressed in seconds.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def list_queues(self, prefix=None, marker=None, maxresults=None,\n                    include=None):\n        '''\n        Lists all of the queues in a given storage account.\n\n        prefix:\n            Filters the results to return only queues with names that begin\n            with the specified prefix.\n        marker:\n            A string value that identifies the portion of the list to be\n            returned with the next list operation. The operation returns a\n            NextMarker element within the response body if the list returned\n            was not complete. This value may then be used as a query parameter\n            in a subsequent call to request the next portion of the list of\n            queues. The marker value is opaque to the client.\n        maxresults:\n            Specifies the maximum number of queues to return. If maxresults is\n            not specified, the server will return up to 5,000 items.\n        include:\n            Optional. Include this parameter to specify that the queue's\n            metadata be returned as part of the response body.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?comp=list'\n        request.query = [\n            ('prefix', _str_or_none(prefix)),\n            ('marker', _str_or_none(marker)),\n            ('maxresults', _int_or_none(maxresults)),\n            ('include', _str_or_none(include))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_enum_results_list(\n            response, QueueEnumResults, \"Queues\", Queue)\n\n    def create_queue(self, queue_name, x_ms_meta_name_values=None,\n                     fail_on_exist=False):\n        '''\n        Creates a queue under the given account.\n\n        queue_name: name of the queue.\n        x_ms_meta_name_values:\n            Optional. 
A dict containing name-value pairs to associate with the\n            queue as metadata.\n        fail_on_exist:\n            Specify whether to throw an exception when the queue exists.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_on_exist:\n            try:\n                response = self._perform_request(request)\n                if response.status == HTTP_RESPONSE_NO_CONTENT:\n                    return False\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            response = self._perform_request(request)\n            if response.status == HTTP_RESPONSE_NO_CONTENT:\n                raise WindowsAzureConflictError(\n                    _ERROR_CONFLICT.format(response.message))\n            return True\n\n    def delete_queue(self, queue_name, fail_not_exist=False):\n        '''\n        Permanently deletes the specified queue.\n\n        queue_name: Name of the queue.\n        fail_not_exist:\n            Specify whether to throw an exception when the queue doesn't\n            exist.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + ''\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_queue_metadata(self, queue_name):\n        '''\n        Retrieves user-defined metadata and queue properties on the specified\n        queue. Metadata is associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_prefix(\n            response,\n            prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])\n\n    def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):\n        '''\n        Sets user-defined metadata on the specified queue. 
Metadata is\n        associated with the queue as name-value pairs.\n\n        queue_name: Name of the queue.\n        x_ms_meta_name_values:\n            Optional. A dict containing name-value pairs to associate with the\n            queue as metadata.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '?comp=metadata'\n        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def put_message(self, queue_name, message_text, visibilitytimeout=None,\n                    messagettl=None):\n        '''\n        Adds a new message to the back of the message queue. A visibility\n        timeout can also be specified to make the message invisible until the\n        visibility timeout expires. A message must be in a format that can be\n        included in an XML request with UTF-8 encoding. The encoded message can\n        be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size\n        for previous versions.\n\n        queue_name: Name of the queue.\n        message_text: Message content.\n        visibilitytimeout:\n            Optional. If not specified, the default value is 0. Specifies the\n            new visibility timeout value, in seconds, relative to server time.\n            The new value must be larger than or equal to 0, and cannot be\n            larger than 7 days. The visibility timeout of a message cannot be\n            set to a value later than the expiry time. visibilitytimeout\n            should be set to a value smaller than the time-to-live value.\n        messagettl:\n            Optional. Specifies the time-to-live interval for the message, in\n            seconds. The maximum time-to-live allowed is 7 days. If this\n            parameter is omitted, the default time-to-live is 7 days.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_text', message_text)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('visibilitytimeout', _str_or_none(visibilitytimeout)),\n            ('messagettl', _str_or_none(messagettl))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def get_messages(self, queue_name, numofmessages=None,\n                     visibilitytimeout=None):\n        '''\n        Retrieves one or more messages from the front of the queue.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. 
A nonzero integer value that specifies the number of\n            messages to retrieve from the queue, up to a maximum of 32. If\n            fewer are visible, the visible messages are returned. By default,\n            a single message is retrieved from the queue with this operation.\n        visibilitytimeout:\n            Specifies the new visibility timeout value, in seconds, relative\n            to server time. The new value must be larger than or equal to 1\n            second, and cannot be larger than 7 days, or larger than 2 hours\n            on REST protocol versions prior to version 2011-08-18. The\n            visibility timeout of a message can be set to a value later than\n            the expiry time.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.query = [\n            ('numofmessages', _str_or_none(numofmessages)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def peek_messages(self, queue_name, numofmessages=None):\n        '''\n        Retrieves one or more messages from the front of the queue, but does\n        not alter the visibility of the message.\n\n        queue_name: Name of the queue.\n        numofmessages:\n            Optional. A nonzero integer value that specifies the number of\n            messages to peek from the queue, up to a maximum of 32. By default,\n            a single message is peeked from the queue with this operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages?peekonly=true'\n        request.query = [('numofmessages', _str_or_none(numofmessages))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response(response, QueueMessagesList)\n\n    def delete_message(self, queue_name, message_id, popreceipt):\n        '''\n        Deletes the specified message.\n\n        queue_name: Name of the queue.\n        message_id: Message to delete.\n        popreceipt:\n            Required. 
A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('popreceipt', popreceipt)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [('popreceipt', _str_or_none(popreceipt))]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def clear_messages(self, queue_name):\n        '''\n        Deletes all messages from the specified queue.\n\n        queue_name: Name of the queue.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + _str(queue_name) + '/messages'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n\n    def update_message(self, queue_name, message_id, message_text, popreceipt,\n                       visibilitytimeout):\n        '''\n        Updates the visibility timeout of a message. You can also use this\n        operation to update the contents of a message.\n\n        queue_name: Name of the queue.\n        message_id: Message to update.\n        message_text: Content of message.\n        popreceipt:\n            Required. A valid pop receipt value returned from an earlier call\n            to the Get Messages or Update Message operation.\n        visibilitytimeout:\n            Required. Specifies the new visibility timeout value, in seconds,\n            relative to server time. The new value must be larger than or equal\n            to 0, and cannot be larger than 7 days. The visibility timeout of a\n            message cannot be set to a value later than the expiry time. 
A\n            message can be updated until it has been deleted or has expired.\n        '''\n        _validate_not_none('queue_name', queue_name)\n        _validate_not_none('message_id', message_id)\n        _validate_not_none('message_text', message_text)\n        _validate_not_none('popreceipt', popreceipt)\n        _validate_not_none('visibilitytimeout', visibilitytimeout)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(queue_name) + '/messages/' + _str(message_id) + ''\n        request.query = [\n            ('popreceipt', _str_or_none(popreceipt)),\n            ('visibilitytimeout', _str_or_none(visibilitytimeout))\n        ]\n        request.body = _get_request_body(\n            '<?xml version=\"1.0\" encoding=\"utf-8\"?> \\\n<QueueMessage> \\\n    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \\\n</QueueMessage>')\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(\n            response,\n            filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])\n\n    def set_queue_service_properties(self, storage_service_properties,\n                                     timeout=None):\n        '''\n        Sets the properties of a storage account's Queue service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        timeout: Optional. The timeout parameter is expressed in seconds.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.query = [('timeout', _int_or_none(timeout))]\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_queue_header(\n            request, self.account_name, self.account_key)\n        self._perform_request(request)\n"
  },
  {
    "path": "OSPatching/azure/storage/sharedaccesssignature.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import _sign_string, url_quote\nfrom azure.storage import X_MS_VERSION\n\n#-------------------------------------------------------------------------\n# Constants for the share access signature\nSIGNED_START = 'st'\nSIGNED_EXPIRY = 'se'\nSIGNED_RESOURCE = 'sr'\nSIGNED_PERMISSION = 'sp'\nSIGNED_IDENTIFIER = 'si'\nSIGNED_SIGNATURE = 'sig'\nSIGNED_VERSION = 'sv'\nRESOURCE_BLOB = 'b'\nRESOURCE_CONTAINER = 'c'\nSIGNED_RESOURCE_TYPE = 'resource'\nSHARED_ACCESS_PERMISSION = 'permission'\n\n#--------------------------------------------------------------------------\n\n\nclass WebResource(object):\n\n    '''\n    Class that stands for the resource to get the share access signature\n\n    path: the resource path.\n    properties: dict of name and values. Contains 2 item: resource type and\n            permission\n    request_url: the url of the webresource include all the queries.\n    '''\n\n    def __init__(self, path=None, request_url=None, properties=None):\n        self.path = path\n        self.properties = properties or {}\n        self.request_url = request_url\n\n\nclass Permission(object):\n\n    '''\n    Permission class. Contains the path and query_string for the path.\n\n    path: the resource path\n    query_string: dict of name, values. Contains SIGNED_START, SIGNED_EXPIRY\n            SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER,\n            SIGNED_SIGNATURE name values.\n    '''\n\n    def __init__(self, path=None, query_string=None):\n        self.path = path\n        self.query_string = query_string\n\n\nclass SharedAccessPolicy(object):\n\n    ''' SharedAccessPolicy class. 
'''\n\n    def __init__(self, access_policy, signed_identifier=None):\n        self.id = signed_identifier\n        self.access_policy = access_policy\n\n\nclass SharedAccessSignature(object):\n\n    '''\n    The main class used to do the signing and generate the signature.\n\n    account_name:\n        the storage account name used to generate the shared access signature\n    account_key: the access key used to generate the shared access signature\n    permission_set: the permission cache used to sign the request url.\n    '''\n\n    def __init__(self, account_name, account_key, permission_set=None):\n        self.account_name = account_name\n        self.account_key = account_key\n        self.permission_set = permission_set\n\n    def generate_signed_query_string(self, path, resource_type,\n                                     shared_access_policy,\n                                     version=X_MS_VERSION):\n        '''\n        Generates the query string for the given path, resource type and\n        shared access policy.\n\n        path: the resource path.\n        resource_type: could be blob or container.\n        shared_access_policy: shared access policy.\n        version:\n            x-ms-version for storage service, or None to get a signed query\n            string compatible with pre 2012-02-12 clients, where the version\n            is not included in the query string.\n        '''\n\n        query_string = {}\n        if shared_access_policy.access_policy.start:\n            query_string[\n                SIGNED_START] = shared_access_policy.access_policy.start\n\n        if version:\n            query_string[SIGNED_VERSION] = version\n        query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry\n        query_string[SIGNED_RESOURCE] = resource_type\n        query_string[\n            SIGNED_PERMISSION] = shared_access_policy.access_policy.permission\n\n        if shared_access_policy.id:\n            query_string[SIGNED_IDENTIFIER] = shared_access_policy.id\n\n        query_string[SIGNED_SIGNATURE] = self._generate_signature(\n            path, shared_access_policy, version)\n        return query_string\n\n    def sign_request(self, web_resource):\n        ''' Signs the request to generate a request_url with shared access\n        signature info for web_resource.'''\n\n        if self.permission_set:\n            for shared_access_signature in self.permission_set:\n                if self._permission_matches_request(\n                        shared_access_signature, web_resource,\n                        web_resource.properties[\n                            SIGNED_RESOURCE_TYPE],\n                        web_resource.properties[SHARED_ACCESS_PERMISSION]):\n                    if web_resource.request_url.find('?') == -1:\n                        web_resource.request_url += '?'\n                    else:\n                        web_resource.request_url += '&'\n\n                    web_resource.request_url += self._convert_query_string(\n                        shared_access_signature.query_string)\n                    break\n        return web_resource\n\n    def _convert_query_string(self, query_string):\n        ''' Converts a query string dict to a str. 
The order of the names and\n        values is significant and must be preserved.'''\n\n        convert_str = ''\n        if SIGNED_START in query_string:\n            convert_str += SIGNED_START + '=' + \\\n                url_quote(query_string[SIGNED_START]) + '&'\n        convert_str += SIGNED_EXPIRY + '=' + \\\n            url_quote(query_string[SIGNED_EXPIRY]) + '&'\n        convert_str += SIGNED_PERMISSION + '=' + \\\n            query_string[SIGNED_PERMISSION] + '&'\n        convert_str += SIGNED_RESOURCE + '=' + \\\n            query_string[SIGNED_RESOURCE] + '&'\n\n        if SIGNED_IDENTIFIER in query_string:\n            convert_str += SIGNED_IDENTIFIER + '=' + \\\n                query_string[SIGNED_IDENTIFIER] + '&'\n        if SIGNED_VERSION in query_string:\n            convert_str += SIGNED_VERSION + '=' + \\\n                query_string[SIGNED_VERSION] + '&'\n        convert_str += SIGNED_SIGNATURE + '=' + \\\n            url_quote(query_string[SIGNED_SIGNATURE]) + '&'\n        return convert_str\n\n    def _generate_signature(self, path, shared_access_policy, version):\n        ''' Generates the signature for a given path and shared access\n        policy. '''\n\n        def get_value_to_append(value, no_new_line=False):\n            return_value = ''\n            if value:\n                return_value = value\n            if not no_new_line:\n                return_value += '\\n'\n            return return_value\n\n        if path[0] != '/':\n            path = '/' + path\n\n        canonicalized_resource = '/' + self.account_name + path\n\n        # Form the string to sign from shared_access_policy and canonicalized\n        # resource. The order of values is important.\n        string_to_sign = \\\n            (get_value_to_append(shared_access_policy.access_policy.permission) +\n             get_value_to_append(shared_access_policy.access_policy.start) +\n             get_value_to_append(shared_access_policy.access_policy.expiry) +\n             get_value_to_append(canonicalized_resource))\n\n        if version:\n            string_to_sign += get_value_to_append(shared_access_policy.id)\n            string_to_sign += get_value_to_append(version, True)\n        else:\n            string_to_sign += get_value_to_append(shared_access_policy.id, True)\n\n        return self._sign(string_to_sign)\n\n    def _permission_matches_request(self, shared_access_signature,\n                                    web_resource, resource_type,\n                                    required_permission):\n        ''' Checks whether the requested permission matches the given\n        shared_access_signature, web_resource and resource type. '''\n\n        required_resource_type = resource_type\n        if required_resource_type == RESOURCE_BLOB:\n            required_resource_type += RESOURCE_CONTAINER\n\n        for name, value in shared_access_signature.query_string.items():\n            if name == SIGNED_RESOURCE and \\\n                required_resource_type.find(value) == -1:\n                return False\n            elif name == SIGNED_PERMISSION and \\\n                required_permission.find(value) == -1:\n                return False\n\n        return web_resource.path.find(shared_access_signature.path) != -1\n\n    def _sign(self, string_to_sign):\n        ''' Uses HMAC-SHA256 to sign the string and returns it as a base64\n        encoded string. '''\n\n        return _sign_string(self.account_key, string_to_sign)\n"
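[Editor's note] A minimal sketch of generating a blob-level signature with the class above. It assumes the AccessPolicy class exported by azure.storage in this vendored SDK (start/expiry/permission attributes); the account credentials, dates and blob path are placeholders:

    from azure.storage import AccessPolicy
    from azure.storage.sharedaccesssignature import (
        RESOURCE_BLOB, SharedAccessPolicy, SharedAccessSignature)

    sas = SharedAccessSignature('myaccount', 'mykey')
    policy = SharedAccessPolicy(AccessPolicy(start='2014-01-01',
                                             expiry='2014-01-02',
                                             permission='r'))

    # Returns a dict of the st/se/sr/sp/sv/sig parameters; sign_request uses
    # _convert_query_string to turn the same dict into a URL query suffix.
    query = sas.generate_signed_query_string('container/blob.txt',
                                             RESOURCE_BLOB, policy)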
  },
  {
    "path": "OSPatching/azure/storage/storageclient.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\nimport sys\n\nfrom azure import (\n    WindowsAzureError,\n    DEV_ACCOUNT_NAME,\n    DEV_ACCOUNT_KEY,\n    _ERROR_STORAGE_MISSING_INFO,\n    )\nfrom azure.http import HTTPError\nfrom azure.http.httpclient import _HTTPClient\nfrom azure.storage import _storage_error_handler\n\n#--------------------------------------------------------------------------\n# constants for azure app setting environment variables\nAZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'\nAZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'\nEMULATED = 'EMULATED'\n\n#--------------------------------------------------------------------------\n\n\nclass _StorageClient(object):\n\n    '''\n    This is the base class for BlobManager, TableManager and QueueManager.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base='', dev_host=''):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. Defaults to localhost.\n        '''\n        self.account_name = account_name\n        self.account_key = account_key\n        self.requestid = None\n        self.protocol = protocol\n        self.host_base = host_base\n        self.dev_host = dev_host\n\n        # the app is not run in azure emulator or use default development\n        # storage account and key if app is run in emulator.\n        self.use_local_storage = False\n\n        # check whether it is run in emulator.\n        if EMULATED in os.environ:\n            self.is_emulated = os.environ[EMULATED].lower() != 'false'\n        else:\n            self.is_emulated = False\n\n        # get account_name and account key. 
If they are not set when\n        # constructing, get the account and key from the environment variables\n        # if the app is not run in the azure emulator, or use the default\n        # development storage account and key if it is.\n        if not self.account_name or not self.account_key:\n            if self.is_emulated:\n                self.account_name = DEV_ACCOUNT_NAME\n                self.account_key = DEV_ACCOUNT_KEY\n                self.protocol = 'http'\n                self.use_local_storage = True\n            else:\n                self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)\n                self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)\n\n        if not self.account_name or not self.account_key:\n            raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)\n\n        self._httpclient = _HTTPClient(\n            service_instance=self,\n            account_key=self.account_key,\n            account_name=self.account_name,\n            protocol=self.protocol)\n        self._batchclient = None\n        self._filter = self._perform_request_worker\n\n    def with_filter(self, filter):\n        '''\n        Returns a new service which will process requests with the specified\n        filter.  Filtering operations can include logging, automatic retrying,\n        etc...  The filter is a lambda which receives the HTTPRequest and\n        another lambda.  The filter can perform any pre-processing on the\n        request, pass it off to the next lambda, and then perform any\n        post-processing on the response.\n        '''\n        res = type(self)(self.account_name, self.account_key, self.protocol)\n        old_filter = self._filter\n\n        def new_filter(request):\n            return filter(request, old_filter)\n\n        res._filter = new_filter\n        return res\n\n    def set_proxy(self, host, port, user=None, password=None):\n        '''\n        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.\n\n        host: Address of the proxy. Ex: '192.168.0.100'\n        port: Port of the proxy. Ex: 6000\n        user: User for proxy authorization.\n        password: Password for proxy authorization.\n        '''\n        self._httpclient.set_proxy(host, port, user, password)\n\n    def _get_host(self):\n        if self.use_local_storage:\n            return self.dev_host\n        else:\n            return self.account_name + self.host_base\n\n    def _perform_request_worker(self, request):\n        return self._httpclient.perform_request(request)\n\n    def _perform_request(self, request, text_encoding='utf-8'):\n        '''\n        Sends the request and returns the response. Catches HTTPError and\n        hands it to the error handler.\n        '''\n        try:\n            if self._batchclient is not None:\n                return self._batchclient.insert_request_to_batch(request)\n            else:\n                resp = self._filter(request)\n\n            if sys.version_info >= (3,) and isinstance(resp, bytes) and \\\n                text_encoding:\n                resp = resp.decode(text_encoding)\n\n        except HTTPError as ex:\n            _storage_error_handler(ex)\n\n        return resp\n"
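[Editor's note] The filter hook documented in with_filter can be exercised as below. This is a sketch: the TableService subclass defined in tableservice.py (next file) serves as the concrete client, and the account details are placeholders:

    from azure.storage.tableservice import TableService

    def logging_filter(request, next_filter):
        # Pre-process: inspect the outgoing HTTPRequest.
        print('storage request: %s %s' % (request.method, request.path))
        # Hand off to the next filter in the chain (ultimately the HTTP
        # client) and return its response unchanged.
        return next_filter(request)

    table_service = TableService('myaccount', 'mykey')
    # with_filter returns a new, wrapped service; the original is untouched.
    logged_service = table_service.with_filter(logging_filter)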
  },
  {
    "path": "OSPatching/azure/storage/tableservice.py",
    "content": "#-------------------------------------------------------------------------\n# Copyright (c) Microsoft.  All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nfrom azure import (\n    WindowsAzureError,\n    TABLE_SERVICE_HOST_BASE,\n    DEV_TABLE_HOST,\n    _convert_class_to_xml,\n    _convert_response_to_feeds,\n    _dont_fail_not_exist,\n    _dont_fail_on_exist,\n    _get_request_body,\n    _int_or_none,\n    _parse_response,\n    _parse_response_for_dict,\n    _parse_response_for_dict_filter,\n    _str,\n    _str_or_none,\n    _update_request_uri_query_local_storage,\n    _validate_not_none,\n    )\nfrom azure.http import HTTPRequest\nfrom azure.http.batchclient import _BatchClient\nfrom azure.storage import (\n    StorageServiceProperties,\n    _convert_entity_to_xml,\n    _convert_response_to_entity,\n    _convert_table_to_xml,\n    _convert_xml_to_entity,\n    _convert_xml_to_table,\n    _sign_storage_table_request,\n    _update_storage_table_header,\n    )\nfrom azure.storage.storageclient import _StorageClient\n\n\nclass TableService(_StorageClient):\n\n    '''\n    This is the main class managing Table resources.\n    '''\n\n    def __init__(self, account_name=None, account_key=None, protocol='https',\n                 host_base=TABLE_SERVICE_HOST_BASE, dev_host=DEV_TABLE_HOST):\n        '''\n        account_name: your storage account name, required for all operations.\n        account_key: your storage account key, required for all operations.\n        protocol: Optional. Protocol. Defaults to http.\n        host_base:\n            Optional. Live host base url. Defaults to Azure url. Override this\n            for on-premise.\n        dev_host: Optional. Dev host url. 
Defaults to localhost.\n        '''\n        super(TableService, self).__init__(\n            account_name, account_key, protocol, host_base, dev_host)\n\n    def begin_batch(self):\n        if self._batchclient is None:\n            self._batchclient = _BatchClient(\n                service_instance=self,\n                account_key=self.account_key,\n                account_name=self.account_name)\n        return self._batchclient.begin_batch()\n\n    def commit_batch(self):\n        try:\n            ret = self._batchclient.commit_batch()\n        finally:\n            self._batchclient = None\n        return ret\n\n    def cancel_batch(self):\n        self._batchclient = None\n\n    def get_table_service_properties(self):\n        '''\n        Gets the properties of a storage account's Table service, including\n        Windows Azure Storage Analytics.\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response(response, StorageServiceProperties)\n\n    def set_table_service_properties(self, storage_service_properties):\n        '''\n        Sets the properties of a storage account's Table Service, including\n        Windows Azure Storage Analytics.\n\n        storage_service_properties: StorageServiceProperties object.\n        '''\n        _validate_not_none('storage_service_properties',\n                           storage_service_properties)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/?restype=service&comp=properties'\n        request.body = _get_request_body(\n            _convert_class_to_xml(storage_service_properties))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict(response)\n\n    def query_tables(self, table_name=None, top=None, next_table_name=None):\n        '''\n        Returns a list of tables under the specified account.\n\n        table_name: Optional.  The specific table to query.\n        top: Optional. Maximum number of tables to return.\n        next_table_name:\n            Optional. 
When top is used, the next table name is stored in\n            result.x_ms_continuation['NextTableName']\n        '''\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        if table_name is not None:\n            uri_part_table_name = \"('\" + table_name + \"')\"\n        else:\n            uri_part_table_name = \"\"\n        request.path = '/Tables' + uri_part_table_name + ''\n        request.query = [\n            ('$top', _int_or_none(top)),\n            ('NextTableName', _str_or_none(next_table_name))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_table)\n\n    def create_table(self, table, fail_on_exist=False):\n        '''\n        Creates a new table in the storage account.\n\n        table:\n            Name of the table to create. Table name may contain only\n            alphanumeric characters and cannot begin with a numeric character.\n            It is case-insensitive and must be from 3 to 63 characters long.\n        fail_on_exist: Specify whether throw exception when table exists.\n        '''\n        _validate_not_none('table', table)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/Tables'\n        request.body = _get_request_body(_convert_table_to_xml(table))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_on_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_on_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def delete_table(self, table_name, fail_not_exist=False):\n        '''\n        table_name: Name of the table to delete.\n        fail_not_exist:\n            Specify whether throw exception when table doesn't exist.\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/Tables(\\'' + _str(table_name) + '\\')'\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        if not fail_not_exist:\n            try:\n                self._perform_request(request)\n                return True\n            except WindowsAzureError as ex:\n                _dont_fail_not_exist(ex)\n                return False\n        else:\n            self._perform_request(request)\n            return True\n\n    def get_entity(self, table_name, partition_key, row_key, select=''):\n        '''\n        Get an entity in a table; includes the $select options.\n\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        select: Property names to select.\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        
_validate_not_none('row_key', row_key)\n        _validate_not_none('select', select)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + \\\n            '(PartitionKey=\\'' + _str(partition_key) + \\\n            '\\',RowKey=\\'' + \\\n            _str(row_key) + '\\')?$select=' + \\\n            _str(select) + ''\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n\n    def query_entities(self, table_name, filter=None, select=None, top=None,\n                       next_partition_key=None, next_row_key=None):\n        '''\n        Get entities in a table; includes the $filter and $select options.\n\n        table_name: Table to query.\n        filter:\n            Optional. Filter as described at\n            http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx\n        select: Optional. Property names to select from the entities.\n        top: Optional. Maximum number of entities to return.\n        next_partition_key:\n            Optional. When top is used, the next partition key is stored in\n            result.x_ms_continuation['NextPartitionKey']\n        next_row_key:\n            Optional. When top is used, the next partition key is stored in\n            result.x_ms_continuation['NextRowKey']\n        '''\n        _validate_not_none('table_name', table_name)\n        request = HTTPRequest()\n        request.method = 'GET'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + '()'\n        request.query = [\n            ('$filter', _str_or_none(filter)),\n            ('$select', _str_or_none(select)),\n            ('$top', _int_or_none(top)),\n            ('NextPartitionKey', _str_or_none(next_partition_key)),\n            ('NextRowKey', _str_or_none(next_row_key))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_feeds(response, _convert_xml_to_entity)\n\n    def insert_entity(self, table_name, entity,\n                      content_type='application/atom+xml'):\n        '''\n        Inserts a new entity into a table.\n\n        table_name: Table name.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. 
Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'POST'\n        request.host = self._get_host()\n        request.path = '/' + _str(table_name) + ''\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _convert_response_to_entity(response)\n\n    def update_entity(self, table_name, partition_key, row_key, entity,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity in a table. The Update Entity operation\n        replaces the entire entity and can be used to remove properties.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the merge should be\n            performed. To force an unconditional merge, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def merge_entity(self, table_name, partition_key, row_key, entity,\n                     content_type='application/atom+xml', if_match='*'):\n        '''\n        Updates an existing entity by updating the entity's properties. This\n        operation does not replace the existing entity as the Update Entity\n        operation does.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Can be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the merge should be\n            performed. 
To force an unconditional merge, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def delete_entity(self, table_name, partition_key, row_key,\n                      content_type='application/atom+xml', if_match='*'):\n        '''\n        Deletes an existing entity in a table.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        content_type: Required. Must be set to application/atom+xml\n        if_match:\n            Optional. Specifies the condition for which the delete should be\n            performed. To force an unconditional delete, set to the wildcard\n            character (*).\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('content_type', content_type)\n        _validate_not_none('if_match', if_match)\n        request = HTTPRequest()\n        request.method = 'DELETE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [\n            ('Content-Type', _str_or_none(content_type)),\n            ('If-Match', _str_or_none(if_match))\n        ]\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        self._perform_request(request)\n\n    def insert_or_replace_entity(self, table_name, partition_key, row_key,\n                                 entity, content_type='application/atom+xml'):\n        '''\n        Replaces an existing entity or inserts a new entity if it does not\n        exist in the table. Because this operation can insert or update an\n        entity, it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. 
Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'PUT'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def insert_or_merge_entity(self, table_name, partition_key, row_key,\n                               entity, content_type='application/atom+xml'):\n        '''\n        Merges an existing entity or inserts a new entity if it does not exist\n        in the table. Because this operation can insert or update an entity,\n        it is also known as an \"upsert\" operation.\n\n        table_name: Table name.\n        partition_key: PartitionKey of the entity.\n        row_key: RowKey of the entity.\n        entity:\n            Required. The entity object to insert. Could be a dict format or\n            entity object.\n        content_type: Required. Must be set to application/atom+xml\n        '''\n        _validate_not_none('table_name', table_name)\n        _validate_not_none('partition_key', partition_key)\n        _validate_not_none('row_key', row_key)\n        _validate_not_none('entity', entity)\n        _validate_not_none('content_type', content_type)\n        request = HTTPRequest()\n        request.method = 'MERGE'\n        request.host = self._get_host()\n        request.path = '/' + \\\n            _str(table_name) + '(PartitionKey=\\'' + \\\n            _str(partition_key) + '\\',RowKey=\\'' + _str(row_key) + '\\')'\n        request.headers = [('Content-Type', _str_or_none(content_type))]\n        request.body = _get_request_body(_convert_entity_to_xml(entity))\n        request.path, request.query = _update_request_uri_query_local_storage(\n            request, self.use_local_storage)\n        request.headers = _update_storage_table_header(request)\n        response = self._perform_request(request)\n\n        return _parse_response_for_dict_filter(response, filter=['etag'])\n\n    def _perform_request_worker(self, request):\n        auth = _sign_storage_table_request(request,\n                                           self.account_name,\n                                           self.account_key)\n        request.headers.append(('Authorization', auth))\n        return self._httpclient.perform_request(request)\n"
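[Editor's note] A usage sketch for the entity operations above. Table name and keys are placeholders; entities may be plain dicts with mandatory PartitionKey and RowKey, per the insert_entity docstring, and the continuation tokens are read as documented in query_entities:

    from azure.storage.tableservice import TableService

    table_service = TableService('myaccount', 'mykey')
    table_service.create_table('tasks')

    table_service.insert_entity('tasks', {'PartitionKey': 'batch1',
                                          'RowKey': '001',
                                          'status': 'pending'})

    # When top truncates the result set, continuation tokens are surfaced
    # on the result; pass them back to fetch the next page.
    entities = table_service.query_entities('tasks', top=100)
    continuation = getattr(entities, 'x_ms_continuation', None)
    if continuation:
        entities = table_service.query_entities(
            'tasks', top=100,
            next_partition_key=continuation['NextPartitionKey'],
            next_row_key=continuation['NextRowKey'])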
  },
  {
    "path": "OSPatching/check.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport os\nimport sys\nimport datetime\n\n\ndef main():\n    intervalOfWeeks = int(sys.argv[1])\n    if intervalOfWeeks == 1:\n        sys.exit(0)\n\n    history_scheduled = os.path.join(os.path.dirname(sys.argv[0]),\n                                     'scheduled/history')\n    today = datetime.date.today()\n    today_dayOfWeek = today.strftime('%a')\n\n    last_scheduled_date = None\n    with open(history_scheduled) as f:\n        lines = f.readlines()\n        lines.reverse()\n        for line in lines:\n            line = line.strip()\n            if line.endswith(today_dayOfWeek):\n                last_scheduled_date = datetime.datetime.strptime(line,\n                                                                 '%Y-%m-%d %a')\n                break\n\n    if (last_scheduled_date is not None and last_scheduled_date.date() +\n            datetime.timedelta(days=intervalOfWeeks*7) > today):\n        sys.exit(1)\n    else:\n        sys.exit(0)\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OSPatching/handler.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport re\nimport time\nimport json\nimport tempfile\nimport urllib2\nimport urlparse\nimport platform\nimport shutil\nimport traceback\nimport logging\nfrom azure.storage import BlobService\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\nfrom patch import *\n\n# Global variables definition\nExtensionShortName = \"DSCForLinux\"\nDownloadDirectory = 'download'\nidleTestScriptName = \"idleTest.py\"\nhealthyTestScriptName = \"healthyTest.py\"\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        MyPatching.install()\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception, e:\n        hutil.error(\"Failed to install the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')\n\ndef enable():\n    hutil.log(\"WARNING: The OSPatching extension for Linux has been deprecated. \"\n              \"Please see the GitHub project \"\n              \"(https://github.com/Azure/azure-linux-extensions/tree/master/OSPatching) \"\n              \"for more information.\")\n\n    hutil.do_parse_context('Enable')\n    try:\n        protected_settings = hutil.get_protected_settings()\n        public_settings = hutil.get_public_settings()\n        if protected_settings:\n            settings = protected_settings.copy()\n        else:\n            settings = dict()\n        if public_settings:\n            settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        oneoff = settings.get(\"oneoff\")\n        download_customized_vmstatustest()\n        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)\n        MyPatching.enable()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'warning', '0', 'Enable Succeeded. OSPatching is deprecated. See https://github.com/Azure/azure-linux-extensions/tree/master/OSPatching for more info. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.error(\"Failed to enable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. 
Current Configuration: ' + current_config)\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    try:\n        MyPatching.disable()\n        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')\n    except Exception, e:\n        hutil.error(\"Failed to disable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')\n\ndef download():\n    hutil.do_parse_context('Download')\n    try:\n        protected_settings = hutil.get_protected_settings()\n        public_settings = hutil.get_public_settings()\n        if protected_settings:\n            settings = protected_settings.copy()\n        else:\n            settings = dict()\n        if public_settings:\n            settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.download()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Download Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.error(\"Failed to download updates with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Download Failed. Current Configuration: ' + current_config)\n\ndef patch():\n    hutil.do_parse_context('Patch')\n    try:\n        protected_settings = hutil.get_protected_settings()\n        public_settings = hutil.get_public_settings()\n        if protected_settings:\n            settings = protected_settings.copy()\n        else:\n            settings = dict()\n        if public_settings:\n            settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.patch()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.error(\"Failed to patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Patch Failed. Current Configuration: ' + current_config)\n\ndef oneoff():\n    hutil.do_parse_context('Oneoff')\n    try:\n        protected_settings = hutil.get_protected_settings()\n        public_settings = hutil.get_public_settings()\n        if protected_settings:\n            settings = protected_settings.copy()\n        else:\n            settings = dict()\n        if public_settings:\n            settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.patch_one_off()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Oneoff Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.error(\"Failed to one-off patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Oneoff Patch Failed. 
Current Configuration: ' + current_config)\n\ndef download_files(hutil):\n    protected_settings = hutil.get_protected_settings()\n    public_settings = hutil.get_public_settings()\n    if protected_settings:\n        settings = protected_settings.copy()\n    else:\n        settings = dict()\n    if public_settings:\n        settings.update(public_settings)\n    local = settings.get(\"vmStatusTest\", dict()).get(\"local\", \"\")\n    if str(local).lower() == \"true\":\n        local = True\n    elif str(local).lower() == \"false\":\n        local = False\n    else:\n        hutil.log(\"WARNING: The parameter \\\"local\\\" \"\n                  \"is empty or invalid. Setting it to False. Continue...\")\n        local = False\n    idle_test_script = settings.get(\"vmStatusTest\", dict()).get('idleTestScript')\n    healthy_test_script = settings.get(\"vmStatusTest\", dict()).get('healthyTestScript')\n\n    if (not idle_test_script and not healthy_test_script):\n        hutil.log(\"WARNING: The parameters \\\"idleTestScript\\\" and \\\"healthyTestScript\\\" \"\n                  \"are both empty. Exit downloading VMStatusTest scripts...\")\n        return\n    elif local:\n        if (idle_test_script and idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and healthy_test_script.startswith(\"http\")):\n            hutil.log(\"WARNING: The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should not be a URI. Exit downloading VMStatusTest scripts...\")\n            return\n    elif not local:\n        if (idle_test_script and not idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and not healthy_test_script.startswith(\"http\")):\n            hutil.log(\"WARNING: The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should be a URI. Exit downloading VMStatusTest scripts...\")\n            return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading VMStatusTest scripts...')\n\n    vmStatusTestScripts = dict()\n    vmStatusTestScripts[idle_test_script] = idleTestScriptName\n    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName\n\n    if local:\n        hutil.log(\"Saving VMStatusTest scripts from user's configurations...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = save_local_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n        return\n\n    storage_account_name = None\n    storage_account_key = None\n    if settings:\n        storage_account_name = settings.get(\"storageAccountName\", \"\").strip()\n        storage_account_key = settings.get(\"storageAccountKey\", \"\").strip()\n    if storage_account_name and storage_account_key:\n        hutil.log(\"Downloading VMStatusTest scripts from azure storage...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_blob(storage_account_name,\n                                      storage_account_key,\n                                      src,\n                                      dst,\n                                      hutil)\n            preprocess_files(file_path, hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log(\"No azure storage account and key specified in protected \"\n                  \"settings. 
Downloading VMStatusTest scripts from external links...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_external_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n    else:\n        #Storage account and key should appear in pairs\n        error_msg = \"Azure storage account or storage key is not provided\"\n        hutil.error(error_msg)\n        raise ValueError(error_msg)\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    container_name = get_container_name_from_uri(blob_uri)\n    blob_name = get_blob_name_from_uri(blob_uri)\n    download_dir = prepare_download_dir(seqNo)\n    download_path = os.path.join(download_dir, dst)\n    #Guest agent already ensure the plugin is enabled one after another.\n    #The blob download will not conflict.\n    blob_service = BlobService(storage_account_name, storage_account_key)\n    try:\n        hutil.log(\"Downloading to {0}\".format(download_path))\n        blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    except Exception, e:\n        hutil.error((\"Failed to download blob with uri:{0} \"\n                     \"with error {1}\").format(blob_uri,e))\n        raise\n    return download_path\n\ndef download_external_file(uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        hutil.log(\"Downloading to {0}\".format(file_path))\n        download_and_save_file(uri, file_path)\n    except Exception, e:\n        hutil.error((\"Failed to download external file with uri:{0} \"\n                     \"with error {1}\").format(uri, e))\n        raise\n    return file_path\n\ndef save_local_file(src, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        hutil.log(\"Downloading to {0}\".format(file_path))\n        waagent.SetFileContents(file_path, src)\n    except Exception, e:\n        hutil.error((\"Failed to save file from user's configuration \"\n                     \"with error {0}\").format(e))\n        raise\n    return file_path\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        Preprocess the text file. 
If it is a binary file, skip it.\n    \"\"\"\n    is_text, code_type = is_text_file(file_path)\n    if is_text:\n        dos2unix(file_path)\n        hutil.log(\"Converting text files from DOS to Unix formats: Done\")\n        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:\n            remove_bom(file_path)\n            hutil.log(\"Removing BOM: Done\")\n\ndef is_text_file(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read(512)\n    return is_text(contents)\n\ndef is_text(contents):\n    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']\n    # Openlogic and Oracle distros don't have python-chardet\n    waagent.Run('yum -y install python-chardet', False)\n    import chardet\n    code_type = chardet.detect(contents)['encoding']\n    if code_type in supported_encoding:\n        return True, code_type\n    else:\n        return False, code_type\n\ndef dos2unix(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    f_temp.write(contents)\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef remove_bom(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            f_temp.write(contents.decode(encoding).encode('utf-8'))\n            break\n        except UnicodeDecodeError:\n            continue\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef download_and_save_file(uri, file_path):\n    src = urllib2.urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while(buf):\n        dest.write(buf)\n        buf = src.read(buf_size)\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"create directory if no exists\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse.urlparse(uriStr)\n    return uri.path\n\ndef get_blob_name_from_uri(uri):\n    return get_properties_from_uri(uri)['blob_name']\n\ndef get_container_name_from_uri(uri):\n    return get_properties_from_uri(uri)['container_name']\n\ndef get_properties_from_uri(uri):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.error(\"Failed to extract container, blob, from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\ndef download_customized_vmstatustest():\n    download_dir = prepare_download_dir(hutil.get_seq_no())\n    maxRetry = 2\n    for retry in range(0, maxRetry + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception, e:\n            hutil.error(\"Failed to download files, retry=\" + str(retry) + \", maxRetry=\" + str(maxRetry))\n            if retry != maxRetry:\n                hutil.log(\"Sleep 10 seconds\")\n                
time.sleep(10)\n            else:\n                raise\n\ndef copy_vmstatustestscript(seqNo, oneoff):\n    src_dir = prepare_download_dir(seqNo)\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        src = os.path.join(src_dir, filename)\n        if oneoff is not None and str(oneoff).lower() == \"true\":\n            dst = \"oneoff\"\n        else:\n            dst = \"scheduled\"\n        dst = os.path.join(os.getcwd(), dst)\n        current_vmstatustestscript = os.path.join(dst, filename)\n        if os.path.isfile(current_vmstatustestscript):\n            os.remove(current_vmstatustestscript)\n        # Remove the .pyc file\n        if os.path.isfile(current_vmstatustestscript+'c'):\n            os.remove(current_vmstatustestscript+'c')\n        if os.path.isfile(src):\n            shutil.copy(src, dst)\n\n\n# Main function is the only entrance to this extension handler\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n\n    global MyPatching\n    MyPatching = GetMyPatching(hutil)\n    if MyPatching is None:\n        sys.exit(1)\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(download)\", a):\n            download()\n        elif re.match(\"^([-/]*)(patch)\", a):\n            patch()\n        elif re.match(\"^([-/]*)(oneoff)\", a):\n            oneoff()\n\n\nif __name__ == '__main__':\n    main()\n"
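[Editor's note] The guest agent drives main() above with a single verb argument (e.g. python handler.py -enable, -download, -patch, -oneoff). For reference, a sketch of the merged public/protected settings that enable() and download_files() read; every key shown is consumed in the code above, and all values are placeholders:

    settings = {
        "oneoff": "false",
        "vmStatusTest": {
            # "true" means the scripts are inline content, not URIs.
            "local": "false",
            "idleTestScript": "http://example.com/idleTest.py",
            "healthyTestScript": "http://example.com/healthyTest.py"
        },
        # Needed only when the scripts live in a private container;
        # download_files requires the name and key as a pair.
        "storageAccountName": "myaccount",
        "storageAccountKey": "mykey"
    }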
  },
  {
    "path": "OSPatching/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n  <Type>OSPatchingForLinux</Type>\n  <Version>2.3.1.0</Version>\n  <Label>Microsoft Azure OS Patching Extension for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Azure OS Patching Extension for Linux Virtual Machines</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "OSPatching/oneoff/__init__.py",
    "content": ""
  },
  {
    "path": "OSPatching/patch/AbstractPatching.py",
    "content": "#!/usr/bin/python\n#\n# AbstractPatching is the base patching class of all the linux distros\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport re\nimport json\nimport random\nimport shutil\nimport time\nimport datetime\nimport logging\nimport logging.handlers\n\nfrom Utils.WAAgentUtil import waagent\nfrom ConfigOptions import ConfigOptions\n\nmfile = os.path.join(os.getcwd(), 'HandlerManifest.json')\nwith open(mfile,'r') as f:\n    manifest = json.loads(f.read())[0]\n    Version = manifest['version']\n\nStatusTest = {\n    \"Scheduled\" : {\n        \"Idle\" : None,\n        \"Healthy\" : None\n    },\n    \"Oneoff\" : {\n        \"Idle\" : None,\n        \"Healthy\" : None\n    }\n}\n\ntry:\n    from scheduled.idleTest import is_vm_idle\n    StatusTest[\"Scheduled\"][\"Idle\"] = is_vm_idle\nexcept:\n    pass\n\ntry:\n    from oneoff.idleTest import is_vm_idle\n    StatusTest[\"Oneoff\"][\"Idle\"] = is_vm_idle\nexcept:\n    pass\n\ntry:\n    from scheduled.healthyTest import is_vm_healthy\n    StatusTest[\"Scheduled\"][\"Healthy\"] = is_vm_healthy\nexcept:\n    pass\n\ntry:\n    from oneoff.healthyTest import is_vm_healthy\n    StatusTest[\"Oneoff\"][\"Healthy\"] = is_vm_healthy\nexcept:\n    pass\n\n\nclass AbstractPatching(object):\n    \"\"\"\n    AbstractPatching defines a skeleton neccesary for a concrete Patching class.\n    \"\"\"\n    def __init__(self, hutil):\n        self.hutil = hutil\n        self.syslogger = None\n\n        self.patched = []\n        self.to_patch = []\n        self.downloaded = []\n        self.download_retry_queue = []\n\n        # Patching Configuration\n        self.disabled = None\n        self.stop = None\n        self.reboot_after_patch = None\n        self.category = None\n        self.install_duration = None\n        self.oneoff = None\n        self.interval_of_weeks = None\n        self.day_of_week = None\n        self.start_time = None\n        self.download_time = None\n        self.download_duration = 3600\n        self.gap_between_stage = 60\n        self.current_configs = dict()\n\n        self.category_required = ConfigOptions.category[\"required\"]\n        self.category_all = ConfigOptions.category[\"all\"]\n\n        # Crontab Variables\n        self.crontab = '/etc/crontab'\n        self.cron_restart_cmd = 'service cron restart'\n        self.cron_chkconfig_cmd = 'chkconfig cron on'\n\n        # Path Variables\n        self.cwd = os.getcwd()\n        self.package_downloaded_path = os.path.join(self.cwd, 'package.downloaded')\n        self.package_patched_path = os.path.join(self.cwd, 'package.patched')\n        self.stop_flag_path = os.path.join(self.cwd, 'StopOSPatching')\n        self.history_scheduled = os.path.join(self.cwd, 'scheduled/history')\n        self.scheduled_configs_file = os.path.join(self.cwd, 'scheduled/configs')\n        self.dist_upgrade_list = None\n        self.dist_upgrade_list_key = 'distUpgradeList'\n        
self.dist_upgrade_all = False\n        self.dist_upgrade_all_key = 'distUpgradeAll'\n\n        # Reboot Requirements\n        self.reboot_required = False\n        self.open_deleted_files_before = list()\n        self.open_deleted_files_after = list()\n        self.needs_restart = list()\n\n    def is_string_none_or_empty(self, str):\n        if str is None or len(str) < 1:\n            return True\n        return False\n    \n    def parse_settings(self, settings):\n        disabled = settings.get(\"disabled\")\n        if disabled is None or str(disabled).lower() not in ConfigOptions.disabled:\n            msg = \"The value of parameter \\\"disabled\\\" is empty or invalid. Set it False by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.disabled = False\n        else:\n            if str(disabled).lower() == \"true\":\n                self.disabled = True\n            else:\n                self.disabled = False\n        self.current_configs[\"disabled\"] = str(self.disabled)\n        if self.disabled:\n            msg = \"The extension is disabled.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            return\n\n        stop = settings.get(\"stop\")\n        if stop is None or str(stop).lower() not in ConfigOptions.stop:\n            msg = \"The value of parameter \\\"stop\\\" is empty or invalid. Set it False by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.stop = False\n        else:\n            if str(stop).lower() == 'true':\n                self.stop = True\n            else:\n                self.stop = False\n        self.current_configs[\"stop\"] = str(self.stop)\n\n        reboot_after_patch = settings.get(\"rebootAfterPatch\")\n        if reboot_after_patch is None or reboot_after_patch.lower() not in ConfigOptions.reboot_after_patch:\n            msg = \"The value of parameter \\\"rebootAfterPatch\\\" is empty or invalid. Set it \\\"rebootifneed\\\" by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.reboot_after_patch = ConfigOptions.reboot_after_patch[0]\n        else:\n            self.reboot_after_patch = reboot_after_patch.lower()\n        waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                  op=waagent.WALAEventOperation.Enable,\n                                  isSuccess=True,\n                                  version=Version,\n                                  message=\"rebootAfterPatch=\"+self.reboot_after_patch)\n        self.current_configs[\"rebootAfterPatch\"] = self.reboot_after_patch\n\n        category = settings.get('category')\n        if category is None or category.lower() not in ConfigOptions.category.values():\n            msg = \"The value of parameter \\\"category\\\" is empty or invalid. 
Set it \" + self.category_required + \" by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.category = self.category_required\n        else:\n            self.category = category.lower()\n        waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                  op=waagent.WALAEventOperation.Enable,\n                                  isSuccess=True,\n                                  version=Version,\n                                  message=\"category=\"+self.category)\n        self.current_configs[\"category\"] =  self.category\n        \n        self.dist_upgrade_list = settings.get(self.dist_upgrade_list_key)\n        if not self.is_string_none_or_empty(self.dist_upgrade_list):\n            self.current_configs[self.dist_upgrade_list_key] = self.dist_upgrade_list\n\n        dist_upgrade_all = settings.get(self.dist_upgrade_all_key)\n        if dist_upgrade_all is None:\n            msg = \"The value of parameter \\\"{0}\\\" is empty or invalid. Set it false by default.\".format(self.dist_upgrade_all_key)\n            self.log_and_syslog(logging.INFO, msg)\n            self.dist_upgrade_all = False\n        elif str(dist_upgrade_all).lower() == 'true':\n            self.dist_upgrade_all = True\n        else:\n            self.dist_upgrade_all = False\n        self.current_configs[self.dist_upgrade_all_key] = str(self.dist_upgrade_all)\n        \n        check_hrmin = re.compile(r'^[0-9]{1,2}:[0-9]{1,2}$')\n        install_duration = settings.get('installDuration')\n        if install_duration is None or not re.match(check_hrmin, install_duration):\n            msg = \"The value of parameter \\\"installDuration\\\" is empty or invalid. Set it 1 hour by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.install_duration = 3600\n            self.current_configs[\"installDuration\"] = \"01:00\"\n        else:\n            hr_min = install_duration.split(':')\n            self.install_duration = int(hr_min[0]) * 3600 + int(hr_min[1]) * 60\n            self.current_configs[\"installDuration\"] = install_duration\n        if self.install_duration <= 300:\n            msg = \"The value of parameter \\\"installDuration\\\" is smaller than 5 minutes. The extension will not reserve 5 minutes for reboot. It is recommended to set \\\"installDuration\\\" more than 30 minutes.\"\n            self.log_and_syslog(logging.WARNING, msg)\n        else:\n            msg = \"The extension will reserve 5 minutes for reboot.\"\n            # 5 min for reboot\n            self.install_duration -= 300\n            self.log_and_syslog(logging.INFO, msg)\n\n        # The parameter \"downloadDuration\" is not exposed to users. So there's no log.\n        download_duration = settings.get('downloadDuration')\n        if download_duration is not None and re.match(check_hrmin, download_duration):\n            hr_min = download_duration.split(':')\n            self.download_duration = int(hr_min[0]) * 3600 + int(hr_min[1]) * 60\n\n        oneoff = settings.get('oneoff')\n        if oneoff is None or str(oneoff).lower() not in ConfigOptions.oneoff:\n            msg = \"The value of parameter \\\"oneoff\\\" is empty or invalid. 
Set it False by default.\"\n            self.log_and_syslog(logging.WARNING, msg)\n            self.oneoff = False\n        else:\n            if str(oneoff).lower() == \"true\":\n                self.oneoff = True\n                msg = \"The extension will run in one-off mode.\"\n            else:\n                self.oneoff = False\n                msg = \"The extension will run in scheduled task mode.\"\n            self.log_and_syslog(logging.INFO, msg)\n        self.current_configs[\"oneoff\"] = str(self.oneoff)\n\n        if not self.oneoff:\n            start_time = settings.get('startTime')\n            if start_time is None or not re.match(check_hrmin, start_time):\n                msg = \"The parameter \\\"startTime\\\" is empty or invalid. It defaults to 03:00.\"\n                self.log_and_syslog(logging.WARNING, msg)\n                start_time = \"03:00\"\n            try:\n                start_time_dt = datetime.datetime.strptime(start_time, '%H:%M')\n                self.start_time = datetime.time(start_time_dt.hour, start_time_dt.minute)\n            except ValueError:\n                msg = \"The parameter \\\"startTime\\\" is invalid. It defaults to 03:00.\"\n                self.log_and_syslog(logging.WARNING, msg)\n                self.start_time = datetime.time(3)\n            download_time_dt = start_time_dt - datetime.timedelta(seconds=self.download_duration)\n            self.download_time = datetime.time(download_time_dt.hour, download_time_dt.minute)\n            self.current_configs[\"startTime\"] = start_time\n \n            day_of_week = settings.get(\"dayOfWeek\")\n            if day_of_week is None or day_of_week == \"\":\n                msg = \"The parameter \\\"dayOfWeek\\\" is empty. dayOfWeek defaults to Everyday.\"\n                self.log_and_syslog(logging.WARNING, msg)\n                day_of_week = \"everyday\"\n                self.day_of_week = ConfigOptions.day_of_week[\"everyday\"]\n            else:\n                for day in day_of_week.split('|'):\n                    day = day.strip().lower()\n                    if day not in ConfigOptions.day_of_week:\n                        msg = \"The parameter \\\"dayOfWeek\\\" is invalid. dayOfWeek defaults to Everyday.\"\n                        self.log_and_syslog(logging.WARNING, msg)\n                        day_of_week = \"everyday\"\n                        break\n                if \"everyday\" in day_of_week:\n                    self.day_of_week = ConfigOptions.day_of_week[\"everyday\"]\n                else:\n                    self.day_of_week = [ConfigOptions.day_of_week[day.strip().lower()] for day in day_of_week.split('|')]\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=waagent.WALAEventOperation.Enable,\n                                      isSuccess=True,\n                                      version=Version,\n                                      message=\"dayOfWeek=\" + day_of_week)\n            self.current_configs[\"dayOfWeek\"] = day_of_week\n\n            interval_of_weeks = settings.get('intervalOfWeeks')\n            if interval_of_weeks is None or interval_of_weeks not in ConfigOptions.interval_of_weeks:\n                msg = \"The parameter \\\"intervalOfWeeks\\\" is empty or invalid. 
intervalOfWeeks defaults to 1.\"\n                self.log_and_syslog(logging.WARNING, msg)\n                self.interval_of_weeks = '1'\n            else:\n                self.interval_of_weeks = interval_of_weeks\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=waagent.WALAEventOperation.Enable,\n                                      isSuccess=True,\n                                      version=Version,\n                                      message=\"intervalOfWeeks=\"+self.interval_of_weeks)\n            self.current_configs[\"intervalOfWeeks\"] = self.interval_of_weeks\n\n            # Save the latest configuration for the scheduled task so that one-off runs do not overwrite it\n            waagent.SetFileContents(self.scheduled_configs_file, json.dumps(self.current_configs))\n\n        msg = \"Current Configuration: \" + self.get_current_config()\n        self.log_and_syslog(logging.INFO, msg)\n\n    def install(self):\n        pass\n\n    def enable(self):\n        if self.stop:\n            self.stop_download()\n            self.create_stop_flag()\n            return\n        self.delete_stop_flag()\n        if not self.disabled and self.oneoff:\n            script_file_path = os.path.realpath(sys.argv[0])\n            os.system(' '.join(['python', script_file_path, '-oneoff', '>/dev/null 2>&1 &']))\n        else:\n            waagent.SetFileContents(self.history_scheduled, '')\n            self.set_download_cron()\n            self.set_patch_cron()\n            self.restart_cron()\n\n    def disable(self):\n        self.disabled = True\n        self.enable()\n\n    def stop_download(self):\n        '''\n        Kill the downloading process and its subprocesses.\n        Return code:\n            100  - There is no downloading process to stop\n            0    - The downloading process is stopped\n        '''\n        script_file_path = os.path.realpath(sys.argv[0])\n        script_file = os.path.basename(script_file_path)\n        retcode, output = waagent.RunGetOutput('ps -ef | grep \"' + script_file + ' -download\" | grep -v grep | grep -v sh | awk \\'{print $2}\\'')\n        if retcode > 0:\n            self.log_and_syslog(logging.ERROR, output)\n        if output != '':\n            retcode, output2 = waagent.RunGetOutput(\"ps -ef | awk '{if($3==\" + output.strip() + \") {print $2}}'\")\n            if retcode > 0:\n                self.log_and_syslog(logging.ERROR, output2)\n            if output2 != '':\n                waagent.Run('kill -9 ' + output2.strip())\n            waagent.Run('kill -9 ' + output.strip())\n            return 0\n        return 100\n\n    def set_download_cron(self):\n        script_file_path = os.path.realpath(sys.argv[0])\n        script_dir = os.path.dirname(script_file_path)\n        script_file = os.path.basename(script_file_path)\n        old_line_end = ' '.join([script_file, '-download'])\n        if self.disabled:\n            new_line = '\\n'\n        else:\n            if self.download_time > self.start_time:\n                dow = ','.join([str((day - 1) % 7) for day in self.day_of_week])\n            else:\n                dow = ','.join([str(day % 7) for day in self.day_of_week])\n            hr = str(self.download_time.hour)\n            minute = str(self.download_time.minute)\n            new_line = ' '.join(['\\n' + minute, hr, '* *', dow, 'root cd', script_dir, '&& python check.py', self.interval_of_weeks, '&& python', script_file, '-download > /dev/null 2>&1\\n'])\n        waagent.ReplaceFileContentsAtomic(self.crontab, '\\n'.join(filter(lambda a: a and (old_line_end not in a), waagent.GetFileContents(self.crontab).split('\\n'))) + new_line)\n\n    def set_patch_cron(self):\n        script_file_path = os.path.realpath(sys.argv[0])\n        script_dir = os.path.dirname(script_file_path)\n        script_file = os.path.basename(script_file_path)\n        old_line_end = ' '.join([script_file, '-patch'])\n        if self.disabled:\n            new_line = '\\n'\n        else:\n            start_time_dt = datetime.datetime(100, 1, 1, self.start_time.hour, self.start_time.minute)\n            start_hr = str(self.start_time.hour)\n            start_minute = str(self.start_time.minute)\n            start_dow = ','.join([str(day % 7) for day in self.day_of_week])\n            cleanup_time_dt = start_time_dt + datetime.timedelta(minutes=1)\n            cleanup_hr = str(cleanup_time_dt.hour)\n            cleanup_minute = str(cleanup_time_dt.minute)\n            if start_time_dt.day < cleanup_time_dt.day:\n                cleanup_dow = ','.join([str((day + 1) % 7) for day in self.day_of_week])\n            else:\n                cleanup_dow = ','.join([str(day % 7) for day in self.day_of_week])\n            new_line = ' '.join(['\\n' + start_minute, start_hr, '* *', start_dow, 'root cd', script_dir, '&& python check.py', self.interval_of_weeks, '&& python', script_file, '-patch >/dev/null 2>&1\\n'])\n            new_line += ' '.join([cleanup_minute, cleanup_hr, '* *', cleanup_dow, 'root rm -f', self.stop_flag_path, '\\n'])\n        waagent.ReplaceFileContentsAtomic(self.crontab, \"\\n\".join(filter(lambda a: a and (old_line_end not in a) and (self.stop_flag_path not in a), waagent.GetFileContents(self.crontab).split('\\n'))) + new_line)\n\n    def restart_cron(self):\n        retcode, output = waagent.RunGetOutput(self.cron_restart_cmd)\n        if retcode > 0:\n            self.log_and_syslog(logging.ERROR, output)\n\n    def download(self):\n        # Read the latest configuration for the scheduled task\n        settings = json.loads(waagent.GetFileContents(self.scheduled_configs_file))\n        self.parse_settings(settings)\n\n        self.provide_vm_status_test(StatusTest[\"Scheduled\"])\n        if not self.check_vm_idle(StatusTest[\"Scheduled\"]):\n            return\n\n        if self.exists_stop_flag():\n            self.log_and_syslog(logging.INFO, \"Downloading patches is stopped/canceled\")\n            return\n\n        waagent.SetFileContents(self.package_downloaded_path, '')\n        waagent.SetFileContents(self.package_patched_path, '')\n\n        start_download_time = time.time()\n        # Installing security patches is mandatory\n        self._download(self.category_required)\n        if self.category == self.category_all:\n            self._download(self.category_all)\n        self.retry_download()\n        end_download_time = time.time()\n        waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                  op=waagent.WALAEventOperation.Download,\n                                  isSuccess=True,\n                                  version=Version,\n                                  message=\" \".join([\"Real downloading time is\", str(round(end_download_time-start_download_time,3)), \"s\"]))\n\n    def _download(self, category):\n        self.log_and_syslog(logging.INFO, \"Start to check and download patches (Category:\" + category + \")\")\n        retcode, downloadlist = self.check(category)\n        if retcode > 0:\n            msg 
= \"Failed to check valid upgrades\"\n            self.log_and_syslog(logging.ERROR, msg)\n            self.hutil.do_exit(1, 'Enable', 'error', '0', msg)\n        if 'walinuxagent' in downloadlist:\n            downloadlist.remove('walinuxagent')\n        if not downloadlist:\n            self.log_and_syslog(logging.INFO, \"No packages are available for update.\")\n            return\n        self.log_and_syslog(logging.INFO, \"There are \" + str(len(downloadlist)) + \" packages to upgrade.\")\n        self.log_and_syslog(logging.INFO, \"Download list: \" + ' '.join(downloadlist))\n        for pkg_name in downloadlist:\n            if pkg_name in self.downloaded:\n                continue\n            retcode = self.download_package(pkg_name)\n            if retcode != 0:\n                self.log_and_syslog(logging.ERROR, \"Failed to download the package: \" + pkg_name)\n                self.log_and_syslog(logging.INFO, \"Put {0} into a retry queue\".format(pkg_name))\n                self.download_retry_queue.append((pkg_name, category))\n                continue\n            self.downloaded.append(pkg_name)\n            self.log_and_syslog(logging.INFO, \"Package \" + pkg_name + \" is downloaded.\")\n            waagent.AppendFileContents(self.package_downloaded_path, pkg_name + ' ' + category + '\\n')\n\n    def retry_download(self):\n        retry_count = 0\n        max_retry_count = 12\n        self.log_and_syslog(logging.INFO, \"Retry queue: {0}\".format(\n            \" \".join([pkg_name for pkg_name,category in self.download_retry_queue])))\n        while self.download_retry_queue:\n            pkg_name, category = self.download_retry_queue[0]\n            self.download_retry_queue = self.download_retry_queue[1:]\n            retcode = self.download_package(pkg_name)\n            if retcode == 0:\n                self.downloaded.append(pkg_name)\n                self.log_and_syslog(logging.INFO, \"Package \" + pkg_name + \" is downloaded.\")\n                waagent.AppendFileContents(self.package_downloaded_path, pkg_name + ' ' + category + '\\n')\n            else:\n                self.log_and_syslog(logging.ERROR, \"Failed to download the package: \" + pkg_name)\n                self.log_and_syslog(logging.INFO, \"Put {0} back into a retry queue\".format(pkg_name))\n                self.download_retry_queue.append((pkg_name,category))\n                retry_count = retry_count + 1\n                if retry_count > max_retry_count:\n                    err_msg = (\"Failed to download after {0} retries, \"\n                        \"retry queue: {1}\").format(max_retry_count,\n                        \" \".join([pkg_name for pkg_name,category in self.download_retry_queue]))\n                    self.log_and_syslog(logging.ERROR, err_msg)\n                    waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                              op=waagent.WALAEventOperation.Download,\n                                              isSuccess=False,\n                                              version=Version,\n                                              message=err_msg)\n                    break\n                k = retry_count if (retry_count < 10) else 10\n                interval = int(random.uniform(0, 2 ** k))\n                self.log_and_syslog(logging.INFO, (\"Sleep {0}s before \"\n                    \"the next retry, current retry_count = {1}\").format(interval, retry_count))\n                time.sleep(interval)\n\n    def patch(self):\n        # Read 
the latest configuration for scheduled task\n        settings = json.loads(waagent.GetFileContents(self.scheduled_configs_file))\n        self.parse_settings(settings)\n\n        if not self.check_vm_idle(StatusTest[\"Scheduled\"]):\n            return\n\n        if self.exists_stop_flag():\n            self.log_and_syslog(logging.INFO, \"Installing patches is stopped/canceled\")\n            self.delete_stop_flag()\n            return\n\n        # Record the scheduled time\n        waagent.AppendFileContents(self.history_scheduled, time.strftime(\"%Y-%m-%d %a\", time.localtime()) + '\\n' )\n        # Record the open deleted files before patching\n        self.open_deleted_files_before = self.check_open_deleted_files()\n\n        retcode = self.stop_download()\n        if retcode == 0:\n            self.log_and_syslog(logging.WARNING, \"Download time exceeded. The pending package will be downloaded in the next cycle\")\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=waagent.WALAEventOperation.Download,\n                                      isSuccess=False,\n                                      version=Version,\n                                      message=\"Downloading time out\")\n\n        global start_patch_time\n        start_patch_time = time.time()\n\n        pkg_failed = []\n        is_time_out = [False, False]\n        patchlist = self.get_pkg_to_patch(self.category_required)\n        is_time_out[0],failed = self._patch(self.category_required, patchlist)\n        pkg_failed.extend(failed)\n        if not self.exists_stop_flag():\n            if not is_time_out[0]:\n                patchlist = self.get_pkg_to_patch(self.category_all)\n                if len(patchlist) == 0:\n                    self.log_and_syslog(logging.INFO, \"No packages are available for update. 
(Category:\" + self.category_all + \")\")\n                else:\n                    self.log_and_syslog(logging.INFO, \"Going to sleep for \" + str(self.gap_between_stage) + \"s\")\n                    time.sleep(self.gap_between_stage)\n                    is_time_out[1],failed = self._patch(self.category_all, patchlist)\n                    pkg_failed.extend(failed)\n        else:\n            msg = \"Installing patches (Category:\" + self.category_all + \") is stopped/canceled\"\n            self.log_and_syslog(logging.INFO, msg)\n        if is_time_out[0] or is_time_out[1]:\n            msg = \"Patching time out\"\n            self.log_and_syslog(logging.WARNING, msg)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Patch\",\n                                      isSuccess=False,\n                                      version=Version,\n                                      message=msg)\n\n        self.open_deleted_files_after = self.check_open_deleted_files()\n        self.delete_stop_flag()\n        #self.report()\n        if StatusTest[\"Scheduled\"][\"Healthy\"]:\n            is_healthy = StatusTest[\"Scheduled\"][\"Healthy\"]()\n            msg = \"Checking the VM is healthy after patching: \" + str(is_healthy)\n            self.log_and_syslog(logging.INFO, msg)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Check healthy\",\n                                      isSuccess=is_healthy,\n                                      version=Version,\n                                      message=msg)\n        if self.patched is not None and len(self.patched) > 0:\n            self.reboot_if_required()\n\n    def _patch(self, category, patchlist):\n        if self.exists_stop_flag():\n            self.log_and_syslog(logging.INFO, \"Installing patches (Category:\" + category + \") is stopped/canceled\")\n            return False,list()\n        if not patchlist:\n            self.log_and_syslog(logging.INFO, \"No packages are available for update.\")\n            return False,list()\n        self.log_and_syslog(logging.INFO, \"Start to install \" + str(len(patchlist)) +\" patches (Category:\" + category + \")\")\n        self.log_and_syslog(logging.INFO, \"Patch list: \" + ' '.join(patchlist))\n        pkg_failed = []\n        for pkg_name in patchlist:\n            if pkg_name == 'walinuxagent':\n                continue\n            current_patch_time = time.time()\n            if current_patch_time - start_patch_time > self.install_duration:\n                msg = \"Patching time exceeded. 
The pending package will be patched in the next cycle\"\n                self.log_and_syslog(logging.WARNING, msg)\n                return True, pkg_failed\n            retcode = self.patch_package(pkg_name)\n            if retcode != 0:\n                self.log_and_syslog(logging.ERROR, \"Failed to patch the package: \" + pkg_name)\n                pkg_failed.append(' '.join([pkg_name, category]))\n                continue\n            self.patched.append(pkg_name)\n            self.log_and_syslog(logging.INFO, \"Package \" + pkg_name + \" is patched.\")\n            waagent.AppendFileContents(self.package_patched_path, pkg_name + ' ' + category + '\\n')\n        return False, pkg_failed\n\n    def patch_one_off(self):\n        \"\"\"\n        Called when startTime is an empty string, which means an on-demand patch.\n        \"\"\"\n        self.provide_vm_status_test(StatusTest[\"Oneoff\"])\n        if not self.check_vm_idle(StatusTest[\"Oneoff\"]):\n            return\n\n        global start_patch_time\n        start_patch_time = time.time()\n\n        self.log_and_syslog(logging.INFO, \"Going to patch one-off\")\n        waagent.SetFileContents(self.package_downloaded_path, '')\n        waagent.SetFileContents(self.package_patched_path, '')\n\n        # Record the open deleted files before patching\n        self.open_deleted_files_before = self.check_open_deleted_files()\n\n        pkg_failed = []\n        is_time_out = [False, False]\n        retcode, patchlist_required = self.check(self.category_required)\n        if retcode > 0:\n            msg = \"Failed to check valid upgrades\"\n            self.log_and_syslog(logging.ERROR, msg)\n            self.hutil.do_exit(1, 'Enable', 'error', '0', msg)\n        if not patchlist_required:\n            self.log_and_syslog(logging.INFO, \"No packages are available for update. (Category:\" + self.category_required + \")\")\n        else:\n            is_time_out[0], failed = self._patch(self.category_required, patchlist_required)\n            pkg_failed.extend(failed)\n        if self.category == self.category_all:\n            if not self.exists_stop_flag():\n                if not is_time_out[0]:\n                    retcode, patchlist_other = self.check(self.category_all)\n                    if retcode > 0:\n                        msg = \"Failed to check valid upgrades\"\n                        self.log_and_syslog(logging.ERROR, msg)\n                        self.hutil.do_exit(1, 'Enable', 'error', '0', msg)\n                    patchlist_other = [pkg for pkg in patchlist_other if pkg not in patchlist_required]\n                    if len(patchlist_other) == 0:\n                        self.log_and_syslog(logging.INFO, \"No packages are available for update. 
(Category:\" + self.category_all + \")\")\n                    else:\n                        self.log_and_syslog(logging.INFO, \"Going to sleep for \" + str(self.gap_between_stage) + \"s\")\n                        time.sleep(self.gap_between_stage)\n                        self.log_and_syslog(logging.INFO, \"Going to patch one-off (Category:\" + self.category_all + \")\")\n                        is_time_out[1],failed = self._patch(self.category_all, patchlist_other)\n                        pkg_failed.extend(failed)\n            else:\n                self.log_and_syslog(logging.INFO, \"Installing patches (Category:\" + self.category_all + \") is stopped/canceled\")\n\n        if is_time_out[0] or is_time_out[1]:\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Oneoff Patch\",\n                                      isSuccess=False,\n                                      version=Version,\n                                      message=\"Patching time out\")\n\n        shutil.copy2(self.package_patched_path, self.package_downloaded_path)\n        for pkg in pkg_failed:\n            waagent.AppendFileContents(self.package_downloaded_path, pkg + '\\n')\n\n        self.open_deleted_files_after = self.check_open_deleted_files()\n        self.delete_stop_flag()\n        #self.report()\n        if StatusTest[\"Oneoff\"][\"Healthy\"]:\n            is_healthy = StatusTest[\"Oneoff\"][\"Healthy\"]()\n            msg = \"Checking the VM is healthy after patching: \" + str(is_healthy)\n            self.log_and_syslog(logging.INFO, msg)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Check healthy\",\n                                      isSuccess=is_healthy,\n                                      version=Version,\n                                      message=msg)\n        if self.patched is not None and len(self.patched) > 0:\n            self.reboot_if_required()\n\n    def reboot_if_required(self):\n        self.check_reboot()\n        self.check_needs_restart()\n        msg = ''\n        if self.reboot_after_patch == 'notrequired' and self.reboot_required:\n            msg += 'Pending Reboot'\n            if self.needs_restart:\n                msg += ': ' + ' '.join(self.needs_restart)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Reboot\",\n                                      isSuccess=False,\n                                      version=Version,\n                                      message=\" \".join([self.reboot_after_patch, msg,\n                                                       str(len(self.needs_restart)),\n                                                       \"packages need to restart\"]))\n            self.hutil.do_exit(0, 'Enable', 'success', '0', msg)\n        if self.reboot_after_patch == 'required':\n            msg += \"System going to reboot(Required)\"\n        elif self.reboot_after_patch == 'auto' and self.reboot_required:\n            msg += \"System going to reboot(Auto)\"\n        elif self.reboot_after_patch == 'rebootifneed':\n            if (self.reboot_required or self.needs_restart):\n                msg += \"System going to reboot(RebootIfNeed)\"\n        if msg:\n            if self.needs_restart:\n                msg += ': ' + ' '.join(self.needs_restart)\n            self.log_and_syslog(logging.INFO, msg)\n            
waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Reboot\",\n                                      isSuccess=True,\n                                      version=Version,\n                                      message=\"Reboot\")\n            retcode = waagent.Run('reboot')\n            if retcode != 0:\n                self.log_and_syslog(logging.ERROR, \"Failed to reboot\")\n                waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                          op=\"Reboot\",\n                                          isSuccess=False,\n                                          version=Version,\n                                          message=\"Failed to reboot\")\n        else:\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Reboot\",\n                                      isSuccess=False,\n                                      version=Version,\n                                      message=\"Not reboot\")\n\n    def check_needs_restart(self):\n        self.needs_restart.extend(self.get_pkg_needs_restart())\n        patched_files = dict()\n        for pkg in self.get_pkg_patched():\n            cmd = ' '.join([self.pkg_query_cmd, pkg])\n            try:\n                retcode, output = waagent.RunGetOutput(cmd)\n                patched_files[os.path.basename(pkg)] = [filename for filename in output.split(\"\\n\") if os.path.isfile(filename)]\n            except Exception:\n                self.log_and_syslog(logging.ERROR, \"Failed to \" + cmd)\n        # for k, v in patched_files.items():\n        #     self.log_and_syslog(logging.INFO, k + \": \" + \" \".join(v))\n        open_deleted_files = list()\n        for filename in self.open_deleted_files_after:\n            if filename not in self.open_deleted_files_before:\n                open_deleted_files.append(filename)\n        # self.log_and_syslog(logging.INFO, \"Open deleted files: \" + \" \".join(open_deleted_files))\n        for pkg, files in patched_files.items():\n            for filename in files:\n                realpath = os.path.realpath(filename)\n                if realpath in open_deleted_files and pkg not in self.needs_restart:\n                    self.needs_restart.append(pkg)\n        pkgs = \" \".join(self.needs_restart)\n        if pkgs:\n            msg = \"Packages that need a restart: \" + pkgs\n        else:\n            msg = \"There is no package that needs a restart\"\n        self.log_and_syslog(logging.INFO, msg)\n\n    def get_pkg_needs_restart(self):\n        return []\n\n    def check_open_deleted_files(self):\n        ret = list()\n        retcode, output = waagent.RunGetOutput('lsof | grep \"DEL\"')\n        if retcode == 0:\n            for line in output.split('\\n'):\n                if line:\n                    filename = line.split()[-1]\n                    if filename not in ret:\n                        ret.append(filename)\n        return ret\n\n    def create_stop_flag(self):\n        waagent.SetFileContents(self.stop_flag_path, '')\n\n    def delete_stop_flag(self):\n        if self.exists_stop_flag():\n            os.remove(self.stop_flag_path)\n\n    def exists_stop_flag(self):\n        return os.path.isfile(self.stop_flag_path)\n\n    def get_pkg_to_patch(self, category):\n        if not os.path.isfile(self.package_downloaded_path):\n            return []\n        pkg_to_patch = waagent.GetFileContents(self.package_downloaded_path)\n        if not pkg_to_patch:\n            return []\n        return [line.split()[0] for line in pkg_to_patch.split('\\n') if line.endswith(category)]\n\n    def get_pkg_patched(self):\n        if not os.path.isfile(self.package_patched_path):\n            return []\n        pkg_patched = waagent.GetFileContents(self.package_patched_path)\n        if not pkg_patched:\n            return []\n        patchedlist = [line.split()[0] for line in pkg_patched.split('\\n') if line]\n        return patchedlist\n\n    def get_current_config(self):\n        current_configs = []\n        for k, v in self.current_configs.items():\n            current_configs.append(k + \"=\" + v)\n        return \",\".join(current_configs)\n\n    def provide_vm_status_test(self, status_test):\n        for status, provided in status_test.items():\n            is_provided = provided is not None\n            level = logging.INFO if is_provided else logging.WARNING\n            msg = \"The VM %s test script is provided: %s\" % (status, str(is_provided))\n            self.log_and_syslog(level, msg)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"provides %s test script\" % (status,),\n                                      isSuccess=is_provided,\n                                      version=Version,\n                                      message=msg)\n\n    def check_vm_idle(self, status_test):\n        is_idle = True\n        if status_test[\"Idle\"]:\n            is_idle = status_test[\"Idle\"]()\n            msg = \"Checking the VM is idle: \" + str(is_idle)\n            self.log_and_syslog(logging.INFO, msg)\n            waagent.AddExtensionEvent(name=self.hutil.get_name(),\n                                      op=\"Check idle\",\n                                      isSuccess=is_idle,\n                                      version=Version,\n                                      message=msg)\n            if not is_idle:\n                self.log_and_syslog(logging.WARNING, \"Current operation is skipped.\")\n        return is_idle\n\n    def log_and_syslog(self, level, message):\n        if level == logging.INFO:\n            self.hutil.log(message)\n        elif level == logging.WARNING:\n            self.hutil.log(\" \".join([\"Warning:\", message]))\n        elif level == logging.ERROR:\n            self.hutil.error(message)\n        if self.syslogger is None:\n            self.init_syslog()\n        self.syslog(level, message)\n\n    def init_syslog(self):\n        self.syslogger = logging.getLogger(self.hutil.get_name())\n        self.syslogger.setLevel(logging.INFO)\n        formatter = logging.Formatter('%(name)s: %(levelname)s %(message)s')\n        try:\n            handler = logging.handlers.SysLogHandler(address='/dev/log')\n            handler.setFormatter(formatter)\n            self.syslogger.addHandler(handler)\n        except Exception:\n            self.syslogger = None\n            self.hutil.error(\"Syslog is not ready.\")\n\n    def syslog(self, level, message):\n        if self.syslogger is None:\n            return\n        if level == logging.INFO:\n            self.syslogger.info(message)\n        elif level == logging.WARNING:\n            self.syslogger.warning(message)\n        elif level == logging.ERROR:\n            self.syslogger.error(message)\n\n"
  },
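retry_download() in AbstractPatching above re-queues failed packages and sleeps a random interval drawn from an exponentially growing window, with the exponent capped at 10. A standalone sketch of that backoff policy; download_package here is a hypothetical stand-in for the distro-specific method, which returns 0 on success:

    import random
    import time

    def download_with_backoff(download_package, pkg_name, max_retry_count=12):
        for retry_count in range(1, max_retry_count + 1):
            if download_package(pkg_name) == 0:
                return True
            k = retry_count if retry_count < 10 else 10  # cap the exponent
            interval = int(random.uniform(0, 2 ** k))    # 0..2**k seconds
            time.sleep(interval)
        return False

The randomized window keeps many VMs that share a repository from retrying in lockstep after an outage.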
  {
    "path": "OSPatching/patch/ConfigOptions.py",
    "content": "#!/usr/bin/python\n#\n# AbstractPatching is the base patching class of all the linux distros\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass ConfigOptions(object):\n    disabled = [\"true\", \"false\"]             # Default value is \"false\"\n    stop = [\"true\", \"false\"]                 # Default value is \"false\"\n    reboot_after_patch = [\"rebootifneed\",    # Default value is \"rebootifneed\"\n                          \"auto\",\n                          \"required\",\n                          \"notrequired\"]\n    category = {\"required\" : \"important\",    # Default value is \"important\"\n                \"all\"      : \"importantandrecommended\"}\n    oneoff = [\"true\", \"false\"]               # Default value is \"false\"\n    interval_of_weeks = [str(i) for i in range(1, 53)]  # Default value is \"1\"\n    day_of_week = {\"everyday\" : range(1,8),  # Default value is \"everyday\"\n                   \"monday\"   : 1,\n                   \"tuesday\"  : 2,\n                   \"wednesday\": 3,\n                   \"thursday\" : 4,\n                   \"friday\"   : 5,\n                   \"saturday\" : 6,\n                   \"sunday\"   : 7}\n"
  },
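ConfigOptions.day_of_week maps the textual "dayOfWeek" setting onto crontab-style day numbers, and parse_settings() in AbstractPatching splits a '|'-separated value against it, falling back to "everyday" when any entry is unknown. A sketch of that lookup (Python 2, assuming it runs next to ConfigOptions.py so the import resolves):

    from ConfigOptions import ConfigOptions

    def parse_day_of_week(value):
        days = [d.strip().lower() for d in value.split('|')]
        if 'everyday' in days or any(d not in ConfigOptions.day_of_week for d in days):
            return ConfigOptions.day_of_week['everyday']  # fall back to every day
        return [ConfigOptions.day_of_week[d] for d in days]

    print(parse_day_of_week('Monday|Friday'))  # [1, 5]
    print(parse_day_of_week('someday'))        # [1, 2, 3, 4, 5, 6, 7] (fallback)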
  {
    "path": "OSPatching/patch/OraclePatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom redhatPatching import redhatPatching\n\nclass OraclePatching(redhatPatching):\n    def __init__(self, hutil):\n        super(OraclePatching,self).__init__(hutil)\n"
  },
  {
    "path": "OSPatching/patch/SuSEPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport shutil\n\nfrom Utils.WAAgentUtil import waagent\nfrom AbstractPatching import AbstractPatching\n\n\nclass SuSEPatching(AbstractPatching):\n    def __init__(self, hutil):\n        super(SuSEPatching,self).__init__(hutil)\n        self.patched_pkgs = None\n        self.cache_dir = os.path.join(os.path.dirname(sys.argv[0]), 'packages')\n        if not os.path.isdir(self.cache_dir):\n            os.mkdir(self.cache_dir)\n        self.clean_cmd = 'zypper clean'\n        self.check_cmd = 'zypper -q --gpg-auto-import-keys --non-interactive list-patches'\n        self.check_security_cmd = self.check_cmd + ' --category security'\n        self.download_cmd = 'zypper --non-interactive --pkg-cache-dir ' + self.cache_dir + ' install -d --auto-agree-with-licenses -t patch '\n        self.patch_cmd = 'zypper --non-interactive --pkg-cache-dir ' + self.cache_dir + ' install --auto-agree-with-licenses -t patch '\n        self.pkg_query_cmd = 'rpm -qlp'\n        waagent.Run('zypper -q --gpg-auto-import-keys --non-interactive refresh', False)\n    \n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to upgrade\n        \"\"\"\n        if category == self.category_all:\n            check_cmd = self.check_cmd\n        elif category == self.category_required:\n            check_cmd = self.check_security_cmd\n        retcode, output = waagent.RunGetOutput(check_cmd)\n        output_lines = output.split('\\n')\n        patch_list = []\n        name_position = 1\n        for line in output_lines:\n            properties = [elem.strip() for elem in line.split('|')]\n            if len(properties) > 1:\n                if 'Name' in properties:\n                    name_position = properties.index('Name')\n                elif not properties[name_position] in self.to_patch:\n                    patch_list.append(properties[name_position])\n        return retcode, patch_list\n\n    def download_package(self, package):\n        retcode = waagent.Run(self.download_cmd + package, False)\n        if 0 < retcode and retcode < 100:\n            return 1\n        else:\n            return 0\n\n    def patch_package(self, package):\n        if self.patched_pkgs == None:\n            self.patched_pkgs = list()\n            for root,dirs,files in os.walk(self.cache_dir):\n                for filename in files:\n                    if filename.endswith('rpm'):\n                        shutil.copy(os.path.join(root, filename), \"/tmp/\")\n                        self.patched_pkgs.append(\"/tmp/\"+filename)\n        retcode = waagent.Run(self.patch_cmd + package, False)\n        if 0 < retcode and retcode < 100:\n            return 1\n        else:\n            if retcode == 102:\n                self.reboot_required = True\n            return 0\n\n    def check_reboot(self):\n        pass\n\n    def 
get_pkg_patched(self):\n        return self.patched_pkgs\n"
  },
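SuSEPatching.check() above treats zypper's list-patches output as a '|'-separated table: it locates the "Name" column from the header row, then collects one patch name per data row (additionally skipping names already queued in self.to_patch). A sketch of that parsing against a trimmed, hypothetical zypper transcript:

    sample = '\n'.join([
        'Repository | Name           | Category | Status',
        '-----------+----------------+----------+-------',
        'Updates    | slessp3-kernel | security | needed',
        'Updates    | slessp3-glibc  | security | needed',
    ])

    patch_list = []
    name_position = 1
    for line in sample.split('\n'):
        properties = [elem.strip() for elem in line.split('|')]
        if len(properties) > 1:            # the dashed separator is skipped
            if 'Name' in properties:
                name_position = properties.index('Name')  # header row
            else:
                patch_list.append(properties[name_position])

    print(patch_list)  # ['slessp3-kernel', 'slessp3-glibc']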
  {
    "path": "OSPatching/patch/UbuntuPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom Utils.WAAgentUtil import waagent\nfrom AbstractPatching import AbstractPatching\n\nclass UbuntuPatching(AbstractPatching):\n    def __init__(self, hutil):\n        super(UbuntuPatching,self).__init__(hutil)\n        self.update_cmd = 'apt-get update'\n        self.check_cmd = 'apt-get -qq -s upgrade'\n        self.check_cmd_distupgrade = 'apt-get -qq -s dist-upgrade'\n        self.check_security_suffix = ' -o Dir::Etc::SourceList=/etc/apt/security.sources.list'\n        waagent.Run('grep \"-security\" /etc/apt/sources.list | sudo grep -v \"#\" > /etc/apt/security.sources.list')\n        self.download_cmd = 'apt-get -d -y install'\n        self.patch_cmd = 'apt-get -y -q --force-yes -o Dpkg::Options::=\"--force-confdef\" install'\n        self.fix_cmd = 'dpkg --configure -a --force-confdef'\n        self.status_cmd = 'apt-cache show'\n        self.pkg_query_cmd = 'dpkg-query -L'\n        # Avoid a config prompt\n        os.environ['DEBIAN_FRONTEND']='noninteractive'\n\n    def install(self):\n        \"\"\"\n        Install for dependencies.\n        \"\"\"\n        # Update source.list\n        waagent.Run(self.update_cmd, False)\n        # /var/run/reboot-required is not created unless the update-notifier-common package is installed\n        retcode = waagent.Run('apt-get -y install update-notifier-common')\n        if retcode > 0:\n            self.hutil.error(\"Failed to install update-notifier-common\")\n\n    def try_package_with_autofix(self, cmd):\n        retcode, output = waagent.RunGetOutput(cmd)\n        if retcode == 0:\n            return retcode, output\n        # An error occurred while running the command. Try to recover.\n        # Unfortunately apt-get returns code 100 regardless of the error encountered, \n        # so we can't smartly detect the cause of failure\n        self.log_and_syslog(logging.WARNING, \"Error running command ({0}). Will try to correct package state ({1}). Error was {2}\".format(cmd, self.fix_cmd, output))\n        retcode, output = waagent.RunGetOutput(self.fix_cmd)\n        if retcode != 0:\n            self.log_and_syslog(logging.WARNING, \"Error correcting package state ({0}). Error was {1}\".format(self.fix_cmd, output))\n        retcode, output = waagent.RunGetOutput(cmd)\n        if retcode != 0:\n            self.log_and_syslog(logging.WARNING, \"Unable to run ({0}) on second attempt. Giving up. 
Error was {1}\".format(cmd, output))\n        return retcode, output\n\n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to download & upgrade\n        \"\"\"\n        # Perform upgrade or dist-upgrade as appropriate\n        if self.dist_upgrade_all:\n            self.log_and_syslog(logging.INFO, \"Performing dist-upgrade for ALL packages\")\n            check_cmd = self.check_cmd_distupgrade\n        else:\n            check_cmd = self.check_cmd\n        \n        # If upgrading only required/security patches, append the command suffix\n        # Otherwise, assume all packages will be upgraded\n        if category == self.category_required:\n            check_cmd = check_cmd + self.check_security_suffix\n        retcode, output = self.try_package_with_autofix(check_cmd)\n        \n        to_download = [line.split()[1] for line in output.split('\\n') if line.startswith('Inst')]\n\n        # Azure repo assumes upgrade may have dependency changes\n        if retcode != 0:\n            self.log_and_syslog(logging.WARNING, \"Failed to get list of upgradeable packages\")\n        elif self.is_string_none_or_empty(self.dist_upgrade_list):\n            self.log_and_syslog(logging.INFO, \"Dist upgrade list not specified, will perform normal patch\")\n        elif not os.path.isfile(self.dist_upgrade_list):\n            self.log_and_syslog(logging.WARNING, \"Dist upgrade list was specified but file [{0}] does not exist\".format(self.dist_upgrade_list))\n        else:\n            self.log_and_syslog(logging.INFO, \"Running dist-upgrade using {0}\".format(self.dist_upgrade_list))\n            self.check_azure_cmd = 'apt-get -qq -s dist-upgrade -o Dir::Etc::SourceList={0}'.format(self.dist_upgrade_list)\n            retcode, azoutput = self.try_package_with_autofix(self.check_azure_cmd)\n            azure_to_download = [line.split()[1] for line in azoutput.split('\\n') if line.startswith('Inst')]\n            to_download += list(set(azure_to_download) - set(to_download))\n\n        return retcode, to_download\n        \n    def download_package(self, package):\n        return waagent.Run(self.download_cmd + ' ' + package)\n\n    def patch_package(self, package):\n        retcode, output = self.try_package_with_autofix(self.patch_cmd + ' ' + package)\n        return retcode\n\n    def check_reboot(self):\n        self.reboot_required = os.path.isfile('/var/run/reboot-required')\n\n    def get_pkg_needs_restart(self):\n        fd = '/var/run/reboot-required.pkgs'\n        if not os.path.isfile(fd):\n            return []\n        return waagent.GetFileContents(fd).split('\\n')\n\n    def report(self):\n        \"\"\"\n        TODO: Report the detail status of patching\n        \"\"\"\n        for package_patched in self.patched:\n            retcode,output = waagent.RunGetOutput(self.status_cmd + ' ' + package_patched)\n            output = output.split('\\n\\n')[0]\n            self.hutil.log(output)\n\n"
  },
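UbuntuPatching.check() above runs apt-get in simulate mode (-s) and keeps the second field of every line that starts with "Inst", which is how apt reports each package it would upgrade. The same extraction over a hypothetical simulation transcript:

    output = '\n'.join([
        'Inst libssl1.0.0 [1.0.1-4ubuntu5.20] (1.0.1-4ubuntu5.21 [amd64])',
        'Conf libssl1.0.0 (1.0.1-4ubuntu5.21 [amd64])',
        'Inst openssl [1.0.1-4ubuntu5.20] (1.0.1-4ubuntu5.21 [amd64])',
    ])

    to_download = [line.split()[1] for line in output.split('\n')
                   if line.startswith('Inst')]
    print(to_download)  # ['libssl1.0.0', 'openssl']

"Conf" lines describe configuration steps rather than downloads, so they are intentionally ignored.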
  {
    "path": "OSPatching/patch/__init__.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport os\nimport re\nimport platform\n\nfrom UbuntuPatching import UbuntuPatching\nfrom redhatPatching import redhatPatching\nfrom centosPatching import centosPatching\nfrom OraclePatching import OraclePatching\nfrom SuSEPatching import SuSEPatching\n\n# Define the function in case waagent(<2.0.4) doesn't have DistInfo()\ndef DistInfo(fullname=0):\n    if 'FreeBSD' in platform.system():\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['FreeBSD', release]\n        return distinfo\n    if os.path.isfile('/etc/oracle-release'):\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['Oracle', release]\n        return distinfo\n    if 'linux_distribution' in dir(platform):\n        distinfo = list(platform.linux_distribution(\\\n                        full_distribution_name=fullname))\n        # remove trailing whitespace in distro name\n        distinfo[0] = distinfo[0].strip()\n        return distinfo\n    else:\n        return platform.dist()\n\ndef GetMyPatching(hutil, patching_class_name=''):\n    \"\"\"\n    Return MyPatching object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    if patching_class_name == '':\n        if 'Linux' in platform.system():\n            Distro = DistInfo()[0]\n        else: # I know this is not Linux!\n            if 'FreeBSD' in platform.system():\n                Distro = platform.system()\n        Distro = Distro.strip('\"')\n        Distro = Distro.strip(' ')\n        patching_class_name = Distro + 'Patching'\n    else:\n        Distro = patching_class_name\n    if not globals().has_key(patching_class_name):\n        hutil.log_and_syslog(Distro + ' is not a supported distribution.')\n        return None\n    return globals()[patching_class_name](hutil)\n"
  },
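GetMyPatching() above builds a class name of the form "<Distro>Patching" and resolves it in this module's globals(), so supporting a new distro is a matter of importing another subclass here. The dispatch can also be driven explicitly, e.g. from a test; hutil below is assumed to be the Util.HandlerUtility instance that the handler's main() constructs:

    from patch import GetMyPatching

    my_patching = GetMyPatching(hutil, patching_class_name='UbuntuPatching')
    if my_patching is None:
        raise RuntimeError('unsupported distribution')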
  {
    "path": "OSPatching/patch/centosPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom redhatPatching import redhatPatching\n\n\nclass centosPatching(redhatPatching):\n    def __init__(self, hutil):\n        super(centosPatching,self).__init__(hutil)\n"
  },
  {
    "path": "OSPatching/patch/redhatPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom Utils.WAAgentUtil import waagent\nfrom AbstractPatching import AbstractPatching\n\n\nclass redhatPatching(AbstractPatching):\n    def __init__(self, hutil):\n        super(redhatPatching,self).__init__(hutil)\n        self.cron_restart_cmd = 'service crond restart'\n        self.check_cmd = 'yum -q check-update'\n        self.check_security_cmd = 'yum -q --security check-update'\n        self.clean_cmd = 'yum clean packages'\n        self.download_cmd = 'yum -q -y --downloadonly update'\n        self.patch_cmd = 'yum -y update'\n        self.status_cmd = 'yum -q info'\n        self.pkg_query_cmd = 'repoquery -l'\n        self.cache_dir = '/var/cache/yum/'\n\n    def install(self):\n        \"\"\"\n        Install for dependencies.\n        \"\"\"\n        # For yum --downloadonly option\n        waagent.Run('yum -y install yum-downloadonly', False)\n\n        # For yum --security option\n        retcode = waagent.Run('yum -y install yum-plugin-security')\n        if retcode > 0:\n            self.hutil.error(\"Failed to install yum-plugin-security\")\n\n        # For package-cleanup, needs-restarting, repoquery\n        retcode = waagent.Run('yum -y install yum-utils')\n        if retcode > 0:\n            self.hutil.error(\"Failed to install yum-utils\")\n\n        # For lsof\n        retcode = waagent.Run('yum -y install lsof')\n        if retcode > 0:\n            self.hutil.error(\"Failed to install lsof\")\n\n        # Install missing dependencies\n        missing_dependency_list = self.check_missing_dependencies()\n        for pkg in missing_dependency_list:\n            retcode = waagent.Run('yum -y install ' + pkg)\n            if retcode > 0:\n                self.hutil.error(\"Failed to install missing dependency: \" + pkg)\n\n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to download & upgrade\n        \"\"\"\n        if category == self.category_all:\n            check_cmd = self.check_cmd\n        elif category == self.category_required:\n            check_cmd = self.check_security_cmd\n        to_download = []\n        retcode,output = waagent.RunGetOutput(check_cmd, chk_err=False)\n        if retcode == 0:\n            return 0, to_download\n        elif retcode == 100:\n            lines = output.strip().split('\\n')\n            for line in lines:\n                line = re.split(r'\\s+', line.strip())\n                if len(line) != 3:\n                    break\n                to_download.append(line[0])\n            return 0, to_download\n        elif retcode == 1:\n            return 1, to_download\n\n    def download_package(self, package):\n        retcode = waagent.Run(self.download_cmd + ' ' + package, chk_err=False)\n        # Yum exit code is not 0 even if succeed, so check if the package rpm exsits to verify that downloading succeeds.\n        return 
self.check_download(package)\n\n    def patch_package(self, package):\n        return waagent.Run(self.patch_cmd + ' ' + package)\n\n    def check_reboot(self):\n        retcode,last_kernel = waagent.RunGetOutput(\"rpm -q --last kernel\")\n        last_kernel = last_kernel.split()[0][7:]\n        retcode,current_kernel = waagent.RunGetOutput('uname -r')\n        current_kernel = current_kernel.strip()\n        self.reboot_required = (last_kernel != current_kernel)\n\n    def report(self):\n        \"\"\"\n        TODO: Report the detail status of patching\n        \"\"\"\n        for package_patched in self.patched:\n            self.info_pkg(package_patched)\n\n    def info_pkg(self, pkg_name):\n        \"\"\"\n        Return details about a package        \n        \"\"\"\n        retcode,output = waagent.RunGetOutput(self.status_cmd + ' ' + pkg_name)\n        if retcode != 0:\n            self.hutil.error(output)\n            return None\n        installed_pkg_info_list = output.rpartition('Available Packages')[0].strip().split('\\n')\n        available_pkg_info_list = output.rpartition('Available Packages')[-1].strip().split('\\n')\n        pkg_info = dict()\n        pkg_info['installed'] = dict()\n        pkg_info['available'] = dict()\n        for item in installed_pkg_info_list:\n            if item.startswith('Name'):\n                pkg_info['installed']['name'] = item.split(':')[-1].strip()\n            elif item.startswith('Arch'):\n                pkg_info['installed']['arch'] = item.split(':')[-1].strip()\n            elif item.startswith('Version'):\n                pkg_info['installed']['version'] = item.split(':')[-1].strip()\n            elif item.startswith('Release'):\n                pkg_info['installed']['release'] = item.split(':')[-1].strip()\n        for item in available_pkg_info_list:\n            if item.startswith('Name'):\n                pkg_info['available']['name'] = item.split(':')[-1].strip()\n            elif item.startswith('Arch'):\n                pkg_info['available']['arch'] = item.split(':')[-1].strip()\n            elif item.startswith('Version'):\n                pkg_info['available']['version'] = item.split(':')[-1].strip()\n            elif item.startswith('Release'):\n                pkg_info['available']['release'] = item.split(':')[-1].strip()\n        return pkg_info\n\n    def check_download(self, pkg_name):\n        pkg_info = self.info_pkg(pkg_name)\n        name = pkg_info['available']['name']\n        arch = pkg_info['available']['arch']\n        version = pkg_info['available']['version']\n        release = pkg_info['available']['release']\n        package = '.'.join(['-'.join([name, version, release]), arch, 'rpm'])\n        retcode,output = waagent.RunGetOutput('cd ' + self.cache_dir + ';find . 
-name \"'+ package + '\"')\n        if retcode != 0:\n            self.hutil.error(\"Unable to check whether the downloading secceeds\")\n        else:\n            if output == '':\n                return 1\n            else:\n                return 0\n\n    def check_missing_dependencies(self):\n        retcode, output = waagent.RunGetOutput('package-cleanup --problems', chk_err=False)\n        missing_dependency_list = []\n        for line in output.split('\\n'):\n            if 'requires' not in line:\n                continue\n            words = line.split()\n            missing_dependency = words[words.index('requires') + 1]\n            if missing_dependency not in missing_dependency_list:\n                missing_dependency_list.append(missing_dependency)\n        return missing_dependency_list\n"
  },
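redhatPatching.check() depends on yum's exit-code convention: "yum -q check-update" exits 0 when nothing is upgradable, 100 when updates are available (listing one "name version repo" triple per line), and 1 on error. An illustrative parse over made-up output:

# Parse yum check-update output the way redhatPatching.check() does when
# the exit code is 100. The sample lines are fabricated for illustration.
import re

sample = (
    "bash.x86_64          4.2.46-34.el7       base\n"
    "kernel.x86_64        3.10.0-1160.el7     updates\n"
)

to_download = []
for line in sample.strip().split('\n'):
    fields = re.split(r'\s+', line.strip())
    if len(fields) != 3:  # stop at anything that isn't a package triple
        break
    to_download.append(fields[0])
assert to_download == ['bash.x86_64', 'kernel.x86_64']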
  {
    "path": "OSPatching/references",
    "content": "Utils/\n"
  },
  {
    "path": "OSPatching/scheduled/__init__.py",
    "content": ""
  },
  {
    "path": "OSPatching/scheduled/history",
    "content": ""
  },
  {
    "path": "OSPatching/test/FakePatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom AbstractPatching import AbstractPatching\n\n\nsys.path.append('../patch')\n\n\nclass FakePatching(AbstractPatching):\n    def __init__(self, hutil=None):\n        super(FakePatching,self).__init__(hutil)\n        self.pkg_query_cmd = 'dpkg-query -L'\n        self.gap_between_stage = 1\n        self.download_duration = 3600\n        self.security_download_list = ['a', 'b', 'c', 'd', 'e']\n        self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']\n\n    def install(self):\n        \"\"\"\n        Install for dependencies.\n        \"\"\"\n        pass\n\n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to download & upgrade\n        \"\"\"\n        if category == 'important':\n            return 0, self.security_download_list\n        else:\n            return 0, self.all_download_list\n\n    def download_package(self, package):\n        return 0\n\n    def patch_package(self, package):\n        return 0\n\n    def check_reboot(self):\n        return False\n"
  },
  {
    "path": "OSPatching/test/FakePatching2.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport time\n\nfrom AbstractPatching import AbstractPatching\n\n\nsys.path.append('../patch')\n\n\nclass FakePatching(AbstractPatching):\n    def __init__(self, hutil=None):\n        super(FakePatching,self).__init__(hutil)\n        self.pkg_query_cmd = 'dpkg-query -L'\n        self.gap_between_stage = 1\n        self.download_duration = 60\n        self.security_download_list = ['a', 'b', 'c', 'd', 'e']\n        self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']\n\n    def install(self):\n        \"\"\"\n        Install for dependencies.\n        \"\"\"\n        pass\n\n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to download & upgrade\n        \"\"\"\n        if category == 'important':\n            return 0, self.security_download_list\n        else:\n            return 0, self.all_download_list\n\n    def download_package(self, package):\n        time.sleep(11)\n        return 0\n\n    def patch_package(self, package):\n        return 0\n\n    def check_reboot(self):\n        return False\n"
  },
  {
    "path": "OSPatching/test/FakePatching3.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom AbstractPatching import AbstractPatching\n\n\nsys.path.append('../patch')\n\n\nclass FakePatching(AbstractPatching):\n    def __init__(self, hutil=None):\n        super(FakePatching,self).__init__(hutil)\n        self.pkg_query_cmd = 'dpkg-query -L'\n        self.gap_between_stage = 20\n        self.download_duration = 60\n        self.security_download_list = ['a', 'b', 'c', 'd', 'e']\n        self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']\n\n    def install(self):\n        \"\"\"\n        Install for dependencies.\n        \"\"\"\n        pass\n\n    def check(self, category):\n        \"\"\"\n        Check valid upgrades,\n        Return the package list to download & upgrade\n        \"\"\"\n        if category == 'important':\n            return 0, self.security_download_list\n        else:\n            return 0, self.all_download_list\n\n    def download_package(self, package):\n        return 0\n\n    def patch_package(self, package):\n        return 0\n\n    def check_reboot(self):\n        return False\n"
  },
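The three fakes above differ only in timing knobs, which lets the tests exercise different scheduling windows without touching a real package manager: FakePatching (gap_between_stage=1, download_duration=3600), FakePatching2 (download_duration=60 plus an 11-second sleep per download), FakePatching3 (gap_between_stage=20, download_duration=60). A test swaps one in roughly like this (sketch; assumes AbstractPatching tolerates hutil=None, as the fakes' default argument suggests):

from FakePatching import FakePatching

MyPatching = FakePatching(hutil=None)
retcode, packages = MyPatching.check('important')
assert retcode == 0
assert packages == ['a', 'b', 'c', 'd', 'e']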
  {
    "path": "OSPatching/test/HandlerEnvironment.json",
    "content": "[{  \"name\": \"Microsoft.OSTCExtensions.OSPatchingForLinuxTest\", \"seqNo\": \"0\", \"version\": 1.0,  \"handlerEnvironment\": {    \"logFolder\": \".\",    \"configFolder\": \"./config\",    \"statusFolder\": \"./status\",    \"heartbeatFile\": \"./heartbeat.log\"}}]\n"
  },
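HandlerEnvironment.json is a one-element JSON array; its handlerEnvironment object tells the handler utility where the log, config, and status folders live. A sketch of reading it (not extension code):

import json

with open('HandlerEnvironment.json') as f:
    env = json.load(f)[0]['handlerEnvironment']

assert env['logFolder'] == '.'
assert env['configFolder'] == './config'
assert env['statusFolder'] == './status'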
  {
    "path": "OSPatching/test/README.txt",
    "content": "In some distros, python has to be upgraded to Python2.7\ncopy test.crt and test.prv into /var/lib/waagent/\nRun \"./prepare_settings.py; ./test_handler_1.py\"\nRun \"./prepare_settings.py; ./test_handler_2.py\"\nRun \"./prepare_settings.py; ./test_handler_3.py\"\n"
  },
  {
    "path": "OSPatching/test/check.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport datetime\n\ndef main():\n    intervalOfWeeks = int(sys.argv[1])\n    if intervalOfWeeks == 1:\n        sys.exit(0)\n\n    history_scheduled = os.path.join(os.getcwd(), 'scheduled/history')\n    today = datetime.date.today()\n    today_dayOfWeek = today.strftime(\"%a\")\n\n    last_scheduled_date = None\n    with open(history_scheduled) as f:\n        lines = f.readlines()\n        lines.reverse()\n        for line in lines:\n            line = line.strip()\n            if line.endswith(today_dayOfWeek):\n                last_scheduled_date =  datetime.datetime.strptime(line, '%Y-%m-%d %a')\n                break\n\n    if last_scheduled_date is not None and last_scheduled_date.date() + datetime.timedelta(days=intervalOfWeeks*7) > today:\n        sys.exit(1)\n    else:\n        sys.exit(0)\n\nif __name__ == '__main__':\n    main()\n"
  },
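check.py encodes its answer in the exit code: 0 means a scheduled run is due, 1 means fewer than intervalOfWeeks weeks have passed since the last run recorded in scheduled/history for today's weekday. An illustrative way to chain it (this is not the crontab line the extension itself writes):

import subprocess

# Run the weekly-interval gate first; patch only when check.py exits 0.
due = subprocess.call(['python', 'check.py', '2']) == 0  # intervalOfWeeks = 2
if due:
    subprocess.call(['python', 'handler.py', '-patch'])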
  {
    "path": "OSPatching/test/config/0.settings",
    "content": "{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"test\",\"protectedSettings\":\"MIICmwYJKoZIhvcNAQcDoIICjDCCAogCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgRXh0ZW5zaW9ucwIQHIAxlZWZBI1AXqEZ5v5FPjANBgkqhkiG9w0BAQEFAASCAQAEBPkZsa4VN2rr5SBkMDGD8r/Rbp4W4l0cOV7gN96cQi2oWk7tnAGmz/Yr38OJGv+r7ilG4DP7EJAs2gNmnld8SvQsjI4TMAF6Rt6Xbc9yQiE8PblDXTLqIr/IenK8xIvItsWwDQHiJMLB1EDfyOnwYgnUxBpQYSR3PqySEmBMtQMy7BH6egfOhrd/eifSUew6kv/Zl2wP5DTsU8A8BufiCbuG9rwEhIdDVVDmL1jLQK52OobQaS2IkYa+v+d5bBfDEmJMvVjRqeiwfkXcraWHsHcJBmBLeb/AIxzS4oCx24K5025VbGv3SEHsKx1LIA5EA6+PEhYsT3Vi7JFKAa0VMIIBDAYJKoZIhvcNAQcBMBQGCCqGSIb3DQMHBAj4oY4VX4QKoYCB6GH7cWNvfJCjaNAB5uVXgMWMFbqc9c+CX4k7zqm+fdti9j3mYPpgT/Qs2Z8vrXHFU815T8erezXNijPVyG7C6g6foyzXa3pduB16/4GlMIWYTmfzmSEZZ8Qq0MkgKuq0xQQK5GnfZkCj1hZM5m9WU+2RQZKtAjU8BS8n/os/nCcv9IwOKJ7wyql9qe+j1ZFKrar8bT+evei900g0bNpPba3R1u5yx70e/JLRF5sYBju1PDOua+gV/PqtGY7UTUTWq2r3fLg+ziJMUShYRbtIVUKmxGSc6kDCGmuNNPQsnmh7+wqlBtN60Sw=\",\"publicSettings\":{}}}]}"
  },
  {
    "path": "OSPatching/test/default.settings",
    "content": "{\"dayOfWeek\": \"everyday\", \"rebootAfterPatch\": \"rebootifneed\", \"storageAccountKey\": \"<TOCHANGE>\", \"stop\": \"false\", \"vmStatusTest\": {\"local\": \"true\", \"idleTestScript\": \"#!/usr/bin/python\\n    # Locally.\\n    def is_vm_idle():\\n        return True\\n    \", \"healthyTestScript\": \"#!/usr/bin/python\\n    # Locally.\\n    def is_vm_healthy():\\n        return True\\n    \"}, \"disabled\": \"false\", \"startTime\": \"03:00\", \"category\": \"important\", \"intervalOfWeeks\": \"1\", \"installDuration\": \"00:30\", \"storageAccountName\": \"<TOCHANGE>\", \"oneoff\": \"false\"}"
  },
  {
    "path": "OSPatching/test/handler.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport re\nimport time\nimport chardet\nimport tempfile\nimport urllib2\nimport urlparse\nimport shutil\nimport traceback\nimport logging\nfrom azure.storage import BlobService\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\nfrom patch import *\n\n# Global variables definition\nExtensionShortName = 'OSPatching'\nDownloadDirectory = 'download'\nidleTestScriptName = \"idleTest.py\"\nhealthyTestScriptName = \"healthyTest.py\"\n\nidleTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_idle():\n    return True\n\"\"\"\n\nhealthyTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_healthy():\n    return True\n\"\"\"\n\nidleTestScriptGithub = \"https://raw.githubusercontent.com/bingosummer/scripts/master/idleTest.py\"\nhealthyTestScriptGithub = \"https://raw.githubusercontent.com/bingosummer/scripts/master/healthyTest.py\"\n\nidleTestScriptStorage = \"https://binxia.blob.core.windows.net/ospatching-v2/idleTest.py\"\nhealthyTestScriptStorage = \"https://binxia.blob.core.windows.net/ospatching-v2/healthyTest.py\"\n\npublic_settings = {\n    \"disabled\" : \"false\",\n    \"stop\" : \"false\",\n    \"rebootAfterPatch\" : \"RebootIfNeed\",\n    \"category\" : \"ImportantAndRecommended\",\n    \"installDuration\" : \"00:30\",\n    \"oneoff\" : \"false\",\n    \"intervalOfWeeks\" : \"1\",\n    \"dayOfWeek\" : \"everyday\",\n    \"startTime\" : \"03:00\",\n    \"vmStatusTest\" : {\n        \"local\" : \"true\",\n        \"idleTestScript\" : idleTestScriptLocal, #idleTestScriptStorage,\n        \"healthyTestScript\" : healthyTestScriptLocal, #healthyTestScriptStorage\n    }\n}\n\nprotected_settings = {\n    \"storageAccountName\" : \"<TOCHANGE>\",\n    \"storageAccountKey\" : \"<TOCHANGE>\"\n}\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        MyPatching.install()\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to install the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')\n\ndef enable():\n    hutil.do_parse_context('Enable')\n    try:\n        # protected_settings = hutil.get_protected_settings()\n        # public_settings = hutil.get_public_settings()\n        settings = protected_settings.copy()\n        settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        oneoff = settings.get(\"oneoff\")\n        download_customized_vmstatustest()\n        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)\n        MyPatching.enable()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 
'Enable', 'success', '0', 'Enable Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to enable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. Current Configuration: ' + current_config)\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    try:\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        MyPatching.disable()\n        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to disable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')\n\ndef download():\n    hutil.do_parse_context('Download')\n    try:\n        # protected_settings = hutil.get_protected_settings()\n        # public_settings = hutil.get_public_settings()\n        settings = protected_settings.copy()\n        settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.download()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Download Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to download updates with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Download Failed. Current Configuration: ' + current_config)\n\ndef patch():\n    hutil.do_parse_context('Patch')\n    try:\n        # protected_settings = hutil.get_protected_settings()\n        # public_settings = hutil.get_public_settings()\n        settings = protected_settings.copy()\n        settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.patch()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Patch Failed. Current Configuration: ' + current_config)\n\ndef oneoff():\n    hutil.do_parse_context('Oneoff')\n    try:\n        # protected_settings = hutil.get_protected_settings()\n        # public_settings = hutil.get_public_settings()\n        settings = protected_settings.copy()\n        settings.update(public_settings)\n        MyPatching.parse_settings(settings)\n        MyPatching.patch_one_off()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Oneoff Patch Succeeded. 
Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to one-off patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Oneoff Patch Failed. Current Configuration: ' + current_config)\n\ndef download_files(hutil):\n    # protected_settings = hutil.get_protected_settings()\n    # public_settings = hutil.get_public_settings()\n    settings = protected_settings.copy()\n    settings.update(public_settings)\n    local = settings.get(\"vmStatusTest\", dict()).get(\"local\", \"\")\n    if local.lower() == \"true\":\n        local = True\n    elif local.lower() == \"false\":\n        local = False\n    else:\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"local\\\" \"\n                  \"is empty or invalid. Set it as False. Continue...\")\n        local = False\n    idle_test_script = settings.get(\"vmStatusTest\", dict()).get('idleTestScript')\n    healthy_test_script = settings.get(\"vmStatusTest\", dict()).get('healthyTestScript')\n\n    if (not idle_test_script and not healthy_test_script):\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" and \\\"healthyTestScript\\\" \"\n                  \"are both empty. Exit downloading VMStatusTest scripts...\")\n        return\n    elif local:\n        if (idle_test_script and idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should not be uri. Exit downloading VMStatusTest scripts...\")\n            return\n    elif not local:\n        if (idle_test_script and not idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and not healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should be uri. 
Exit downloading VMStatusTest scripts...\")\n            return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading VMStatusTest scripts...')\n\n    vmStatusTestScripts = dict()\n    vmStatusTestScripts[idle_test_script] = idleTestScriptName\n    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName\n\n    if local:\n        hutil.log_and_syslog(logging.INFO, \"Saving VMStatusTest scripts from user's configurations...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = save_local_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n        return\n\n    storage_account_name = None\n    storage_account_key = None\n    if settings:\n        storage_account_name = settings.get(\"storageAccountName\", \"\").strip()\n        storage_account_key = settings.get(\"storageAccountKey\", \"\").strip()\n    if storage_account_name and storage_account_key:\n        hutil.log_and_syslog(logging.INFO, \"Downloading VMStatusTest scripts from azure storage...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_blob(storage_account_name,\n                                      storage_account_key,\n                                      src,\n                                      dst,\n                                      hutil)\n            preprocess_files(file_path, hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log_and_syslog(logging.INFO, \"No azure storage account and key specified in protected \"\n                  \"settings. Downloading VMStatusTest scripts from external links...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_external_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n    else:\n        # Storage account and key should appear in pairs\n        error_msg = \"Azure storage account or storage key is not provided\"\n        hutil.log_and_syslog(logging.ERROR, error_msg)\n        raise ValueError(error_msg)\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    container_name = get_container_name_from_uri(blob_uri)\n    blob_name = get_blob_name_from_uri(blob_uri)\n    download_dir = prepare_download_dir(seqNo)\n    download_path = os.path.join(download_dir, dst)\n    # The guest agent already ensures extensions are enabled one at a time,\n    # so concurrent blob downloads will not conflict.\n    blob_service = BlobService(storage_account_name, storage_account_key)\n    try:\n        blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download blob with uri:{0} \"\n                     \"with error {1}\").format(blob_uri,e))\n        raise\n    return download_path\n\ndef download_external_file(uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        download_and_save_file(uri, file_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download external file with uri:{0} \"\n                     \"with error {1}\").format(uri, e))\n        raise\n    return file_path\n\ndef 
save_local_file(src, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        waagent.SetFileContents(file_path, src)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to save file from user's configuration \"\n                     \"with error {0}\").format(e))\n        raise\n    return file_path\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        Preprocess the text file. If it is a binary file, skip it.\n    \"\"\"\n    is_text, code_type = is_text_file(file_path)\n    if is_text:\n        dos2unix(file_path)\n        hutil.log_and_syslog(logging.INFO, \"Converting text files from DOS to Unix formats: Done\")\n        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:\n            remove_bom(file_path)\n            hutil.log_and_syslog(logging.INFO, \"Removing BOM: Done\")\n\ndef is_text_file(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read(512)\n    return is_text(contents)\n\ndef is_text(contents):\n    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']\n    code_type = chardet.detect(contents)['encoding']\n    if code_type in supported_encoding:\n        return True, code_type\n    else:\n        return False, code_type\n\ndef dos2unix(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    f_temp.write(contents)\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef remove_bom(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            f_temp.write(contents.decode(encoding).encode('utf-8'))\n            break\n        except UnicodeDecodeError:\n            continue\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef download_and_save_file(uri, file_path):\n    src = urllib2.urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while(buf):\n        dest.write(buf)\n        buf = src.read(buf_size)\n    dest.close()\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"Create the directory if it does not exist.\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse.urlparse(uriStr)\n    return uri.path\n\ndef get_blob_name_from_uri(uri):\n    return get_properties_from_uri(uri)['blob_name']\n\ndef get_container_name_from_uri(uri):\n    return get_properties_from_uri(uri)['container_name']\n\ndef get_properties_from_uri(uri):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to extract container and blob names from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\ndef 
download_customized_vmstatustest():\n    download_dir = prepare_download_dir(hutil.get_seq_no())\n    maxRetry = 2\n    for retry in range(0, maxRetry + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception:\n            hutil.log_and_syslog(logging.ERROR, \"Failed to download files, retry=\" + str(retry) + \", maxRetry=\" + str(maxRetry))\n            if retry != maxRetry:\n                hutil.log_and_syslog(logging.INFO, \"Sleep 10 seconds\")\n                time.sleep(10)\n            else:\n                raise\n\ndef copy_vmstatustestscript(seqNo, oneoff):\n    src_dir = prepare_download_dir(seqNo)\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        src = os.path.join(src_dir, filename)\n        # One-off runs stage the scripts under \"oneoff\"; scheduled runs under \"scheduled\"\n        if oneoff is not None and oneoff.lower() == \"true\":\n            dst = \"oneoff\"\n        else:\n            dst = \"scheduled\"\n        dst = os.path.join(os.getcwd(), dst)\n        if os.path.isfile(src):\n            shutil.copy(src, dst)\n\ndef delete_current_vmstatustestscript():\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        current_vmstatustestscript = os.path.join(os.getcwd(), \"patch/\"+filename)\n        if os.path.isfile(current_vmstatustestscript):\n            os.remove(current_vmstatustestscript)\n\n# main() is the only entry point of this extension handler\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error,\n                                ExtensionShortName)\n\n    global MyPatching\n    MyPatching = GetMyPatching(hutil)\n    if MyPatching is None:\n        sys.exit(1)\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(download)\", a):\n            download()\n        elif re.match(\"^([-/]*)(patch)\", a):\n            patch()\n        elif re.match(\"^([-/]*)(oneoff)\", a):\n            oneoff()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
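handler.py's get_properties_from_uri() treats the first path segment of a blob URI as the container and everything after it as the blob name. A quick illustration with a made-up URI (not extension code):

import urlparse  # Python 2 module, matching the handler's imports

uri = 'https://myaccount.blob.core.windows.net/mycontainer/scripts/idleTest.py'
path = urlparse.urlparse(uri).path.strip('/')
container_name, _, blob_name = path.partition('/')
assert container_name == 'mycontainer'
assert blob_name == 'scripts/idleTest.py'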
  {
    "path": "OSPatching/test/oneoff/__init__.py",
    "content": ""
  },
  {
    "path": "OSPatching/test/prepare_settings.py",
    "content": "#!/usr/bin/python\nimport json\n\nidleTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_idle():\n    return True\n\"\"\"\n\nhealthyTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_healthy():\n    return True\n\"\"\"\n\nidleTestScriptGithub = \"https://raw.githubusercontent.com/bingosummer/scripts/master/idleTest.py\"\nhealthyTestScriptGithub = \"https://raw.githubusercontent.com/bingosummer/scripts/master/healthyTest.py\"\n\nidleTestScriptStorage = \"https://binxia.blob.core.windows.net/ospatching-v2/idleTest.py\"\nhealthyTestScriptStorage = \"https://binxia.blob.core.windows.net/ospatching-v2/healthyTest.py\"\n\nsettings = {\n    \"disabled\" : \"false\",\n    \"stop\" : \"false\",\n    \"rebootAfterPatch\" : \"rebootifneed\",\n    \"category\" : \"important\",\n    \"installDuration\" : \"00:30\",\n    \"oneoff\" : \"false\",\n    \"intervalOfWeeks\" : \"1\",\n    \"dayOfWeek\" : \"everyday\",\n    \"startTime\" : \"03:00\",\n    \"vmStatusTest\" : {\n        \"local\" : \"true\",\n        \"idleTestScript\" : idleTestScriptLocal, #idleTestScriptStorage,\n        \"healthyTestScript\" : healthyTestScriptLocal, #healthyTestScriptStorage\n    },\n    \"storageAccountName\" : \"<TOCHANGE>\",\n    \"storageAccountKey\" : \"<TOCHANGE>\"\n}\n\nsettings_string = json.dumps(settings)\nsettings_file = \"default.settings\"\nwith open(settings_file, \"w\") as f:\n    f.write(settings_string)\n"
  },
  {
    "path": "OSPatching/test/scheduled/__init__.py",
    "content": ""
  },
  {
    "path": "OSPatching/test/scheduled/history",
    "content": ""
  },
  {
    "path": "OSPatching/test/test.crt",
    "content": "Bag Attributes\n    localKeyID: 01 00 00 00\n    friendlyName: ospatch-s131\nsubject=/DC=Windows Azure Service Management for Extensions\nissuer=/DC=Windows Azure Service Management for Extensions\n-----BEGIN CERTIFICATE-----\nMIIDCjCCAfKgAwIBAgIQHIAxlZWZBI1AXqEZ5v5FPjANBgkqhkiG9w0BAQUFADBB\nMT8wPQYKCZImiZPyLGQBGRYvV2luZG93cyBBenVyZSBTZXJ2aWNlIE1hbmFnZW1l\nbnQgZm9yIEV4dGVuc2lvbnMwHhcNMTUwNTE0MDMxMDU2WhcNMjAwNTE0MDMxMDU2\nWjBBMT8wPQYKCZImiZPyLGQBGRYvV2luZG93cyBBenVyZSBTZXJ2aWNlIE1hbmFn\nZW1lbnQgZm9yIEV4dGVuc2lvbnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQCjtXmqCuqgRZ1nkEzQowNJbtWGqWg+1lTqaS3w/SsQ6K0fjuu1do8jNSuP\nNLPmY1o/96OA+7HoO4MyE2QfCzb7pGKIH0UPj/0u5HkR9NfRKG+LcZ6saoJQQDbP\nmdMqN8rTAyiH/Ks95rx5LzlSVX5QL9QtV11fSB9B/ILO5ebQIVAehAchFnSnUGqy\nHkhQPW8XOAmR4WarW3itaFhKmsbuXwCwbePwcBBhOxqyqqYwGG85zhOSj6xHKDep\nqF+UTACBd7Ei4SNme6DMDndNNplSLZOswyp+9ElmE01Eu98CtJN6FbrJ1qZU22EV\n85Dz4l1UF4zD7JOb5d1XM/l56YEnAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAD5x\nXZrheNS+n2pCav+VuGrB5gVs9NrH8hZAXxIFQ8bMNRE7HTrUIpSQ04dZBlpo2kVI\nv1Fx0XPcV9pm22ySzQdxGOVPQqUWzhIVBYqz4gdH2zPSijysJstFPtGK+Z7ygnWA\nu0NCfpYJhy7hNv8/No7+J5M+BwKrBJUoIHCvrvE1gP97ZrcUD1XsIvOe4yvGEkp4\nlydb1Djc1E+BzmI+MwL4BbPnGyBgBqAhSiNAa47Pp9OQhIyvCiifGC3QAkT5NMmq\nC+fY3AG2SdHY+39zYtehYyhUP9wKo2d/ecpx79ruE4HYJME6AuLVTRXqzQijFfPz\nM9ouI2lVvsL6DpRRby0=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "OSPatching/test/test.prv",
    "content": "Bag Attributes\n    localKeyID: 01 00 00 00\n    Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0\nKey Attributes\n    X509v3 Key Usage: 10\n-----BEGIN PRIVATE KEY-----\nMIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCjtXmqCuqgRZ1n\nkEzQowNJbtWGqWg+1lTqaS3w/SsQ6K0fjuu1do8jNSuPNLPmY1o/96OA+7HoO4My\nE2QfCzb7pGKIH0UPj/0u5HkR9NfRKG+LcZ6saoJQQDbPmdMqN8rTAyiH/Ks95rx5\nLzlSVX5QL9QtV11fSB9B/ILO5ebQIVAehAchFnSnUGqyHkhQPW8XOAmR4WarW3it\naFhKmsbuXwCwbePwcBBhOxqyqqYwGG85zhOSj6xHKDepqF+UTACBd7Ei4SNme6DM\nDndNNplSLZOswyp+9ElmE01Eu98CtJN6FbrJ1qZU22EV85Dz4l1UF4zD7JOb5d1X\nM/l56YEnAgMBAAECgf9NUVCuRdhtvTDX0HnMW8jOEHLk35j45Rt4Mj5CxzwsNsGN\nIVaZ5x2pylGwoY2YDeKgNw4Gguw8QmP7Pc54ohDyOjqa1q6mGAErH7zyGDE9+w8l\nTKVdC2J2/7cJQnwe1+WGBc0s8WY62taRSRaCaLhzof1MryqB7XZ3BF5kfwpixhIg\nqJ9eS9CYNVdAzHYEsHG3EvqBQm4JojtRMdMpME1SbCoSoZB4NT4T4bzGfyYYzXZo\n0LSgRyPwJFBC0TdbjpF9bvJaNT3jVuAk2g0rRdR/Zio5GmqhzQe8x2Vg2NH2DzZ8\nArM1ZtmvW46etx9umDkKZZRLEron+sZ0QhdNL1UCgYEA38rmR9zV98pCmV6/xMCX\nRDXtmOKD6cM8bWAHE7Dkb10vPuz8WtTpfpjriBF1W3dwCyRClibelmOItjYr0uli\n84w2IWCYA9z5T8mT2ymDNNYl3cmLM5gk1Prnm2uCAtQbN3kS+NHaGSIF7eV7xjTo\nQyV3qYNf+R3z2FO47fNoYD0CgYEAu0Tvtvmwv9Rq/8DIUZp3bjWhldhiXtTGImwe\nldXKxTbNpOA5pVxPxp7WnEGXAY3TeWWIWEuxRCt8J6GiRzW48LTKsbygKloM6dhb\nYJ1FIwUXrW8jofwwliLhTlCxde/2MUBA6BZGsJ4GJT+nRUcpkjDrgS2uKbW7+iLR\nId2/+TMCgYAyDM29qq0L0udcJ62Z0jzCW5E8zQQVhr1/9KcAh2I/acbEOvohUla6\nInciokzt3ONpCn392MmVNsN/hNP+QoYH1AbTJig5TPVRG9L+g+U9LtufI5EHQ/KQ\n02BzCPM1sLw5htFwZnZxgoNy9gzdgj2jrsB5X9FaBJHhgq/sP7DLPQKBgQCKkdIH\nRO+Cor2iDZasu23QQSMV7A2uOid6ZSKkoJPwJkM40yoUsB/fyrzm1qnUXouy8mxX\nWXsMBFlUQggARUJZ6o1pwzeI3yVbC9thvD3iUexZSznErQWOsrSg7JjDuhIkE3Vz\nxrf8DJJjkZxGaQfbwxMgfRq4hl9YEddKBfn9fQKBgGpCHiQ2R9EFm4KR2zgB05Me\n0IGyD+cC6MHPiL7sYcDLmG7Y56AZExmR4tcXs/gG21/3kTJTMANJyh3349/f1/Ma\nxKqG2df6sY/JUDPGiY5X35QM3oFM5razS5M5+4aRlRDTp4gLO+PQ00JQAD3DpwJf\nNSEn3hd8Qa3aFEqyL/n6\n-----END PRIVATE KEY-----\n"
  },
  {
    "path": "OSPatching/test/test_handler_1.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport re\nimport time\nimport chardet\nimport tempfile\nimport urllib2\nimport urlparse\nimport platform\nimport shutil\nimport traceback\nimport logging\nfrom azure.storage import BlobService\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\nimport json\nimport unittest\nsys.path.append('..')\nfrom patch import *\nfrom FakePatching import FakePatching\n\n# Global variables definition\nExtensionShortName = 'OSPatching'\nDownloadDirectory = 'download'\nidleTestScriptName = \"idleTest.py\"\nhealthyTestScriptName = \"healthyTest.py\"\nhandlerName = os.path.basename(sys.argv[0])\nstatus_file = './status/0.status'\nlog_file = './extension.log'\n\nsettings_file = \"default.settings\"\nwith open(settings_file, \"r\") as f:\n    settings_string = f.read()\nsettings = json.loads(settings_string)\n\nidleTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_idle():\n    return True\n\"\"\"\n\nhealthyTestScriptLocal = \"\"\"\n#!/usr/bin/python\n# Locally.\ndef is_vm_healthy():\n    return True\n\"\"\"\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        MyPatching.install()\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception, e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to install the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')\n\ndef enable():\n    hutil.do_parse_context('Enable')\n    try:\n        MyPatching.parse_settings(settings)\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        oneoff = settings.get(\"oneoff\")\n        download_customized_vmstatustest()\n        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)\n        MyPatching.enable()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to enable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. 
Current Configuration: ' + current_config)\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    try:\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        MyPatching.disable()\n        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')\n    except Exception, e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to disable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')\n\ndef download():\n    hutil.do_parse_context('Download')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.download()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Download Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to download updates with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Download Failed. Current Configuration: ' + current_config)\n\ndef patch():\n    hutil.do_parse_context('Patch')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Patch Failed. Current Configuration: ' + current_config)\n\ndef oneoff():\n    hutil.do_parse_context('Oneoff')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch_one_off()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0,'Enable','success','0', 'Oneoff Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception, e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to one-off patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable','error','0', 'Oneoff Patch Failed. Current Configuration: ' + current_config)\n\ndef download_files(hutil):\n    local = settings.get(\"vmStatusTest\", dict()).get(\"local\", \"\")\n    if local.lower() == \"true\":\n        local = True\n    elif local.lower() == \"false\":\n        local = False\n    else:\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"local\\\" \"\n                  \"is empty or invalid. Set it as False. Continue...\")\n        local = False\n    idle_test_script = settings.get(\"vmStatusTest\", dict()).get('idleTestScript')\n    healthy_test_script = settings.get(\"vmStatusTest\", dict()).get('healthyTestScript')\n\n    if (not idle_test_script and not healthy_test_script):\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" and \\\"healthyTestScript\\\" \"\n                  \"are both empty. 
Exit downloading VMStatusTest scripts...\")\n        return\n    elif local:\n        if (idle_test_script and idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should not be uri. Exit downloading VMStatusTest scripts...\")\n            return\n    elif not local:\n        if (idle_test_script and not idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and not healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should be uri. Exit downloading VMStatusTest scripts...\")\n            return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading VMStatusTest scripts...')\n\n    vmStatusTestScripts = dict()\n    vmStatusTestScripts[idle_test_script] = idleTestScriptName\n    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName\n\n    if local:\n        hutil.log_and_syslog(logging.INFO, \"Saving VMStatusTest scripts from user's configurations...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = save_local_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n        return\n\n    storage_account_name = None\n    storage_account_key = None\n    if settings:\n        storage_account_name = settings.get(\"storageAccountName\", \"\").strip()\n        storage_account_key = settings.get(\"storageAccountKey\", \"\").strip()\n    if storage_account_name and storage_account_key:\n        hutil.log_and_syslog(logging.INFO, \"Downloading VMStatusTest scripts from azure storage...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_blob(storage_account_name,\n                                      storage_account_key,\n                                      src,\n                                      dst,\n                                      hutil)\n            preprocess_files(file_path, hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log_and_syslog(logging.INFO, \"No azure storage account and key specified in protected \"\n                  \"settings. 
Downloading VMStatusTest scripts from external links...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_external_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n    else:\n        # Storage account and key should appear in pairs\n        error_msg = \"Azure storage account or storage key is not provided\"\n        hutil.log_and_syslog(logging.ERROR, error_msg)\n        raise ValueError(error_msg)\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    container_name = get_container_name_from_uri(blob_uri)\n    blob_name = get_blob_name_from_uri(blob_uri)\n    download_dir = prepare_download_dir(seqNo)\n    download_path = os.path.join(download_dir, dst)\n    # The guest agent already ensures extensions are enabled one at a time,\n    # so concurrent blob downloads will not conflict.\n    blob_service = BlobService(storage_account_name, storage_account_key)\n    try:\n        blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    except Exception, e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download blob with uri:{0} \"\n                     \"with error {1}\").format(blob_uri,e))\n        raise\n    return download_path\n\ndef download_external_file(uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        download_and_save_file(uri, file_path)\n    except Exception, e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download external file with uri:{0} \"\n                     \"with error {1}\").format(uri, e))\n        raise\n    return file_path\n\ndef save_local_file(src, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        waagent.SetFileContents(file_path, src)\n    except Exception, e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to save file from user's configuration \"\n                     \"with error {0}\").format(e))\n        raise\n    return file_path\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        Preprocess the text file. 
If it is a binary file, skip it.\n    \"\"\"\n    is_text, code_type = is_text_file(file_path)\n    if is_text:\n        dos2unix(file_path)\n        hutil.log_and_syslog(logging.INFO, \"Converting text files from DOS to Unix formats: Done\")\n        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:\n            remove_bom(file_path)\n            hutil.log_and_syslog(logging.INFO, \"Removing BOM: Done\")\n\ndef is_text_file(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read(512)\n    return is_text(contents)\n\ndef is_text(contents):\n    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']\n    code_type = chardet.detect(contents)['encoding']\n    if code_type in supported_encoding:\n        return True, code_type\n    else:\n        return False, code_type\n\ndef dos2unix(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    f_temp.write(contents)\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef remove_bom(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            f_temp.write(contents.decode(encoding).encode('utf-8'))\n            break\n        except UnicodeDecodeError:\n            continue\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef download_and_save_file(uri, file_path):\n    src = urllib2.urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while(buf):\n        dest.write(buf)\n        buf = src.read(buf_size)\n    dest.close()\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"Create the directory if it does not exist.\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse.urlparse(uriStr)\n    return uri.path\n\ndef get_blob_name_from_uri(uri):\n    return get_properties_from_uri(uri)['blob_name']\n\ndef get_container_name_from_uri(uri):\n    return get_properties_from_uri(uri)['container_name']\n\ndef get_properties_from_uri(uri):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to extract container and blob names from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\ndef download_customized_vmstatustest():\n    download_dir = prepare_download_dir(hutil.get_seq_no())\n    maxRetry = 2\n    for retry in range(0, maxRetry + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception, e:\n            hutil.log_and_syslog(logging.ERROR, \"Failed to download files, retry=\" + str(retry) + \", maxRetry=\" + str(maxRetry))\n            if retry != maxRetry:\n                hutil.log_and_syslog(logging.INFO, \"Sleep 10 seconds\")\n                time.sleep(10)\n            
else:\n                raise\n\ndef copy_vmstatustestscript(seqNo, oneoff):\n    src_dir = prepare_download_dir(seqNo)\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        src = os.path.join(src_dir, filename)\n        if os.path.isfile(src):\n            if oneoff is not None and oneoff.lower() == \"true\":\n                dst = \"oneoff\"\n            else:\n                dst = \"scheduled\"\n            dst = os.path.join(os.getcwd(), dst)\n            shutil.copy(src, dst)\n\ndef delete_current_vmstatustestscript():\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        current_vmstatustestscript = os.path.join(os.getcwd(), \"patch/\"+filename)\n        if os.path.isfile(current_vmstatustestscript):\n            os.remove(current_vmstatustestscript)\n\nclass Test(unittest.TestCase):\n    def setUp(self):\n        print '\\n\\n============================================================================================'\n        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n        waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n        global hutil\n        hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n        hutil.do_parse_context('TEST')\n\n        global MyPatching\n        MyPatching = FakePatching(hutil)\n        if MyPatching is None:\n            sys.exit(1)\n\n        distro = DistInfo()[0]\n        if 'centos' in distro or 'Oracle' in distro or 'redhat' in distro:\n            MyPatching.cron_restart_cmd = 'service crond restart'\n\n        try:\n            os.remove('mrseq')\n        except:\n            pass\n\n        waagent.SetFileContents(MyPatching.package_downloaded_path, '')\n        waagent.SetFileContents(MyPatching.package_patched_path, '')\n\n    def test_case_insensitive_parameters(self):\n        print 'test_case_insensitive_parameters'\n\n        global settings\n        settings = {\n            \"disabled\" : \"False\",\n            \"stop\" : \"false\",\n            \"rebootAfterPatch\" : \"rEbOoTiFnEeD\",\n            \"category\" : \"imPortant\",\n            \"installDuration\" : \"01:00\",\n            \"oneoff\" : \"falSe\",\n            \"dayOfWeek\" : \"Sunday|Monday|Tuesday|wednesday|Thursday|Friday|Saturday\",\n            \"startTime\" : \"02:00\"\n        }\n        MyPatching.parse_settings(settings)\n\n        self.assertFalse(MyPatching.disabled)\n        self.assertFalse(MyPatching.stop)\n        self.assertEqual(MyPatching.reboot_after_patch, \"rebootifneed\")\n        self.assertFalse(MyPatching.oneoff)\n        self.assertEqual(MyPatching.day_of_week, [7, 1, 2, 3, 4, 5, 6])\n        self.assertEqual(MyPatching.category, \"important\")\n        import datetime\n        self.assertEqual(MyPatching.start_time, datetime.datetime.strptime(\"02:00\", '%H:%M'))\n\n    def test_illegal_parameters(self):\n        print 'test_illegal_parameters'\n\n        global settings\n        settings = {\n            \"disabled\" : \"illegal\",\n            \"stop\" : \"false\",\n            \"rebootAfterPatch\" : \"illegal\",\n            \"category\" : \"illegal\",\n            \"installDuration\" : \"1 hour\",\n            \"oneoff\" : \"illegal\",\n            \"dayOfWeek\" : \"Sunday|Moy|Tday|wednesday|Thursday|Friday|Srday\",\n            \"startTime\" : \"02:00\"\n        }\n        MyPatching.parse_settings(settings)\n\n        self.assertFalse(MyPatching.disabled)\n        self.assertFalse(MyPatching.stop)\n        self.assertEqual(MyPatching.reboot_after_patch, 
\"rebootifneed\")\n        self.assertFalse(MyPatching.oneoff)\n        self.assertEqual(MyPatching.day_of_week, range(1,8))\n        self.assertEqual(MyPatching.category, \"important\")\n        import datetime\n        self.assertEqual(MyPatching.start_time, datetime.datetime.strptime(\"02:00\", '%H:%M'))\n\n    def test_conflict_parameters_1(self):\n        print 'test_conflict_parameters_1'\n\n        global settings\n        settings = {\n            \"disabled\" : \"false\",\n            \"stop\" : \"false\",\n            \"rebootAfterPatch\" : \"rebootifneed\",\n            \"category\" : \"important\",\n            \"installDuration\" : \"01:01\",\n            \"oneoff\" : \"false\",\n            \"vmStatusTest\" : {\n                \"local\" : \"true\",\n                \"healthyTestScript\" : \"http://test.com/test.py\"\n            }\n        }\n        MyPatching.parse_settings(settings)\n        old_log_len = len(waagent.GetFileContents(log_file))\n        download_customized_vmstatustest()\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('The parameter \"idleTestScript\" or \"healthyTestScript\" should not be uri' in log_contents)\n\n    def test_conflict_parameters_2(self):\n        print 'test_conflict_parameters_2'\n\n        global settings\n        settings = {\n            \"disabled\" : \"false\",\n            \"stop\" : \"false\",\n            \"rebootAfterPatch\" : \"rebootifneed\",\n            \"category\" : \"important\",\n            \"installDuration\" : \"01:01\",\n            \"oneoff\" : \"false\",\n            \"vmStatusTest\" : {\n                \"local\" : \"false\",\n                \"healthyTestScript\" : idleTestScriptLocal\n            }\n        }\n        MyPatching.parse_settings(settings)\n        old_log_len = len(waagent.GetFileContents(log_file))\n        download_customized_vmstatustest()\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('The parameter \"idleTestScript\" or \"healthyTestScript\" should be uri' in log_contents)\n\n    def test_install(self):\n        \"\"\"\n        Each Distro has different dependencies for OSPatching Extension.\n        It is MANUAL to check whether they are installed or not.\n        Ubuntu        : update-notifier-common\n        CentOS/Oracle : yum-downloadonly\n                        yum-plugin-security\n        SuSE          : None\n        \"\"\"\n        print 'test_install'\n\n        with self.assertRaises(SystemExit) as cm:\n            install()\n\n        self.assertEqual(cm.exception.code, 0)\n        self.assertEqual(get_status(\"Install\"), 'success')\n\n    def test_enable(self):\n        print 'test_enable'\n\n        global settings\n        settings = {\n            \"disabled\" : \"false\",\n            \"stop\" : \"false\",\n            \"rebootAfterPatch\" : \"rebootifneed\",\n            \"category\" : \"important\",\n            \"installDuration\" : \"01:01\",\n            \"oneoff\" : \"false\",\n        }\n\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n\n        self.assertEqual(cm.exception.code, 0)\n        self.assertEqual(get_status(\"Enable\"), 'success')\n        download_cmd = 'python test_handler_1.py -download'\n        patch_cmd = 'python test_handler_1.py -patch'\n        crontab_content = waagent.GetFileContents('/etc/crontab')\n        self.assertTrue(download_cmd in crontab_content)\n        self.assertTrue(patch_cmd in crontab_content)\n\n    
def test_disable(self):\n        print 'test_disable'\n\n        global settings\n        settings = {}\n        with self.assertRaises(SystemExit) as cm:\n            disable()\n\n        self.assertEqual(cm.exception.code, 0)\n        self.assertEqual(get_status(\"Disable\"), 'success')\n        download_cmd = 'python test_handler_1.py -download'\n        patch_cmd = 'python test_handler_1.py -patch'\n        crontab_content = waagent.GetFileContents('/etc/crontab')\n        self.assertTrue(download_cmd not in crontab_content)\n        self.assertTrue(patch_cmd not in crontab_content)\n\n    def test_cron(self):\n        print 'test_cron'\n\n        global settings\n        settings = {}\n        enable_time = time.time()\n        settings['startTime'] = time.strftime('%H:%M', time.localtime(enable_time + 180))\n        delta_time = int(time.strftime('%S', time.localtime(enable_time + 120)))\n        MyPatching.download_duration = 60\n     \n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n        self.assertEqual(get_status(\"Enable\"), 'success')\n        download_cmd = \" \".join([\"python\", handlerName, \"-download\"])\n        patch_cmd = \" \".join([\"python\", handlerName, \"-patch\"])\n        crontab_content = waagent.GetFileContents('/etc/crontab')\n        self.assertTrue(download_cmd in crontab_content)\n        self.assertTrue(patch_cmd in crontab_content)\n\n        time.sleep(180 + 5)\n        distro = DistInfo()[0]\n        if 'SuSE' in distro:\n            find_cron = 'grep CRON /var/log/messages'\n        elif 'Ubuntu' in distro:\n            find_cron = 'grep CRON /var/log/syslog'\n        else:\n            find_cron = 'cat /var/log/cron'\n    \n        day = int(time.strftime('%d', time.localtime(enable_time)))\n        find_download_time = \"grep '\" + str(day) + time.strftime(' %H:%M', time.localtime(enable_time + 120)) + \"'\"\n        find_patch_time = \"grep '\" + str(day) + time.strftime(' %H:%M', time.localtime(enable_time + 180)) + \"'\"\n\n        find_download = \"grep '\" + download_cmd + \"'\"\n        find_patch = \"grep '\" + patch_cmd + \"'\"\n        retcode, output = waagent.RunGetOutput(find_cron + ' | ' + find_download_time + ' | ' + find_download)\n        self.assertTrue(output)\n        retcode, output = waagent.RunGetOutput(find_cron + ' | ' + find_patch_time + ' | ' + find_patch)\n        self.assertTrue(output)\n        \n    def test_download(self):\n        \"\"\"\n        Check file package.downloaded after download\n        \"\"\"\n        print 'test_download'\n\n        global settings\n        settings = {\n            \"category\" : \"importantandrecommended\",\n        }\n\n        with self.assertRaises(SystemExit) as cm:\n            download()\n\n        self.assertEqual(cm.exception.code, 0)\n        download_content = waagent.GetFileContents(MyPatching.package_downloaded_path)\n        security_download_list = get_patch_list(MyPatching.package_downloaded_path, 'important')\n        self.assertTrue(set(security_download_list) == set(MyPatching.security_download_list))\n        all_download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertTrue(set(all_download_list) == set(MyPatching.all_download_list))\n\n    def test_download_security(self):\n        \"\"\"\n        check file package.downloaded after download\n        \"\"\"\n        print 'test_download_security'\n        global settings\n        settings = {\n            
\"category\" : \"important\",\n        }\n\n        with self.assertRaises(SystemExit) as cm:\n            download()\n\n        self.assertEqual(cm.exception.code, 0)\n        security_download_list = get_patch_list(MyPatching.package_downloaded_path, 'important')\n        self.assertTrue(set(security_download_list) == set(MyPatching.security_download_list))\n        all_download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertTrue(set(all_download_list) == set(MyPatching.security_download_list))\n\n    def test_patch(self):\n        '''\n        check file package.patched when patch successful\n        '''\n        print 'test_patch'\n        global settings\n        settings = {}\n        \n        with self.assertRaises(SystemExit) as cm:\n            download()\n        self.assertEqual(cm.exception.code, 0)\n        with self.assertRaises(SystemExit) as cm:\n            patch()\n        self.assertEqual(cm.exception.code, 0)\n\n        download_content = waagent.GetFileContents(MyPatching.package_downloaded_path)\n        patch_content = waagent.GetFileContents(MyPatching.package_patched_path)\n        self.assertEqual(download_content, patch_content)\n        \n\n    def test_patch_failed(self):\n        '''\n        check file package.patched when patch fail\n        '''\n        print 'test_patch_failed'\n        global settings\n        settings = {}\n\n        def patch_package(self):\n            return 1\n        MyPatching.patch_package = patch_package\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        with self.assertRaises(SystemExit) as cm:\n            download()\n        self.assertEqual(cm.exception.code, 0)\n        with self.assertRaises(SystemExit) as cm:\n            patch()\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n\n        self.assertEqual(cm.exception.code, 0)\n        patch_content = waagent.GetFileContents(MyPatching.package_patched_path)\n        self.assertFalse(patch_content)\n        self.assertTrue('Failed to patch the package' in log_contents)\n        \n        \n    def test_patch_one_off(self):\n        '''\n        check package.downloaded and package.patched when patch_one_off successful\n        '''\n        print 'test_patch_one_off'\n        global settings\n        settings = {\n            \"oneoff\" : \"true\",\n            \"category\" : \"importantandrecommended\"\n        }\n\n        with self.assertRaises(SystemExit) as cm:\n            oneoff()\n        \n        self.assertEqual(cm.exception.code, 0)\n        self.assertEqual(get_status(\"Enable\"), 'success')\n        time.sleep(3)\n        security_download_list = get_patch_list(MyPatching.package_downloaded_path, 'important')\n        self.assertTrue(set(security_download_list) == set(MyPatching.security_download_list))\n        all_download_list = get_patch_list(MyPatching.package_patched_path)\n        self.assertTrue(set(all_download_list) == set(MyPatching.all_download_list))\n        download_content = waagent.GetFileContents(MyPatching.package_downloaded_path)\n        patch_content = waagent.GetFileContents(MyPatching.package_patched_path)\n        self.assertEqual(patch_content, download_content)\n\n    def test_patch_time_exceed(self):\n        '''\n        check package.patched when patch time exceed\n        '''\n        print 'test_patch_time_exceed'\n        global settings\n        settings = {\n            \"category\" : \"importantandrecommended\",\n            \"installDuration\" : 
\"00:06\"        # 5 minutes reserved for reboot\n        }\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        def patch_package(self):\n            time.sleep(11)\n            return 0\n        MyPatching.patch_package = patch_package\n        \n        with self.assertRaises(SystemExit) as cm:\n            download()\n        self.assertEqual(cm.exception.code, 0)\n        with self.assertRaises(SystemExit) as cm:\n            patch()\n        self.assertEqual(cm.exception.code, 0)\n\n        patch_list = get_patch_list(MyPatching.package_patched_path)\n        self.assertEqual(patch_list, ['a', 'b', 'c', 'd', 'e', '1'])\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('Patching time exceeded' in log_contents)\n\n\ndef get_patch_list(file_path, category = None):\n    content = waagent.GetFileContents(file_path)\n    if category != None:\n        result = [line.split()[0] for line in content.split('\\n') if line.endswith(category)]\n    else:\n        result = [line.split()[0] for line in content.split('\\n') if ' ' in line]\n    return result\n    \n\ndef get_status(operation, retkey='status'):\n    contents = waagent.GetFileContents(status_file)\n    status = json.loads(contents)[0]['status']\n    if status['operation'] == operation:\n        return status[retkey]\n    return ''\n\ndef change_settings(key, value):\n    with open(settings_file, \"r\") as f:\n        settings_string = f.read()\n        settings = json.loads(settings_string)\n    with open(settings_file, \"w\") as f:\n        settings[key] = value\n        settings_string = json.dumps(settings)\n        f.write(settings_string)\n    return settings\n\ndef main():\n    if len(sys.argv) == 1:\n        unittest.main()\n        return\n\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context('TEST')\n    global MyPatching\n    MyPatching = FakePatching(hutil)\n\n    if MyPatching == None:\n        sys.exit(1)\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(download)\", a):\n            download()\n        elif re.match(\"^([-/]*)(patch)\", a):\n            patch()\n        elif re.match(\"^([-/]*)(oneoff)\", a):\n            oneoff()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OSPatching/test/test_handler_2.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport chardet\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport time\nimport traceback\nimport urllib2\nimport urlparse\nimport unittest\n\nfrom azure.storage import BlobService\n\nimport Utils.HandlerUtil as Util\nfrom patch import *\nfrom FakePatching2 import FakePatching\nfrom Utils.WAAgentUtil import waagent\n\nsys.path.append('..')\n\n# Global variables definition\nExtensionShortName = 'OSPatching'\nDownloadDirectory = 'download'\nidleTestScriptName = \"idleTest.py\"\nhealthyTestScriptName = \"healthyTest.py\"\nhandlerName = os.path.basename(sys.argv[0])\nstatus_file = './status/0.status'\nlog_file = './extension.log'\n\nsettings_file = \"default.settings\"\nwith open(settings_file, \"r\") as f:\n    settings_string = f.read()\nsettings = json.loads(settings_string)\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        MyPatching.install()\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to install the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')\n\ndef enable():\n    hutil.do_parse_context('Enable')\n    try:\n        MyPatching.parse_settings(settings)\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        oneoff = settings.get(\"oneoff\")\n        download_customized_vmstatustest()\n        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)\n        MyPatching.enable()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to enable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. 
Current Configuration: ' + current_config)\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    try:\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        MyPatching.disable()\n        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to disable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')\n\ndef download():\n    hutil.do_parse_context('Download')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.download()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Download Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to download updates with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Download Failed. Current Configuration: ' + current_config)\n\ndef patch():\n    hutil.do_parse_context('Patch')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Patch Failed. Current Configuration: ' + current_config)\n\ndef oneoff():\n    hutil.do_parse_context('Oneoff')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch_one_off()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Oneoff Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to one-off patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Oneoff Patch Failed. Current Configuration: ' + current_config)\n\ndef download_files(hutil):\n    local = settings.get(\"vmStatusTest\", dict()).get(\"local\", \"\")\n    if local.lower() == \"true\":\n        local = True\n    elif local.lower() == \"false\":\n        local = False\n    else:\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"local\\\" \"\n                  \"is empty or invalid. Setting it to False. 
Continue...\")\n        local = False\n    idle_test_script = settings.get(\"vmStatusTest\", dict()).get('idleTestScript')\n    healthy_test_script = settings.get(\"vmStatusTest\", dict()).get('healthyTestScript')\n\n    if (not idle_test_script and not healthy_test_script):\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" and \\\"healthyTestScript\\\" \"\n                  \"are both empty. Exit downloading VMStatusTest scripts...\")\n        return\n    elif local:\n        if (idle_test_script and idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should not be uri. Exit downloading VMStatusTest scripts...\")\n            return\n    elif not local:\n        if (idle_test_script and not idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and not healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should be uri. Exit downloading VMStatusTest scripts...\")\n            return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading VMStatusTest scripts...')\n\n    vmStatusTestScripts = dict()\n    vmStatusTestScripts[idle_test_script] = idleTestScriptName\n    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName\n\n    if local:\n        hutil.log_and_syslog(logging.INFO, \"Saving VMStatusTest scripts from user's configurations...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = save_local_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n        return\n\n    storage_account_name = None\n    storage_account_key = None\n    if settings:\n        storage_account_name = settings.get(\"storageAccountName\", \"\").strip()\n        storage_account_key = settings.get(\"storageAccountKey\", \"\").strip()\n    if storage_account_name and storage_account_key:\n        hutil.log_and_syslog(logging.INFO, \"Downloading VMStatusTest scripts from azure storage...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_blob(storage_account_name,\n                                      storage_account_key,\n                                      src,\n                                      dst,\n                                      hutil)\n            preprocess_files(file_path, hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log_and_syslog(logging.INFO, \"No azure storage account and key specified in protected \"\n                  \"settings. 
Downloading VMStatusTest scripts from external links...\")\n        for src, dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_external_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n    else:\n        # The storage account name and key must be provided together\n        error_msg = \"Azure storage account or storage key is not provided\"\n        hutil.log_and_syslog(logging.ERROR, error_msg)\n        raise ValueError(error_msg)\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    container_name = get_container_name_from_uri(blob_uri)\n    blob_name = get_blob_name_from_uri(blob_uri)\n    download_dir = prepare_download_dir(seqNo)\n    download_path = os.path.join(download_dir, dst)\n    # The guest agent enables extensions one after another,\n    # so concurrent blob downloads will not conflict.\n    blob_service = BlobService(storage_account_name, storage_account_key)\n    try:\n        blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download blob with uri:{0} \"\n                     \"with error {1}\").format(blob_uri, e))\n        raise\n    return download_path\n\ndef download_external_file(uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        download_and_save_file(uri, file_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download external file with uri:{0} \"\n                     \"with error {1}\").format(uri, e))\n        raise\n    return file_path\n\ndef save_local_file(src, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        waagent.SetFileContents(file_path, src)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to save file from user's configuration \"\n                     \"with error {0}\").format(e))\n        raise\n    return file_path\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        Preprocess the text file. 
If it is a binary file, skip it.\n    \"\"\"\n    is_text, code_type = is_text_file(file_path)\n    if is_text:\n        dos2unix(file_path)\n        hutil.log_and_syslog(logging.INFO, \"Converting text files from DOS to Unix formats: Done\")\n        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:\n            remove_bom(file_path)\n            hutil.log_and_syslog(logging.INFO, \"Removing BOM: Done\")\n\ndef is_text_file(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read(512)\n    return is_text(contents)\n\ndef is_text(contents):\n    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']\n    code_type = chardet.detect(contents)['encoding']\n    if code_type in supported_encoding:\n        return True, code_type\n    else:\n        return False, code_type\n\ndef dos2unix(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    # 'rU' (universal newlines) converts CRLF/CR line endings to LF on read\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    f_temp.write(contents)\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef remove_bom(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            f_temp.write(contents.decode(encoding).encode('utf-8'))\n            break\n        except UnicodeDecodeError:\n            continue\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef download_and_save_file(uri, file_path):\n    src = urllib2.urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while buf:\n        dest.write(buf)\n        buf = src.read(buf_size)\n    dest.close()\n    src.close()\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"Create the directory if it does not exist.\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse.urlparse(uriStr)\n    return uri.path\n\ndef get_blob_name_from_uri(uri):\n    return get_properties_from_uri(uri)['blob_name']\n\ndef get_container_name_from_uri(uri):\n    return get_properties_from_uri(uri)['container_name']\n\ndef get_properties_from_uri(uri):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to extract container and blob names from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\ndef download_customized_vmstatustest():\n    download_dir = prepare_download_dir(hutil.get_seq_no())\n    maxRetry = 2\n    for retry in range(0, maxRetry + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception:\n            hutil.log_and_syslog(logging.ERROR, \"Failed to download files, retry=\" + str(retry) + \", maxRetry=\" + str(maxRetry))\n            if retry != maxRetry:\n                hutil.log_and_syslog(logging.INFO, \"Sleep 10 seconds\")\n                time.sleep(10)\n            
else:\n                raise\n\ndef copy_vmstatustestscript(seqNo, oneoff):\n    src_dir = prepare_download_dir(seqNo)\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        src = os.path.join(src_dir, filename)\n        if os.path.isfile(src):\n            if oneoff is not None and oneoff.lower() == \"true\":\n                dst = \"oneoff\"\n            else:\n                dst = \"scheduled\"\n            dst = os.path.join(os.getcwd(), dst)\n            shutil.copy(src, dst)\n\ndef delete_current_vmstatustestscript():\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        current_vmstatustestscript = os.path.join(os.getcwd(), \"patch/\"+filename)\n        if os.path.isfile(current_vmstatustestscript):\n            os.remove(current_vmstatustestscript)\n\nclass Test(unittest.TestCase):\n    def setUp(self):\n        print('\\n\\n============================================================================================')\n        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n        waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n        global hutil\n        hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n        hutil.do_parse_context('TEST')\n\n        global MyPatching\n        MyPatching = FakePatching(hutil)\n        if MyPatching is None:\n            sys.exit(1)\n\n        distro = DistInfo()[0]\n        if 'centos' in distro or 'Oracle' in distro or 'redhat' in distro:\n            MyPatching.cron_restart_cmd = 'service crond restart'\n\n        try:\n            os.remove('mrseq')\n        except:\n            pass\n\n        waagent.SetFileContents(MyPatching.package_downloaded_path, '')\n        waagent.SetFileContents(MyPatching.package_patched_path, '')\n\n    def test_download_time_exceed(self):\n        '''\n        check package.downloaded and package.patched\n        '''\n        print('test_download_time_exceed')\n\n        global settings\n        current_time = time.time()\n        settings = change_settings(\"startTime\", time.strftime('%H:%M', time.localtime(current_time + 180)))\n        settings = change_settings(\"category\", \"importantandrecommended\")\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n        time.sleep(180 + 10)\n\n        all_download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertTrue(set(all_download_list) == set(['a', 'b', 'c', 'd', 'e']))\n        # Check extension.log\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('Download time exceeded' in log_contents)\n        restore_settings()\n\n    def test_stop_before_download(self):\n        '''\n        check stop flag before download and after patch\n        '''\n        print('test_stop_before_download')\n        global settings\n        current_time = time.time()\n        settings = change_settings(\"startTime\", time.strftime('%H:%M', time.localtime(current_time + 180)))\n        settings = change_settings(\"category\", \"importantandrecommended\")\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n\n        os.remove('mrseq')\n        settings = change_settings(\"stop\", \"true\")\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        
self.assertEqual(cm.exception.code, 0)\n        self.assertTrue(MyPatching.exists_stop_flag())\n\n        time.sleep(180 + 5 + 60)\n        self.assertFalse(MyPatching.exists_stop_flag())\n        self.assertFalse(waagent.GetFileContents(MyPatching.package_downloaded_path))\n        self.assertFalse(waagent.GetFileContents(MyPatching.package_patched_path))\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('Downloading patches is stopped/canceled' in log_contents)\n        restore_settings()\n\n    def test_stop_while_download(self):\n        print('test_stop_while_download')\n        global settings\n        current_time = time.time()\n        settings = change_settings(\"startTime\", time.strftime('%H:%M', time.localtime(current_time + 180)))\n        settings = change_settings(\"category\", \"importantandrecommended\")\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        delta_time = int(time.strftime('%S', time.localtime(current_time + 120)))\n\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n\n        # set stop flag after downloaded 40 seconds\n        time.sleep(160 - delta_time)\n        os.remove('mrseq')\n        settings = change_settings(\"stop\", \"true\")\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n        self.assertTrue(MyPatching.exists_stop_flag())\n\n        # Make sure the total sleep time is greater than 180s\n        time.sleep(20 + delta_time + 5)\n        self.assertFalse(MyPatching.exists_stop_flag())\n        download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertEqual(download_list, ['a', 'b', 'c'])\n        self.assertFalse(waagent.GetFileContents(MyPatching.package_patched_path))\n        # Check extension.log\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('Installing patches is stopped/canceled' in log_contents)\n        restore_settings()\n\n\ndef get_patch_list(file_path, category = None):\n    content = waagent.GetFileContents(file_path)\n    if category != None:\n        result = [line.split()[0] for line in content.split('\\n') if line.endswith(category)]\n    else:\n        result = [line.split()[0] for line in content.split('\\n') if ' ' in line]\n    return result\n    \n\ndef get_status(operation, retkey='status'):\n    contents = waagent.GetFileContents(status_file)\n    status = json.loads(contents)[0]['status']\n    if status['operation'] == operation:\n        return status[retkey]\n    return ''\n\ndef change_settings(key, value):\n    with open(settings_file, \"r\") as f:\n        settings_string = f.read()\n        settings = json.loads(settings_string)\n    with open(settings_file, \"w\") as f:\n        settings[key] = value\n        settings_string = json.dumps(settings)\n        f.write(settings_string)\n    return settings\n\ndef restore_settings():\n    idleTestScriptLocal = \"\"\"#!/usr/bin/python\n    # Locally.\n    def is_vm_idle():\n        return True\n    \"\"\"\n\n    healthyTestScriptLocal = \"\"\"#!/usr/bin/python\n    # Locally.\n    def is_vm_healthy():\n        return True\n    \"\"\"\n\n    settings = {\n        \"disabled\" : \"false\",\n        \"stop\" : \"false\",\n        \"rebootAfterPatch\" : \"rebootifneed\",\n        \"category\" : \"important\",\n        \"installDuration\" : \"00:30\",\n        \"oneoff\" : \"false\",\n   
     \"intervalOfWeeks\" : \"1\",\n        \"dayOfWeek\" : \"everyday\",\n        \"startTime\" : \"03:00\",\n        \"vmStatusTest\" : {\n            \"local\" : \"true\",\n            \"idleTestScript\" : idleTestScriptLocal, #idleTestScriptStorage,\n            \"healthyTestScript\" : healthyTestScriptLocal, #healthyTestScriptStorage\n        },\n        \"storageAccountName\" : \"<TOCHANGE>\",\n        \"storageAccountKey\" : \"<TOCHANGE>\"\n    }\n\n    settings_string = json.dumps(settings)\n    settings_file = \"default.settings\"\n    with open(settings_file, \"w\") as f:\n        f.write(settings_string)\n\n\ndef main():\n    if len(sys.argv) == 1:\n        unittest.main()\n        return\n\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" % ExtensionShortName)\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error,\n                                ExtensionShortName)\n    hutil.do_parse_context('TEST')\n    global MyPatching\n    MyPatching = FakePatching(hutil)\n\n    if MyPatching is None:\n        sys.exit(1)\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(download)\", a):\n            download()\n        elif re.match(\"^([-/]*)(patch)\", a):\n            patch()\n        elif re.match(\"^([-/]*)(oneoff)\", a):\n            oneoff()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OSPatching/test/test_handler_3.py",
    "content": "#!/usr/bin/python\n#\n# OSPatching extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport re\nimport time\nimport chardet\nimport tempfile\nimport urllib2\nimport urlparse\nimport shutil\nimport traceback\nimport logging\nfrom azure.storage import BlobService\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\nimport json\nimport unittest\nfrom patch import *\nfrom FakePatching3 import FakePatching\n\nsys.path.append('..')\n\n\n# Global variables definition\nExtensionShortName = 'OSPatching'\nDownloadDirectory = 'download'\nidleTestScriptName = \"idleTest.py\"\nhealthyTestScriptName = \"healthyTest.py\"\nhandlerName = os.path.basename(sys.argv[0])\nstatus_file = './status/0.status'\nlog_file = './extension.log'\n\nsettings_file = \"default.settings\"\nwith open(settings_file, \"r\") as f:\n    settings_string = f.read()\nsettings = json.loads(settings_string)\n\ndef install():\n    hutil.do_parse_context('Install')\n    try:\n        MyPatching.install()\n        hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to install the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Install', 'error', '0', 'Install Failed.')\n\ndef enable():\n    hutil.do_parse_context('Enable')\n    try:\n        MyPatching.parse_settings(settings)\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        oneoff = settings.get(\"oneoff\")\n        download_customized_vmstatustest()\n        copy_vmstatustestscript(hutil.get_seq_no(), oneoff)\n        MyPatching.enable()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to enable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable Failed. 
Current Configuration: ' + current_config)\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall Succeeded.')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    try:\n        # Ensure the same configuration is executed only once\n        hutil.exit_if_seq_smaller()\n        MyPatching.disable()\n        hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded.')\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to disable the extension with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Disable', 'error', '0', 'Disable Failed.')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded.')\n\ndef download():\n    hutil.do_parse_context('Download')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.download()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Download Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to download updates with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Download Failed. Current Configuration: ' + current_config)\n\ndef patch():\n    hutil.do_parse_context('Patch')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Patch Failed. Current Configuration: ' + current_config)\n\ndef oneoff():\n    hutil.do_parse_context('Oneoff')\n    try:\n        MyPatching.parse_settings(settings)\n        MyPatching.patch_one_off()\n        current_config = MyPatching.get_current_config()\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Oneoff Patch Succeeded. Current Configuration: ' + current_config)\n    except Exception as e:\n        current_config = MyPatching.get_current_config()\n        hutil.log_and_syslog(logging.ERROR, \"Failed to one-off patch with error: %s, stack trace: %s\" %(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', 'Oneoff Patch Failed. Current Configuration: ' + current_config)\n\ndef download_files(hutil):\n    local = settings.get(\"vmStatusTest\", dict()).get(\"local\", \"\")\n    if local.lower() == \"true\":\n        local = True\n    elif local.lower() == \"false\":\n        local = False\n    else:\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"local\\\" \"\n                  \"is empty or invalid. Setting it to False. 
Continue...\")\n        local = False\n    idle_test_script = settings.get(\"vmStatusTest\", dict()).get('idleTestScript')\n    healthy_test_script = settings.get(\"vmStatusTest\", dict()).get('healthyTestScript')\n\n    if (not idle_test_script and not healthy_test_script):\n        hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" and \\\"healthyTestScript\\\" \"\n                  \"are both empty. Exit downloading VMStatusTest scripts...\")\n        return\n    elif local:\n        if (idle_test_script and idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should not be uri. Exit downloading VMStatusTest scripts...\")\n            return\n    elif not local:\n        if (idle_test_script and not idle_test_script.startswith(\"http\")) or \\\n           (healthy_test_script and not healthy_test_script.startswith(\"http\")):\n            hutil.log_and_syslog(logging.WARNING, \"The parameter \\\"idleTestScript\\\" or \\\"healthyTestScript\\\" \"\n                  \"should be uri. Exit downloading VMStatusTest scripts...\")\n            return\n\n    hutil.do_status_report('Downloading','transitioning', '0',\n                           'Downloading VMStatusTest scripts...')\n\n    vmStatusTestScripts = dict()\n    vmStatusTestScripts[idle_test_script] = idleTestScriptName\n    vmStatusTestScripts[healthy_test_script] = healthyTestScriptName\n\n    if local:\n        hutil.log_and_syslog(logging.INFO, \"Saving VMStatusTest scripts from user's configurations...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = save_local_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n        return\n\n    storage_account_name = None\n    storage_account_key = None\n    if settings:\n        storage_account_name = settings.get(\"storageAccountName\", \"\").strip()\n        storage_account_key = settings.get(\"storageAccountKey\", \"\").strip()\n    if storage_account_name and storage_account_key:\n        hutil.log_and_syslog(logging.INFO, \"Downloading VMStatusTest scripts from azure storage...\")\n        for src,dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_blob(storage_account_name,\n                                      storage_account_key,\n                                      src,\n                                      dst,\n                                      hutil)\n            preprocess_files(file_path, hutil)\n    elif not(storage_account_name or storage_account_key):\n        hutil.log_and_syslog(logging.INFO, \"No azure storage account and key specified in protected \"\n                  \"settings. 
Downloading VMStatusTest scripts from external links...\")\n        for src, dst in vmStatusTestScripts.items():\n            if not src:\n                continue\n            file_path = download_external_file(src, dst, hutil)\n            preprocess_files(file_path, hutil)\n    else:\n        # The storage account name and key must be provided together\n        error_msg = \"Azure storage account or storage key is not provided\"\n        hutil.log_and_syslog(logging.ERROR, error_msg)\n        raise ValueError(error_msg)\n\ndef download_blob(storage_account_name, storage_account_key,\n                  blob_uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    container_name = get_container_name_from_uri(blob_uri)\n    blob_name = get_blob_name_from_uri(blob_uri)\n    download_dir = prepare_download_dir(seqNo)\n    download_path = os.path.join(download_dir, dst)\n    # The guest agent enables extensions one after another,\n    # so concurrent blob downloads will not conflict.\n    blob_service = BlobService(storage_account_name, storage_account_key)\n    try:\n        blob_service.get_blob_to_path(container_name, blob_name, download_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download blob with uri:{0} \"\n                     \"with error {1}\").format(blob_uri, e))\n        raise\n    return download_path\n\ndef download_external_file(uri, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        download_and_save_file(uri, file_path)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to download external file with uri:{0} \"\n                     \"with error {1}\").format(uri, e))\n        raise\n    return file_path\n\ndef save_local_file(src, dst, hutil):\n    seqNo = hutil.get_seq_no()\n    download_dir = prepare_download_dir(seqNo)\n    file_path = os.path.join(download_dir, dst)\n    try:\n        waagent.SetFileContents(file_path, src)\n    except Exception as e:\n        hutil.log_and_syslog(logging.ERROR, (\"Failed to save file from user's configuration \"\n                     \"with error {0}\").format(e))\n        raise\n    return file_path\n\ndef preprocess_files(file_path, hutil):\n    \"\"\"\n        Preprocess the text file. 
If it is a binary file, skip it.\n    \"\"\"\n    is_text, code_type = is_text_file(file_path)\n    if is_text:\n        dos2unix(file_path)\n        hutil.log_and_syslog(logging.INFO, \"Converting text files from DOS to Unix formats: Done\")\n        if code_type in ['UTF-8', 'UTF-16LE', 'UTF-16BE']:\n            remove_bom(file_path)\n            hutil.log_and_syslog(logging.INFO, \"Removing BOM: Done\")\n\ndef is_text_file(file_path):\n    with open(file_path, 'rb') as f:\n        contents = f.read(512)\n    return is_text(contents)\n\ndef is_text(contents):\n    supported_encoding = ['ascii', 'UTF-8', 'UTF-16LE', 'UTF-16BE']\n    code_type = chardet.detect(contents)['encoding']\n    if code_type in supported_encoding:\n        return True, code_type\n    else:\n        return False, code_type\n\ndef dos2unix(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    # 'rU' (universal newlines) converts CRLF/CR line endings to LF on read\n    with open(file_path, 'rU') as f:\n        contents = f.read()\n    f_temp.write(contents)\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef remove_bom(file_path):\n    temp_file_path = tempfile.mkstemp()[1]\n    f_temp = open(temp_file_path, 'wb')\n    with open(file_path, 'rb') as f:\n        contents = f.read()\n    for encoding in [\"utf-8-sig\", \"utf-16\"]:\n        try:\n            f_temp.write(contents.decode(encoding).encode('utf-8'))\n            break\n        except UnicodeDecodeError:\n            continue\n    f_temp.close()\n    shutil.move(temp_file_path, file_path)\n\ndef download_and_save_file(uri, file_path):\n    src = urllib2.urlopen(uri)\n    dest = open(file_path, 'wb')\n    buf_size = 1024\n    buf = src.read(buf_size)\n    while buf:\n        dest.write(buf)\n        buf = src.read(buf_size)\n    dest.close()\n    src.close()\n\ndef prepare_download_dir(seqNo):\n    download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)\n    create_directory_if_not_exists(download_dir_main)\n    download_dir = os.path.join(download_dir_main, seqNo)\n    create_directory_if_not_exists(download_dir)\n    return download_dir\n\ndef create_directory_if_not_exists(directory):\n    \"\"\"Create the directory if it does not exist.\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef get_path_from_uri(uriStr):\n    uri = urlparse.urlparse(uriStr)\n    return uri.path\n\ndef get_blob_name_from_uri(uri):\n    return get_properties_from_uri(uri)['blob_name']\n\ndef get_container_name_from_uri(uri):\n    return get_properties_from_uri(uri)['container_name']\n\ndef get_properties_from_uri(uri):\n    path = get_path_from_uri(uri)\n    if path.endswith('/'):\n        path = path[:-1]\n    if path[0] == '/':\n        path = path[1:]\n    first_sep = path.find('/')\n    if first_sep == -1:\n        hutil.log_and_syslog(logging.ERROR, \"Failed to extract container and blob names from {}\".format(path))\n    blob_name = path[first_sep+1:]\n    container_name = path[:first_sep]\n    return {'blob_name': blob_name, 'container_name': container_name}\n\ndef download_customized_vmstatustest():\n    maxRetry = 2\n    for retry in range(0, maxRetry + 1):\n        try:\n            download_files(hutil)\n            break\n        except Exception:\n            hutil.log_and_syslog(logging.ERROR, \"Failed to download files, retry=\" + str(retry) + \", maxRetry=\" + str(maxRetry))\n            if retry != maxRetry:\n                hutil.log_and_syslog(logging.INFO, \"Sleep 10 seconds\")\n                time.sleep(10)\n            else:\n                raise\n\ndef 
copy_vmstatustestscript(seqNo, oneoff):\n    src_dir = prepare_download_dir(seqNo)\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        src = os.path.join(src_dir, filename)\n        if os.path.isfile(src):\n            if oneoff is not None and oneoff.lower() == \"true\":\n                dst = \"oneoff\"\n            else:\n                dst = \"scheduled\"\n            dst = os.path.join(os.getcwd(), dst)\n            shutil.copy(src, dst)\n\ndef delete_current_vmstatustestscript():\n    for filename in (idleTestScriptName, healthyTestScriptName):\n        current_vmstatustestscript = os.path.join(os.getcwd(), \"patch/\"+filename)\n        if os.path.isfile(current_vmstatustestscript):\n            os.remove(current_vmstatustestscript)\n\nclass Test(unittest.TestCase):\n    def setUp(self):\n        print('\\n\\n============================================================================================')\n        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n        waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n        global hutil\n        hutil = Util.HandlerUtility(waagent.Log, waagent.Error,\n                                    ExtensionShortName)\n        hutil.do_parse_context('TEST')\n\n        global MyPatching\n        MyPatching = FakePatching(hutil)\n        if MyPatching is None:\n            sys.exit(1)\n\n        distro = DistInfo()[0]\n        if 'centos' in distro or 'Oracle' in distro or 'redhat' in distro:\n            MyPatching.cron_restart_cmd = 'service crond restart'\n\n        try:\n            os.remove('mrseq')\n        except:\n            pass\n\n        waagent.SetFileContents(MyPatching.package_downloaded_path, '')\n        waagent.SetFileContents(MyPatching.package_patched_path, '')\n\n    def test_stop_between_download_and_stage1(self):\n        print('test_stop_between_download_and_stage1')\n\n        global settings\n        current_time = time.time()\n        settings = change_settings(\"startTime\", time.strftime('%H:%M', time.localtime(current_time + 180)))\n        settings = change_settings(\"category\", \"importantandrecommended\")\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        delta_time = int(time.strftime('%S', time.localtime(current_time + 120)))\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n\n        # Set the stop flag 40 seconds after the download starts\n        time.sleep(160 - delta_time)\n        os.remove('mrseq')\n        settings = change_settings(\"stop\", \"true\")\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n        self.assertTrue(MyPatching.exists_stop_flag())\n\n        # Make sure the total sleep time is greater than 180s\n        time.sleep(20 + delta_time + 5 + 60)\n        self.assertFalse(MyPatching.exists_stop_flag())\n        download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertEqual(download_list, ['a', 'b', 'c', 'd', 'e', '1', '2', '3', '4'])\n        self.assertFalse(waagent.GetFileContents(MyPatching.package_patched_path))\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue('Installing patches is stopped/canceled' in log_contents)\n        restore_settings()\n\n    def test_stop_between_stage1_and_stage2(self):\n        print('test_stop_between_stage1_and_stage2')\n\n        global settings\n        current_time = 
time.time()\n        settings = change_settings(\"startTime\", time.strftime('%H:%M', time.localtime(current_time + 180)))\n        settings = change_settings(\"category\", \"importantandrecommended\")\n\n        old_log_len = len(waagent.GetFileContents(log_file))\n        delta_time = int(time.strftime('%S', time.localtime(current_time)))\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n\n        # Set stop flag after patched 10 seconds\n        # Meanwhile the extension is sleeping between stage 1 & 2\n        time.sleep(180 - delta_time + 10)\n        os.remove('mrseq')\n        settings = change_settings(\"stop\", \"true\")\n        with self.assertRaises(SystemExit) as cm:\n            enable()\n        self.assertEqual(cm.exception.code, 0)\n        self.assertTrue(MyPatching.exists_stop_flag())\n\n        # The patching (stage 1 & 2) has ended\n        time.sleep(20)\n        self.assertFalse(MyPatching.exists_stop_flag())\n        download_list = get_patch_list(MyPatching.package_downloaded_path)\n        self.assertEqual(download_list, ['a', 'b', 'c', 'd', 'e', '1', '2', '3', '4'])\n        patch_list = get_patch_list(MyPatching.package_patched_path)\n        self.assertEqual(patch_list, ['a', 'b', 'c', 'd', 'e'])\n        log_contents = waagent.GetFileContents(log_file)[old_log_len:]\n        self.assertTrue(\"Installing patches (Category:\" + MyPatching.category_all + \") is stopped/canceled\" in log_contents)\n        restore_settings()\n\n\ndef get_patch_list(file_path, category = None):\n    content = waagent.GetFileContents(file_path)\n    if category != None:\n        result = [line.split()[0] for line in content.split('\\n') if line.endswith(category)]\n    else:\n        result = [line.split()[0] for line in content.split('\\n') if ' ' in line]\n    return result\n    \n\ndef get_status(operation, retkey='status'):\n    contents = waagent.GetFileContents(status_file)\n    status = json.loads(contents)[0]['status']\n    if status['operation'] == operation:\n        return status[retkey]\n    return ''\n\ndef change_settings(key, value):\n    with open(settings_file, \"r\") as f:\n        settings_string = f.read()\n        settings = json.loads(settings_string)\n    with open(settings_file, \"w\") as f:\n        settings[key] = value\n        settings_string = json.dumps(settings)\n        f.write(settings_string)\n    return settings\n\ndef restore_settings():\n    idleTestScriptLocal = \"\"\"#!/usr/bin/python\n    # Locally.\n    def is_vm_idle():\n        return True\n    \"\"\"\n\n    healthyTestScriptLocal = \"\"\"#!/usr/bin/python\n    # Locally.\n    def is_vm_healthy():\n        return True\n    \"\"\"\n\n    settings = {\n        \"disabled\" : \"false\",\n        \"stop\" : \"false\",\n        \"rebootAfterPatch\" : \"rebootifneed\",\n        \"category\" : \"important\",\n        \"installDuration\" : \"00:30\",\n        \"oneoff\" : \"false\",\n        \"intervalOfWeeks\" : \"1\",\n        \"dayOfWeek\" : \"everyday\",\n        \"startTime\" : \"03:00\",\n        \"vmStatusTest\" : {\n            \"local\" : \"true\",\n            \"idleTestScript\" : idleTestScriptLocal, #idleTestScriptStorage,\n            \"healthyTestScript\" : healthyTestScriptLocal, #healthyTestScriptStorage\n        },\n        \"storageAccountName\" : \"<TOCHANGE>\",\n        \"storageAccountKey\" : \"<TOCHANGE>\"\n    }\n\n    settings_string = json.dumps(settings)\n    settings_file = \"default.settings\"\n    
with open(settings_file, \"w\") as f:\n        f.write(settings_string)\n\ndef main():\n    if len(sys.argv) == 1:\n        unittest.main()\n        return\n\n    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    waagent.Log(\"%s started to handle.\" % ExtensionShortName)\n\n    global hutil\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error,\n                                ExtensionShortName)\n    hutil.do_parse_context('TEST')\n    global MyPatching\n    MyPatching = FakePatching(hutil)\n\n    if MyPatching is None:\n        sys.exit(1)\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(download)\", a):\n            download()\n        elif re.match(\"^([-/]*)(patch)\", a):\n            patch()\n        elif re.match(\"^([-/]*)(oneoff)\", a):\n            oneoff()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OmsAgent/.gitignore",
    "content": "packages\nkeys/keyring.gpg\nkeys/keyring.gpg~\nkeys/.gnupg/\n.vscode/\next/future\nUtils/\nwaagent\nwaagentc\n"
  },
  {
    "path": "OmsAgent/HandlerManifest.json",
    "content": "[\n  {\n    \"name\":  \"OmsAgentForLinux\",\n    \"version\": \"1.13.19\",\n    \"handlerManifest\": {\n      \"installCommand\": \"omsagent_shim.sh -install\",\n      \"uninstallCommand\": \"omsagent_shim.sh -uninstall\",\n      \"updateCommand\": \"omsagent_shim.sh -update\",\n      \"enableCommand\": \"omsagent_shim.sh -enable\",\n      \"disableCommand\": \"omsagent_shim.sh -disable\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false,\n      \"updateMode\": \"UpdateWithInstall\",\n      \"continueOnUpdateFailure\": \"true\"\n    }\n  }\n]\n"
  },
  {
    "path": "OmsAgent/ImportGPGkey.sh",
    "content": "#!/bin/sh\n\nif [ -z \"$1\" ]; then\n    echo \"Usage:\"\n    echo \"   $0 PUBLIC_GPG_KEY\"\n    exit 1\nfi\n\nif [ -z \"$2\" ]; then\n    KEYRING_NAME=\"keyring.gpg\"\nelse\n    KEYRING_NAME=$2\nfi\n\nTARGET_DIR=\"$(dirname $1)\"\nHOME=$TARGET_DIR gpg --no-default-keyring --keyring $TARGET_DIR/$KEYRING_NAME --import $1\nRETVAL=$?\n\n# chown omsagent $TARGET_DIR/$KEYRING_NAME\n\nexit $RETVAL"
  },
  {
    "path": "OmsAgent/README.md",
    "content": "# [DEPRECATED] OmsAgent Extension\n\n> :warning: The Log Analytics agent has been **deprecated** and has no support as of **August 31, 2024.** If you use the Log Analytics agent to ingest data to Azure Monitor, [migrate now to the new Azure Monitor agent](https://docs.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-migration).\n>\n\n[See the latest version and extension-bundle mapping.](https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/oms-linux#agent-and-vm-extension-version)\n\nYou can read the User Guide below.\n* [Learn more: Azure Virtual Machine Extensions](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-extensions-features/)\n\nOmsAgent Extension can:\n* Install the omsagent\n* Onboard to a OMS workspace\n\n# User Guide\n\n## 1. Configuration schema\n\n### 1.1. Public configuration\n\nSchema for the public configuration file looks like this:\n\n* `workspaceId`: (required, string) the OMS workspace id to onboard to\n* `stopOnMultipleConnections`: (optional, true/false) warn and stop onboarding if the machine already has a workspace connection; defaults to false\n* `noDigest`: (optional, true/false) RPM manager skips verification of package or header digests when reading (same as running rpm --nodigest --nofiledigest)\n* `skipDockerProviderInstall`: (optional, true/false) if the value is true, then skips the installation of the docker provider; default value is false\n\n```json\n{\n  \"workspaceId\": \"<workspace-id (guid)>\",\n  \"stopOnMultipleConnections\": true/false,\n  \"noDigest\": true/false,\n  \"skipDockerProviderInstall\": true/false\n}\n```\n\n### 1.2. Protected configuration\n\nSchema for the protected configuration file looks like this:\n\n* `workspaceKey`: (required, string) the primary/secondary shared key of the workspace\n* `proxy`: (optional, string) the proxy connection string - of the form \\[user:pass@\\]host\\[:port\\]\n* `vmResourceId`: (optional, string) the full azure resource id of the vm - of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName} for Resource Manager VMs and of the form /subscriptions/{subscriptionId}/resourceGroups/{vmName}/providers/Microsoft.ClassicCompute/virtualMachines/{vmName} for Classic VMs\n\n```json\n{\n  \"workspaceKey\": \"<workspace-key>\",\n  \"proxy\": \"<proxy-string>\",\n  \"vmResourceId\": \"<vm-resource-id>\"\n}\n```\n\n## 2. Deploying the Extension to a VM\n\nYou can deploy it using Azure CLI, Azure Powershell and ARM template.\n\n\n\n### 2.1. 
Using [**Azure CLI**][azure-cli]\nBefore deploying OmsAgent Extension, you should configure your `public.json` and `protected.json`\n(see sections 1.1 and 1.2 above).\n\n#### 2.1.1 Resource Manager\n\nYou can deploy the OmsAgent Extension by running:\n```\naz vm extension set \\\n  --resource-group myResourceGroup \\\n  --vm-name myVM \\\n  --name OmsAgentForLinux \\\n  --publisher Microsoft.EnterpriseCloud.Monitoring \\\n  --version <version> --protected-settings '{\"workspaceKey\": \"omskey\"}' \\\n  --settings '{\"workspaceId\": \"omsid\"}'\n\n```\n\n#### 2.1.2 Classic\nClassic mode is used to manage legacy resources created outside of Resource Manager, and requires the [classic cli][azure-cli-classic] for management via the command line.\nYou need to enable Classic Mode (also called Azure Service Management mode) in the cli by running:\n```\nazure config mode asm\n```\n\nYou can deploy the OmsAgent Extension by running:\n```\nazure vm extension set <vm-name> \\\nOmsAgentForLinux Microsoft.EnterpriseCloud.Monitoring <version> \\\n--public-config-path public.json  \\\n--private-config-path protected.json\n```\n\nIn the command above, you can replace the version with `'*'` to use the latest\navailable version, or `'1.*'` to get the newest version that does not introduce\nbreaking schema changes. To learn the latest version available, run:\n```\nazure vm extension list\n```\n\n### 2.2. Using [**Azure Powershell**][azure-powershell]\n\n#### 2.2.1 Resource Manager\n\nYou can log in to your Azure account (Azure Resource Manager mode) by running:\n\n```powershell\nLogin-AzureRmAccount\n```\n\nClick [**HERE**](https://azure.microsoft.com/en-us/documentation/articles/powershell-azure-resource-manager/) to learn more about how to use Azure Powershell with Azure Resource Manager.\n\nYou can deploy the OmsAgent Extension by running:\n\n```powershell\n$RGName = '<resource-group-name>'\n$VmName = '<vm-name>'\n$Location = '<location>'\n\n$ExtensionName = 'OmsAgentForLinux'\n$Publisher = 'Microsoft.EnterpriseCloud.Monitoring'\n$Version = '<version>'\n\n$PublicConf = '{\n    \"workspaceId\": \"<workspace id>\",\n    \"stopOnMultipleConnections\": true/false,\n    \"noDigest\": true/false,\n    \"skipDockerProviderInstall\": true/false\n}'\n$PrivateConf = '{\n    \"workspaceKey\": \"<workspace key>\",\n    \"proxy\": \"<proxy string>\",\n    \"vmResourceId\": \"<vm resource id>\"\n}'\n\nSet-AzureRmVMExtension -ResourceGroupName $RGName -VMName $VmName -Location $Location `\n  -Name $ExtensionName -Publisher $Publisher `\n  -ExtensionType $ExtensionName -TypeHandlerVersion $Version `\n  -SettingString $PublicConf -ProtectedSettingString $PrivateConf\n```\n\n#### 2.2.2 Classic\n\nYou can log in to your Azure account (Azure Service Management mode) by running:\n\n```powershell\nAdd-AzureAccount\n```\n\nYou can deploy the OmsAgent Extension by running:\n\n```powershell\n$VmName = '<vm-name>'\n$vm = Get-AzureVM -ServiceName $VmName -Name $VmName\n\n$ExtensionName = 'OmsAgentForLinux'\n$Publisher = 'Microsoft.EnterpriseCloud.Monitoring'\n$Version = '<version>'\n\n$PublicConf = '{\n    \"workspaceId\": \"<workspace id>\",\n    \"stopOnMultipleConnections\": true/false,\n    \"noDigest\": true/false,\n    \"skipDockerProviderInstall\": true/false\n}'\n$PrivateConf = '{\n    \"workspaceKey\": \"<workspace key>\",\n    \"proxy\": \"<proxy string>\",\n    \"vmResourceId\": \"<vm resource id>\"\n}'\n\nSet-AzureVMExtension -ExtensionName $ExtensionName -VM $vm `\n  -Publisher $Publisher -Version $Version `\n  -PrivateConfiguration 
$PrivateConf -PublicConfiguration $PublicConf |\n  Update-AzureVM\n```\n\n\n### 2.3. Using [**ARM Template**][arm-template]\n```json\n{\n  \"type\": \"Microsoft.Compute/virtualMachines/extensions\",\n  \"name\": \"<extension-deployment-name>\",\n  \"apiVersion\": \"<api-version>\",\n  \"location\": \"<location>\",\n  \"dependsOn\": [\n    \"[concat('Microsoft.Compute/virtualMachines/', <vm-name>)]\"\n  ],\n  \"properties\": {\n    \"publisher\": \"Microsoft.EnterpriseCloud.Monitoring\",\n    \"type\": \"OmsAgentForLinux\",\n    \"typeHandlerVersion\": \"1.4\",\n    \"settings\": {\n      \"workspaceId\": \"<workspace id>\",\n      \"stopOnMultipleConnections\": true/false,\n      \"noDigest\": true/false,\n      \"skipDockerProviderInstall\": true/false\n    },\n    \"protectedSettings\": {\n      \"workspaceKey\": \"<workspace key>\",\n      \"proxy\": \"<proxy string>\",\n      \"vmResourceId\": \"<vm resource id>\"\n    }\n  }\n}\n```\n\n## 3. Scenarios\n\n### 3.1 Onboard to an OMS workspace\n```json\n{\n  \"workspaceId\": \"MyWorkspaceId\",\n  \"stopOnMultipleConnections\": true,\n  \"noDigest\": false,\n  \"skipDockerProviderInstall\": true\n}\n```\n```json\n{\n  \"workspaceKey\": \"MyWorkspaceKey\",\n  \"proxy\": \"proxyuser:proxypassword@proxyserver:8080\",\n  \"vmResourceId\": \"/subscriptions/c90fcea1-7cd5-4255-9e2e-25d627a2a259/resourceGroups/RGName/providers/Microsoft.Compute/virtualMachines/VMName\"\n}\n```\n\n## [Supported Linux Distributions](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/log-analytics-agent#supported-linux-operating-systems)\n\n## Troubleshooting\n\n* The status of the extension is reported back to Azure so that the user can\nsee the status in the Azure Portal\n* All the execution output and errors generated by the extension are logged into\nthe following directories:\n`/var/lib/waagent/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-<version>/packages/` and `/opt/microsoft/omsagent/bin`;\nthe tail of the output is logged into the log directory specified\nin HandlerEnvironment.json and reported back to Azure\n* The operation log of the extension is the `/var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/<version>/extension.log` file.\n\n### Common error codes and their meanings\n\n| Error Code | Meaning | Possible Action |\n| :---: | --- | --- |\n| 10 | VM is already connected to an OMS workspace | To connect the VM to the workspace specified in the extension schema, set stopOnMultipleConnections to false in public settings or remove this property. This VM gets billed once for each workspace it is connected to. |\n| 11 | Invalid config provided to the extension | Follow the preceding examples to set all property values necessary for deployment. |\n| 12 | The dpkg package manager is locked | Make sure all dpkg update operations on the machine have finished and retry. |\n| 20 | Enable called prematurely | [Update the Azure Linux Agent](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/update-agent) to the latest available version. 
|\n| 40-44 | Issue with the Automatic Management scenario | Please contact support with the details from the /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/\\<version\\>/extension.log |\n| 51 | This extension is not supported on the VM's operating system | |\n| 52 | The extension failed due to a missing dependency | |\n| 53 | The extension failed due to missing or wrong configuration parameters | |\n| 55 | Cannot connect to the Microsoft Operations Management Suite service | Check that the system either has Internet access, or that a valid HTTP proxy has been provided. Additionally, check the correctness of the workspace ID. |\n\nAdditional error codes and troubleshooting information can be found on the [OMS-Agent-for-Linux Troubleshooting Guide](https://github.com/Microsoft/OMS-Agent-for-Linux/blob/master/docs/Troubleshooting.md#).\n\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli-classic]: https://docs.microsoft.com/en-us/cli/azure/install-classic-cli\n[azure-cli]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/\n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n
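\nFor example, to follow the extension's operation log from the bullet above while reproducing an issue (replace <version> with the installed extension version):\n\n```\ntail -f /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/<version>/extension.log\n```\n"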
  },
  {
    "path": "OmsAgent/apply_version.sh",
    "content": "#! /bin/bash\n\nsource ./omsagent.version\n\necho \"OMS_EXTENSION_VERSION=$OMS_EXTENSION_VERSION\"\necho \"OMS_SHELL_BUNDLE_VERSION=$OMS_SHELL_BUNDLE_VERSION\"\n\n\n# updating HandlerManifest.json\n# check for \"version\": \"1.12.5\",\nsed -i \"s/\\\"version\\\".*$/\\\"version\\\": \\\"$OMS_EXTENSION_VERSION\\\",/g\" HandlerManifest.json\n\n# updating watcherutil.py\n# check OMSExtensionVersion = '1.12.5'\nsed -i \"s/^OMSExtensionVersion = .*$/OMSExtensionVersion = '$OMS_EXTENSION_VERSION'/\"  watcherutil.py\n\n# updating omsagent.py\n# check BundleFileName = 'omsagent-0.0.0-0.universal.x64.sh'\nsed -i \"s/^BundleFileName = .*$/BundleFileName = 'omsagent-$OMS_SHELL_BUNDLE_VERSION.universal.x64.sh'/\" omsagent.py\n\n# updating manifest.xml\n# check <Version>...</Version>\nsed -i -e \"s|<Version>[0-9a-z.]\\{1,\\}</Version>|<Version>$OMS_EXTENSION_VERSION</Version>|g\" manifest.xml\n"
  },
  {
    "path": "OmsAgent/extension-test/README.md",
    "content": "\n# OMS Extension Automated Testing\n\n## Requirements\n\n* If host machine is Windows:\n  * Must active Windows Subsystem for Linux [WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10)\n* Create a ssh key using [ssh-keygen](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent/)\n* [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)\n* Putty PSCP\n  * [Putty for Windows](https://www.putty.org/)\n  * Putty tools for Linux:\n    * For DPKG: 'sudo apt-get install putty-tools'\n    * For RPM: 'sudo yum install putty-tools'\n    * For SUSE: 'sudo zypper install putty-tools'\n* Python 2.7+ & [pip](https://pip.pypa.io/en/stable/installing/)\n* [Requests](http://docs.python-requests.org/en/master/), [ADAL](https://github.com/AzureAD/azure-activedirectory-library-for-python), [json2html](https://github.com/softvar/json2html), [rstr](https://pypi.org/project/rstr/)\n\n```bash\n$ pip install requests adal json2html rstr\n```\n\n## Images currently supported for testing:\n\n* CentOS 6 and 7\n* Oracle Linux 6 and 7\n* Debian 8 and 9\n* Ubuntu 14.04, 16.04, and 18.04\n* Red Hat 6 and 7\n* SUSE 12\n\n## Running Tests\n\n### Prepare\n\n#### Resources\n\n1. Create a resource group that will be used to store all test resources\n2. Create an Azure Key Vault to store test secrets\n3. Create a Log Analytics workspace where your test VMs will send data\n  - From the workspace blade, navigate to Settings > Advanced Settings > and note the workspace Id and Key for later\n4. Create a network security group, preferably in West US 2\n  - From the NSG blade, navigate to Settings > Inbound Security Rules > Add\n  - Use the following settings\n    - `Source` – IP Addresses\n    - `Source IP Addresses/CIDR ranges` – the IP of your host machine\n    - `Source port ranges` – *\n    - `Destination` – Any\n    - `Destination port ranges` – 22\n    - `Protocol` – Any or TCP\n    - `Action` – Allow\n    - `Priority` – Lowest possible number\n    - `Name` – AllowSSH\n  - Add\n5. [Increase your VM quota](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request) to 15 in the region you will specify below in parameters.json\n6. [Optional] Register your own AAD app to allow end-to-end verification script to access Microsoft REST APIs\n  - Azure Portal > Azure Active Directory > App Registrations (Preview) > New Registration\n    - `Name` – A name of your choice, can be changed later\n    - `Supported Account Types` – Accounts in this organizational directory only (Microsoft)\n    - `Redirect URI (Optional)` – Leave blank\n    - Register\n    - Use Application (client) ID value displayed in app overview to replace `<app-id>` in parameters.json\n  - In blade of new registration > Certificates & Secrets > New Client Secret\n    - `Description` – A descriptive word or phrase of your choice\n    - `Expires` – Never\n    - Add\n    - *Copy down the new client secret value!* Use this to replace `<app-secret>` in parameters.json\n\n#### Parameters\n1. 
In your Azure Key Vault, manually upload secrets with the following name-value pairings:\n  - `<tenant>` – your AAD tenant, visible in Azure Portal > Azure Active Directory > Properties > Directory ID\n  - `<app-id>`, `<app-secret>` – verify_e2e service principal ID, secret (available in OneNote document, or use the values from the app you optionally registered in step 6 above)\n  - `<subscription-id>` – ID of the subscription that hosts your desired Log Analytics test workspace\n  - `<tenant-id>` – ID of your Azure AD tenant\n  - `<workspace-id>`, `<workspace-key>` – Log Analytics test workspace ID, key  \n2. In parameters.json, fill in the following:\n  - `<resource group>`, `<location>` – resource group, region (e.g. westus2) in which you want your VMs created\n  - `<username>`, `<password>` – the VM username and password (see [requirements](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/faq#what-are-the-password-requirements-when-creating-a-vm))\n  - `<nsg resource group>` – resource group of your NSG\n  - `<nsg>` – NSG name\n  - `<size>` – Standard_B1ms\n  - `<workspace>` – name of the workspace you created\n  - `<key vault>` – name of the Key Vault you created\n  - `<old version>` - specific version of the extension (define as empty \"\" if not using)\n\n(A filled-in example parameters.json is shown at the end of this README.)\n\n#### Other\n1. Allow the end-to-end verification script to read your workspace\n  - Open workspace in Azure Portal\n  - Access control (IAM) > Add\n    - `Role` – Reader\n    - `Assign access to` – Azure AD user, group, or application\n    - `Select` – verify_e2e\n  - Save\n2. Log in to Azure using the Azure CLI and set your subscription\n\n```bash\n$ az login\n$ az account set --subscription subscription_name\n```\n\n3. Custom Log Setup:\n  - [Custom logs Docs](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-data-sources-custom-logs)\n  - Add a custom.log file to set up Custom_Log_CL\n    ![AddingCustomlogFile](pictures/AddingCustomlogFile.png?raw=true)\n  - Add the location of the file on the containers, i.e. '/var/log/custom.log'\n    ![AddLocationofFile](pictures/AddLocationofFile.png?raw=true)\n  - Add Custom_Log_CL tag\n  ![AddingCustomlogTag](pictures/AddingCustomlogTag.png?raw=true)\n\n### Run test scripts\n\n- Available modes: \n  - default: No options needed. Runs the install & reinstall tests on the latest agent with a 10 min wait time before verification.\n  - `long`: Runs the tests just like the default mode but adds a much longer wait time before verification\n  - `autoupgrade`: Runs the tests just like the default mode but waits until the agent is updated to a new version and terminates if running for more than 26 hours.\n  - `instantupgrade`: Installs the older version first and runs the default tests after a force upgrade to the newer version\n  - `debug`: AZ CLI commands run with '--verbose' by default. 
Add 'debug' after the mode (e.g. 'long debug') to see complete debug logs from the az CLI\n\n#### All images in default mode\n\n```bash\n$ python -u oms_extension_tests.py\n```\n\n#### All images in default mode with debug in long run\n\n```bash\n$ python -u oms_extension_tests.py long debug\n```\n\n#### Subset of images\n\n```bash\n$ python -u oms_extension_tests.py image1 image2 ...\n```\n\n#### Autoupgrade of images (This option will wait until the extension is upgraded to the new version and continues to the next steps after verifying data)\n\n```bash\n$ python -u oms_extension_tests.py autoupgrade image1 image2 ...\n```\n\n#### Instantupgrade of images (This option will install the desired older version of the extension first and then force-upgrade to the latest version)\n\nNote: you must define a proper value for `old version` in the parameters.json file; otherwise the program will encounter an undefined typeHandler error.\n\n```bash\n$ python -u oms_extension_tests.py instantupgrade image1 image2 ...\n```\n
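\n#### Example parameters.json (illustrative)\n\nA filled-in parameters.json might look like the following. The values are placeholders only; the key names match the list above plus the `ssh private` key-file path that oms_extension_tests.py reads:\n\n```json\n{\n    \"resource group\": \"oms-test-rg\",\n    \"location\": \"westus2\",\n    \"username\": \"azureuser\",\n    \"password\": \"<password>\",\n    \"nsg resource group\": \"oms-test-rg\",\n    \"nsg\": \"oms-test-nsg\",\n    \"size\": \"Standard_B1ms\",\n    \"workspace\": \"oms-test-workspace\",\n    \"key vault\": \"oms-test-kv\",\n    \"ssh private\": \"/home/tester/.ssh/id_rsa\",\n    \"old version\": \"\"\n}\n```\n"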
  },
  {
    "path": "OmsAgent/extension-test/oms_extension_tests.py",
    "content": "\"\"\"\nTest the OMS Agent on all or a subset of images.\n\nSetup: read parameters and setup HTML report\nTest:\n1. Create vm and install agent\n2. Wait for data to propagate to backend and check for data\n3. Remove extension\n4. Reinstall extension\n5. Optionally, wait for hours and check data and extension status\n6. Purge extension and delete vm\nFinish: compile HTML report and log file\n\"\"\"\n\nimport json\nimport os\nimport os.path\nimport subprocess\nimport re\nimport sys\nimport rstr\nimport glob\nimport shutil\n\nfrom time import sleep\nfrom datetime import datetime, timedelta\nfrom platform import system\nfrom collections import OrderedDict\nfrom verify_e2e import check_e2e\n\nfrom json2html import *\n\nE2E_DELAY = 15 # Delay (minutes) before checking for data\nAUTOUPGRADE_DELAY = 15 # Delay (minutes) before rechecking the extension version\nLONG_DELAY = 250 # Delay (minutes) before rechecking extension\n\nimages_list = { 'ubuntu14': 'Canonical:UbuntuServer:14.04.5-LTS:14.04.201808180',\n         'ubuntu16': 'Canonical:UbuntuServer:16.04-LTS:latest',\n         'ubuntu18': 'Canonical:UbuntuServer:18.04-LTS:latest',\n         'debian8': 'credativ:Debian:8:latest',\n         'debian9': 'credativ:Debian:9:latest',\n         'redhat6': 'RedHat:RHEL:6.9:latest',\n         'redhat7': 'RedHat:RHEL:7.3:latest',\n         'centos6': 'OpenLogic:CentOS:6.9:latest',\n         'centos7': 'OpenLogic:CentOS:7.5:latest',\n         # 'oracle6': 'Oracle:Oracle-Linux:6.9:latest',\n         'oracle7': 'Oracle:Oracle-Linux:7.5:latest',\n         'sles12': 'SUSE:SLES:12-SP3:latest',\n         'sles15': 'SUSE:SLES:15:latest'}\n\nvmnames = []\nimages = {}\ninstall_times = {}\n\nrunwith = '--verbose'\n\nos.system('touch ./omsfiles/omsresults.log')\nos.system('touch ./omsfiles/omsresults.html')\nos.system('touch ./omsfiles/omsresults.status')\n\nvms_list = []\nif len(sys.argv) > 0:\n    options = sys.argv[1:]\n    vms_list = [ i for i in options if i not in ('long', 'debug', 'autoupgrade', 'instantupgrade')]\n    is_long = 'long' in options\n    runwith = '--debug' if 'debug' in options else '--verbose'\n    if 'autoupgrade' in options and 'instantupgrade' in options:\n        print(\"Select only one option from 'autoupgrade' and 'instantupgrade'. 
You cannot run both at the same time\")\n        exit()\n    is_autoupgrade = 'autoupgrade' in options\n    is_instantupgrade = 'instantupgrade' in options\nelse:\n    is_long = is_debug = is_autoupgrade = is_instantupgrade = False\n\nif vms_list:\n    for vm in vms_list:\n        vm_dict = { vm: images_list[vm] }\n        images.update(vm_dict)\nelse:\n    images = images_list\n\nprint(\"List of VMs & Image Sources added for testing: {}\".format(images))\n\nwith open('{0}/parameters.json'.format(os.getcwd()), 'r') as f:\n    parameters = f.read()\n    if re.search(r'\"<.*>\"', parameters):\n        print('Please replace placeholders in parameters.json')\n        exit()\n    parameters = json.loads(parameters)\n\nresource_group = parameters['resource group']\nlocation = parameters['location']\nusername = parameters['username']\nnsg = parameters['nsg']\nnsg_resource_group = parameters['nsg resource group']\nsize = parameters['size'] # Preferred: 'Standard_B1ms'\nextension = 'OmsAgentForLinux'\npublisher = 'Microsoft.EnterpriseCloud.Monitoring'\nkey_vault = parameters['key vault']\nsubscription = str(json.loads(subprocess.check_output('az keyvault secret show --name subscription-id --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\nworkspace_id = str(json.loads(subprocess.check_output('az keyvault secret show --name workspace-id --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\nworkspace_key = str(json.loads(subprocess.check_output('az keyvault secret show --name workspace-key --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\npublic_settings = { \"workspaceId\": workspace_id }\nprivate_settings = { \"workspaceKey\": workspace_key }\nnsg_uri = \"/subscriptions/\" + subscription + \"/resourceGroups/\" + nsg_resource_group + \"/providers/Microsoft.Network/networkSecurityGroups/\" + nsg\nssh_private = parameters['ssh private']\nssh_public = ssh_private + '.pub'\nif parameters['old version']:\n    old_version = parameters['old version']\n\n# Sometimes Azure VM images become unavailable or are unavailable in certain regions, lets check...\nfor distname, image in images.iteritems():\n    img_publisher, _, sku, _ = image.split(':')\n    if subprocess.check_output('az vm image list --all --location {0} --publisher {1} --sku {2}'.format(location, img_publisher, sku), shell=True) == '[]\\n':\n        print('Could not find image for {0} in {1}, please double check VM image availability'.format(distname, location))\n        exit()\n    else:\n        print('VM image availability successfully validated')\n\n# Detect the host system and validate nsg\nif system() == 'Windows':\n    if os.system('az network nsg show --resource-group {0} --name {1} --query \"[?n]\"'.format(nsg_resource_group, nsg)) == 0:\n        print(\"Network Security Group successfully validated\")\nelif system() == 'Linux':\n    if os.system('az network nsg show --resource-group {0} --name {1} > /dev/null 2>&1'.format(nsg_resource_group, nsg)) == 0:\n        print(\"Network Security Group successfully validated\")\nelse:\n    print(\"\"\"Please verify that the nsg or nsg resource group are valid and are in the right subscription.\nIf there is no Network Security Group, please create new one. 
NSG is a must to create a VM in this testing.\"\"\")\n    exit()\n\n# Remove intermediate log and html files\nos.system('rm -rf ./*.log ./*.html ./results 2> /dev/null')\n\nresult_html_file = open(\"finalresult.html\", 'a+')\n\n# Common logic to save command itself\ndef write_log_command(log, cmd):\n    print(cmd)\n    log.write(cmd + '\\n')\n    log.write('-' * 40)\n    log.write('\\n')\n\n# Common logic to append a file to another\ndef append_file(src, dest):\n    f = open(src, 'r')\n    dest.write(f.read())\n    f.close()\n\n# Get time difference in minutes and seconds\ndef get_time_diff(timevalue1, timevalue2):\n    timediff = timevalue2 - timevalue1\n    minutes, seconds = divmod(timediff.days * 86400 + timediff.seconds, 60)\n    return minutes, seconds\n\n# Correct potential windows line endings with dos2unix command\ndef dos_2_unix():\n    os.system('dos2unix ./omsfiles/*')\n\n# Secure copy required files from local to vm\ndef copy_to_vm(dnsname, username, ssh_private, location):\n    os.system(\"scp -i {0} -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null -r omsfiles/* {1}@{2}.{3}.cloudapp.azure.com:/tmp/\".format(ssh_private, username, dnsname.lower(), location))\n\n# Secure copy files from vm to local\ndef copy_from_vm(dnsname, username, ssh_private, location, filename):\n    os.system(\"scp -i {0} -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null -r {1}@{2}.{3}.cloudapp.azure.com:/home/scratch/{4} omsfiles/.\".format(ssh_private, username, dnsname.lower(), location, filename))\n\n# Run scripts on vm using AZ CLI\ndef run_command(resource_group, vmname, commandid, script):\n    os.system('az vm run-command invoke -g {0} -n {1} --command-id {2} --scripts \"{3}\" {4}'.format(resource_group, vmname, commandid, script, runwith))\n\n# Create vm using AZ CLI\ndef create_vm(resource_group, vmname, image, username, ssh_public, location, dnsname, vmsize, nsg_uri):\n    os.system('az vm create -g {0} -n {1} --image {2} --admin-username {3} --ssh-key-value @{4} --location {5} --public-ip-address-dns-name {6} --size {7} --nsg {8} {9}'.format(resource_group, vmname, image, username, ssh_public, location, dnsname, vmsize, nsg_uri, runwith))\n\n# Add extension to vm using AZ CLI\ndef add_extension(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option):\n    os.system('az vm extension set -n {0} --publisher {1} --vm-name {2} --resource-group {3} --protected-settings \"{4}\" --settings \"{5}\" {6} {7}'.format(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option, runwith))\n\n# Delete extension from vm using AZ CLI\ndef delete_extension(extension, vmname, resource_group):\n    os.system('az vm extension delete -n {0} --vm-name {1} --resource-group {2} {3}'.format(extension, vmname, resource_group, runwith))\n\n# Get vm details using AZ CLI\ndef get_vm_resources(resource_group, vmname):\n    vm_cli_out = json.loads(subprocess.check_output('az vm show -g {0} -n {1}'.format(resource_group, vmname), shell=True))\n    os_disk = vm_cli_out['storageProfile']['osDisk']['name']\n    nic_name = vm_cli_out['networkProfile']['networkInterfaces'][0]['id'].split('/')[-1]\n    ip_list = json.loads(subprocess.check_output('az vm list-ip-addresses -n {0} -g {1}'.format(vmname, resource_group), shell=True))\n    ip_name = ip_list[0]['virtualMachine']['network']['publicIpAddresses'][0]['name']\n    return os_disk, nic_name, ip_name\n\ndef get_extension_version_now(resource_group, 
vmname, extension):\n    vm_ext_out = json.loads(subprocess.check_output('az vm extension show --resource-group {0} --vm-name {1} --name {2} --expand instanceView'.format(resource_group, vmname, extension), shell=True))\n    installed_version = int(''.join(str(vm_ext_out[\"instanceView\"][\"typeHandlerVersion\"]).split('.')))\n    return installed_version\n\n# Delete vm using AZ CLI\ndef delete_vm(resource_group, vmname):\n    os.system('az vm delete -g {0} -n {1} --yes {2}'.format(resource_group, vmname, runwith))\n\n# Delete vm disk using AZ CLI\ndef delete_vm_disk(resource_group, os_disk):\n    os.system('az disk delete --resource-group {0} --name {1} --yes {2}'.format(resource_group, os_disk, runwith))\n\n# Delete vm network interface using AZ CLI\ndef delete_nic(resource_group, nic_name):\n    os.system('az network nic delete --resource-group {0} --name {1} --no-wait {2}'.format(resource_group, nic_name, runwith))\n\n# Delete vm ip from AZ CLI\ndef delete_ip(resource_group, ip_name):\n    os.system('az network public-ip delete --resource-group {0} --name {1} {2}'.format(resource_group, ip_name, runwith))\n\n\nhtmlstart = \"\"\"<!DOCTYPE html>\n<html>\n<head>\n<style>\ntable {\n    font-family: arial, sans-serif;\n    border-collapse: collapse;\n    width: 100%;\n}\n\ntable:not(th) {\n    font-weight: lighter;\n}\n\ntd, th {\n    border: 1px solid #dddddd;\n    text-align: left;\n    padding: 8px;\n}\n\ntr:nth-child(even) {\n    background-color: #dddddd;\n}\n</style>\n</head>\n<body>\n\"\"\"\nresult_html_file.write(htmlstart)\n\ndef main():\n    \"\"\"Orchestrate the fundamental testing steps outlined in the header docstring.\"\"\"\n    if is_instantupgrade:\n        install_oms_msg = create_vm_and_install_old_extension()\n        verify_oms_msg = verify_data()\n        instantupgrade_status_msg = force_upgrade_extension()\n        instantupgrade_verify_msg = verify_data()\n    else:\n        instantupgrade_verify_msg, instantupgrade_status_msg = None, None\n        install_oms_msg = create_vm_and_install_extension()\n        verify_oms_msg = verify_data()\n\n    if is_autoupgrade:\n        autoupgrade_status_msg = autoupgrade()\n        autoupgrade_verify_msg = verify_data()\n    else:\n        autoupgrade_verify_msg, autoupgrade_status_msg = None, None\n    \n    remove_oms_msg = remove_extension()\n    reinstall_oms_msg = reinstall_extension()\n    if is_long:\n        for i in reversed(range(1, LONG_DELAY + 1)):\n            sys.stdout.write('\\rLong-term delay: T-{0} minutes...'.format(i))\n            sys.stdout.flush()\n            sleep(60)\n        print('')\n        long_status_msg = check_status()\n        long_verify_msg = verify_data()\n    else:\n        long_verify_msg, long_status_msg = None, None\n    remove_extension_and_delete_vm()\n    messages = (install_oms_msg, verify_oms_msg, instantupgrade_verify_msg, instantupgrade_status_msg, autoupgrade_verify_msg, autoupgrade_status_msg, remove_oms_msg, reinstall_oms_msg, long_verify_msg, long_status_msg)\n    create_report(messages)\n    mv_result_files()\n\n\ndef create_vm_and_install_extension():\n    \"\"\"Create vm and install the extension, returning HTML results.\"\"\"\n\n    message = \"\"\n    update_option = \"\"\n    install_times.clear()\n    for distname, image in images.iteritems():\n        uid = rstr.xeger(r'[0-9a-f]{8}')\n        vmname = distname.lower() + '-' + uid\n        vmnames.append(vmname)\n        dnsname = vmname\n        vm_log_file = distname.lower() + \"result.log\"\n        vm_html_file = 
distname.lower() + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        print(\"\\nCreate VM and Install Extension - {0}: {1} \\n\".format(vmname, image))\n        create_vm(resource_group, vmname, image, username, ssh_public, location, dnsname, size, nsg_uri)\n        dos_2_unix()\n        copy_to_vm(dnsname, username, ssh_private, location)\n        delete_extension(extension, vmname, resource_group)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /tmp/oms_extension_run_script.py -preinstall')\n        add_extension(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -postinstall')\n        install_times.update({vmname: datetime.now()})\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -injectlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After Creating VM and Adding OMS Extension')\n        html_open.write('<h1 id=\"{0}\"> VM: {0} <h1>'.format(distname))\n        html_open.write(\"<h2> Install OMS Agent </h2>\")\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status', 'r').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Install Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Install Failed</span></td>\"\"\"\n    return message\n\ndef create_vm_and_install_old_extension():\n    \"\"\"Create vm and install a specific version of the extension, returning HTML results.\"\"\"\n\n    message = \"\"\n    update_option = '--version {0} --no-auto-upgrade'.format(old_version)\n    install_times.clear()\n    for distname, image in images.iteritems():\n        uid = rstr.xeger(r'[0-9a-f]{8}')\n        vmname = distname.lower() + '-' + uid\n        vmnames.append(vmname)\n        dnsname = vmname\n        vm_log_file = distname.lower() + \"result.log\"\n        vm_html_file = distname.lower() + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        print(\"\\nCreate VM and Install Extension {0} v-{1} - {2}: {3} \\n\".format(extension, old_version, vmname, image))\n        create_vm(resource_group, vmname, image, username, ssh_public, location, dnsname, size, nsg_uri)\n        dos_2_unix()\n        copy_to_vm(dnsname, username, ssh_private, location)\n        delete_extension(extension, vmname, resource_group)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /tmp/oms_extension_run_script.py -preinstall')\n        add_extension(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py 
-postinstall')\n        install_times.update({vmname: datetime.now()})\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -injectlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, \"Status After Creating VM and Adding OMS Extension version: {0}\".format(old_version))\n        html_open.write('<h1 id=\"{0}\"> VM: {0} <h1>'.format(distname))\n        html_open.write(\"<h2> Install OMS Agent version: {0} </h2>\".format(old_version))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status', 'r').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Install Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Install Failed</span></td>\"\"\"\n    return message\n\ndef force_upgrade_extension():\n    \"\"\" Force Update the extension to the latest version \"\"\"\n\n    message = \"\"\n    update_option = '--force-update'\n    install_times.clear()\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        dnsname = vmname\n        print(\"\\n Force Upgrade Extension: {0} \\n\".format(vmname))\n        add_extension(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -postinstall')\n        install_times.update({vmname: datetime.now()})\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -injectlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After Force Upgrading OMS Extension')\n        html_open.write('<h2> Force Upgrade Extension: {0} <h2>'.format(vmname))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Reinstall Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Reinstall Failed</span></td>\"\"\"\n    return message\n\ndef verify_data():\n    \"\"\"Verify data end-to-end, returning HTML results.\"\"\"\n\n    message = \"\"\n   
 for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        \n        # Delay to allow data to propagate\n        while datetime.now() < (install_times[vmname] + timedelta(minutes=E2E_DELAY)):\n            mins, secs = get_time_diff(datetime.now(), install_times[vmname] + timedelta(minutes=E2E_DELAY))\n            sys.stdout.write('\\rE2E propagation delay: {0} minutes {1} seconds...'.format(mins, secs))\n            sys.stdout.flush()\n            sleep(1)\n        print('')\n        minutes, _ = get_time_diff(install_times[vmname], datetime.now())\n        timespan = 'PT{0}M'.format(minutes)\n        data = check_e2e(vmname, timespan)\n\n        # write detailed table for vm\n        html_open.write(\"<h2> Verify Data from OMS workspace </h2>\")\n        write_log_command(log_open, 'Status After Verifying Data')\n        results = data[distname][0]\n        log_open.write(distname + ':\\n' + json.dumps(results, indent=4, separators=(',', ': ')) + '\\n')\n        # prepend distro column to results row before generating the table\n        data = [OrderedDict([('Distro', distname)] + results.items())]\n        out = json2html.convert(data)\n        html_open.write(out)\n\n        # write to summary table\n        from verify_e2e import success_count\n        if success_count == 6:\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Verify Success</td>\"\"\"\n        elif 0 < success_count < 6:\n            from verify_e2e import success_sources, failed_sources\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>{0} Success</span> <br><br><span style='background-color: red; color: white'>{1} Failed</span></td>\"\"\".format(', '.join(success_sources), ', '.join(failed_sources))\n        elif success_count == 0:\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Verify Failed</span></td>\"\"\"\n    return message\n\ndef autoupgrade():\n    \"\"\" Waits for the extension to get updated automatically and continues with the tests after. Maximum wait time is 26 hours \"\"\"\n\n    message = \"\"\n    install_times.clear()\n    for vmname in vmnames:\n        initial_version = get_extension_version_now(resource_group, vmname, extension)\n        time_lapsed = 0\n        while initial_version >= get_extension_version_now(resource_group, vmname, extension):\n            sleep(AUTOUPGRADE_DELAY*60)\n            time_lapsed+=AUTOUPGRADE_DELAY\n            if time_lapsed < 1440:\n                sys.stdout.write(\"waiting for new version. Time Lapsed: {0} minutes\".format(time_lapsed))\n                sys.stdout.flush()\n            elif 1440 <= time_lapsed < 1560:\n                sys.stdout.write('Process waiting for more than 24 hrs. Please check the deployment of the new version is completed or not. This wait will end in {0} minutes'.format(1560 - time_lapsed))\n                sys.stdout.flush()\n            elif time_lapsed >= 1560:\n                print(\"\"\"Process waiting for more than 26 hrs. 
No New version of extension has been deployed.\n                    If a new version is deployed, please check for any errors and re-run\"\"\")\n                break\n\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        dnsname = vmname\n        print(\"\\n Checking Status After AutoUpgrade: {0} \\n\".format(vmname))\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -postinstall')\n        install_times.update({vmname: datetime.now()})\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -injectlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After AutoUpgrade OMS Extension')\n        html_open.write('<h2> Status After AutoUpgrade OMS Extension: {0} <h2>'.format(vmname))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>AutoUpgrade Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>AutoUpgrade Failed</span></td>\"\"\"\n    return message\n\ndef remove_extension():\n    \"\"\"Remove the extension, returning HTML results.\"\"\"\n\n    message = \"\"\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        dnsname = vmname\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -copyomslogs')\n        print(\"\\nRemove Extension: {0} \\n\".format(vmname))\n        delete_extension(extension, vmname, resource_group)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -status')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After Removing OMS Extension')\n        html_open.write('<h2> Remove Extension: {0} <h2>'.format(vmname))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status', 'r').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style=\"background-color: red; color: white\">Remove Failed</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style=\"background-color: red; color: white\">Onboarding Failed</span></td>\"\"\"\n        elif 
status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style=\"background-color: #66ff99\">Remove Success</span></td>\"\"\"\n    return message\n\n\ndef reinstall_extension():\n    \"\"\"Reinstall the extension, returning HTML results.\"\"\"\n\n    update_option = '--force-update'\n    message = \"\"\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        dnsname = vmname\n        print(\"\\n Reinstall Extension: {0} \\n\".format(vmname))\n        add_extension(extension, publisher, vmname, resource_group, private_settings, public_settings, update_option)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -postinstall')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After Reinstall OMS Extension')\n        html_open.write('<h2> Reinstall Extension: {0} <h2>'.format(vmname))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Reinstall Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Reinstall Failed</span></td>\"\"\"\n    return message\n\ndef check_status():\n    \"\"\"Check agent status.\"\"\"\n\n    message = \"\"\n    install_times.clear()\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        vm_html_file = distname + \"result.html\"\n        log_open = open(vm_log_file, 'a+')\n        html_open = open(vm_html_file, 'a+')\n        dnsname = vmname\n        print(\"\\n Checking Status: {0} \\n\".format(vmname))\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -status')\n        install_times.update({vmname: datetime.now()})\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -injectlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, 'omsresults.*')\n        write_log_command(log_open, 'Status After Long Run OMS Extension')\n        html_open.write('<h2> Status After Long Run OMS Extension: {0} <h2>'.format(vmname))\n        append_file('omsfiles/omsresults.log', log_open)\n        append_file('omsfiles/omsresults.html', html_open)\n        log_open.close()\n        html_open.close()\n        status = open('omsfiles/omsresults.status').read()\n        if status == \"Agent Found\":\n            message += \"\"\"\n                            <td><span style='background-color: #66ff99'>Reinstall Success</span></td>\"\"\"\n        elif status == \"Onboarding Failed\":\n            message += \"\"\"\n                            <td><span 
style='background-color: red; color: white'>Onboarding Failed</span></td>\"\"\"\n        elif status == \"Agent Not Found\":\n            message += \"\"\"\n                            <td><span style='background-color: red; color: white'>Reinstall Failed</span></td>\"\"\"\n    return message\n\ndef remove_extension_and_delete_vm():\n    \"\"\"Remove extension and delete vm.\"\"\"\n\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        vm_log_file = distname + \"result.log\"\n        log_open = open(vm_log_file, 'a+')\n        dnsname = vmname\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -copyomslogs')\n        copy_from_vm(dnsname, username, ssh_private, location, '{0}-omsagent.log'.format(distname))\n        print(\"\\n Remove extension and Delete VM: {0} \\n\".format(vmname))\n        delete_extension(extension, vmname, resource_group)\n        run_command(resource_group, vmname, 'RunShellScript', 'python -u /home/scratch/oms_extension_run_script.py -copyextlogs')\n        copy_from_vm(dnsname, username, ssh_private, location, '{0}-extnwatcher.log'.format(distname))\n        disk, nic, ip = get_vm_resources(resource_group, vmname)\n        delete_vm(resource_group, vmname)\n        delete_vm_disk(resource_group, disk)\n        delete_nic(resource_group, nic)\n        delete_ip(resource_group, ip)\n        append_file('omsfiles/{0}-extnwatcher.log'.format(distname), log_open)\n        append_file('omsfiles/{0}-omsagent.log'.format(distname), log_open)\n        log_open.close()\n\ndef create_report(messages):\n    \"\"\"Compile the final HTML report.\"\"\"\n\n    install_oms_msg, verify_oms_msg, instantupgrade_verify_msg, instantupgrade_status_msg, autoupgrade_verify_msg, autoupgrade_status_msg, remove_oms_msg, reinstall_oms_msg, long_verify_msg, long_status_msg = messages\n    result_log_file = open(\"finalresult.log\", \"a+\")\n\n    # summary table\n    diststh = \"\"\n    resultsth = \"\"\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        diststh += \"\"\"\n                <th>{0}</th>\"\"\".format(distname)\n        resultsth += \"\"\"\n                <th><a href='#{0}'>{0} results</a></th>\"\"\".format(distname)\n    \n    if instantupgrade_verify_msg and instantupgrade_status_msg:\n        instantupgrade_summary = \"\"\"\n        <tr>\n          <td>Instant Upgrade Verify Data</td>\n          {0}\n        </tr>\n        <tr>\n          <td>Instant Upgrade Status</td>\n          {1}\n        </tr>\n        \"\"\".format(instantupgrade_verify_msg, instantupgrade_status_msg)\n    else:\n        instantupgrade_summary = \"\"\n\n    if autoupgrade_verify_msg and autoupgrade_status_msg:\n        autoupgrade_summary = \"\"\"\n        <tr>\n          <td>AutoUpgrade Verify Data</td>\n          {0}\n        </tr>\n        <tr>\n          <td>AutoUpgrade Status</td>\n          {1}\n        </tr>\n        \"\"\".format(autoupgrade_verify_msg, autoupgrade_status_msg)\n    else:\n        autoupgrade_summary = \"\"\n    \n    # pre-compile long-running summary\n    if long_verify_msg and long_status_msg:\n        long_running_summary = \"\"\"\n        <tr>\n          <td>Long-Term Verify Data</td>\n          {0}\n        </tr>\n        <tr>\n          <td>Long-Term Status</td>\n          {1}\n        </tr>\n        \"\"\".format(long_verify_msg, long_status_msg)\n    else:\n        long_running_summary = \"\"\n\n    statustable = \"\"\"\n    <table>\n    <caption><h2>Test 
Result Table</h2></caption>\n    <tr>\n        <th>Distro</th>\n        {0}\n    </tr>\n    <tr>\n        <td>Install OMSAgent</td>\n        {1}\n    </tr>\n    <tr>\n        <td>Verify Data</td>\n        {2}\n    </tr>\n    {3}\n    {4}\n    <tr>\n        <td>Remove OMSAgent</td>\n        {5}\n    </tr>\n    <tr>\n        <td>Reinstall OMSAgent</td>\n        {6}\n    </tr>\n    {7}\n    <tr>\n        <td>Result Link</td>\n        {8}\n    </tr>\n    </table>\n    \"\"\".format(diststh, install_oms_msg, verify_oms_msg, instantupgrade_summary, autoupgrade_summary, remove_oms_msg, reinstall_oms_msg, long_running_summary, resultsth)\n    result_html_file.write(statustable)\n\n    # Create final html & log file\n    for vmname in vmnames:\n        distname = vmname.split('-')[0]\n        append_file(distname + \"result.log\", result_log_file)\n        append_file(distname + \"result.html\", result_html_file)\n    \n    result_log_file.close()\n    htmlend = \"\"\"\n    </body>\n    </html>\n    \"\"\"\n    result_html_file.write(htmlend)\n    result_html_file.close()\n\ndef mv_result_files():\n    if not os.path.exists('results'):\n        os.makedirs('results')\n    \n    file_types = ['*result.*', 'omsfiles/*-extnwatcher.log', 'omsfiles/*-omsagent.log']\n    for files in file_types:\n        for f in glob.glob(files):\n            shutil.move(f, 'results/')\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/apache_access.log",
    "content": "41.88.172.43 - - [18/Oct/2018:00:34:19 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4955 \"http://phillips.org/homepage/\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5342 (KHTML, like Gecko) Chrome/15.0.818.0 Safari/5342\"\n97.77.75.235 - - [18/Oct/2018:00:39:13 +0000] \"PUT /list HTTP/1.0\" 200 5022 \"http://www.goodwin.com/login.htm\" \"Mozilla/5.0 (Windows 98; Win 9x 4.90; it-IT; rv:1.9.1.20) Gecko/2010-05-09 00:24:19 Firefox/3.6.7\"\n6.33.183.64 - - [18/Oct/2018:00:42:47 +0000] \"PUT /apps/cart.jsp?appID=7380 HTTP/1.0\" 200 4961 \"http://hess-jones.com/categories/register.html\" \"Mozilla/5.0 (Windows CE) AppleWebKit/5332 (KHTML, like Gecko) Chrome/13.0.883.0 Safari/5332\"\n50.159.139.180 - - [18/Oct/2018:00:45:04 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4928 \"http://www.campbell-farrell.biz/wp-content/tag/blog/index/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/5322 (KHTML, like Gecko) Chrome/15.0.818.0 Safari/5322\"\n82.191.47.90 - - [18/Oct/2018:00:50:01 +0000] \"GET /wp-admin HTTP/1.0\" 200 4931 \"http://erickson.net/main/\" \"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2 like Mac OS X; sl-SI) AppleWebKit/533.19.5 (KHTML, like Gecko) Version/4.0.5 Mobile/8B113 Safari/6533.19.5\"\n154.50.38.159 - - [18/Oct/2018:00:51:46 +0000] \"GET /wp-content HTTP/1.0\" 200 4979 \"http://www.palmer.com/index.php\" \"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_3 like Mac OS X; sl-SI) AppleWebKit/533.7.4 (KHTML, like Gecko) Version/3.0.5 Mobile/8B117 Safari/6533.7.4\"\n140.54.30.228 - - [18/Oct/2018:00:53:07 +0000] \"POST /wp-content HTTP/1.0\" 200 5041 \"http://www.sharp-kidd.com/faq.php\" \"Mozilla/5.0 (Windows 98; en-US; rv:1.9.0.20) Gecko/2015-01-02 20:55:20 Firefox/3.8\"\n29.153.222.134 - - [18/Oct/2018:00:54:28 +0000] \"GET /search/tag/list HTTP/1.0\" 200 5007 \"http://martinez.com/list/wp-content/post/\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/5321 (KHTML, like Gecko) Chrome/14.0.827.0 Safari/5321\"\n89.21.173.120 - - [18/Oct/2018:00:57:37 +0000] \"GET /posts/posts/explore HTTP/1.0\" 301 4979 \"http://kane.org/homepage/\" \"Mozilla/5.0 (compatible; MSIE 5.0; Windows NT 5.1; Trident/4.1)\"\n237.144.104.90 - - [18/Oct/2018:00:58:23 +0000] \"PUT /app/main/posts HTTP/1.0\" 404 5031 \"http://www.guerrero-schroeder.com/list/categories/search/\" \"Mozilla/5.0 (Windows 98; Win 9x 4.90; en-US; rv:1.9.2.20) Gecko/2015-11-28 04:08:22 Firefox/3.6.3\"\n239.4.131.80 - - [18/Oct/2018:00:59:46 +0000] \"PUT /explore HTTP/1.0\" 404 5005 \"http://www.sanchez.com/terms/\" \"Mozilla/5.0 (Windows CE; sl-SI; rv:1.9.1.20) Gecko/2017-05-24 17:33:32 Firefox/3.8\"\n72.190.211.123 - - [18/Oct/2018:01:04:24 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 5015 \"http://www.byrd-kerr.com/home/\" \"Mozilla/5.0 (Windows 95; it-IT; rv:1.9.0.20) Gecko/2018-03-15 01:33:17 Firefox/13.0\"\n91.80.110.133 - - [18/Oct/2018:01:05:39 +0000] \"GET /apps/cart.jsp?appID=3963 HTTP/1.0\" 200 4933 \"http://guzman.org/\" \"Mozilla/5.0 (Windows 98; sl-SI; rv:1.9.2.20) Gecko/2016-11-21 02:54:31 Firefox/3.8\"\n149.90.53.105 - - [18/Oct/2018:01:06:50 +0000] \"DELETE /wp-content HTTP/1.0\" 200 4996 \"http://www.schroeder.com/privacy/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_0 rv:2.0; en-US) AppleWebKit/533.3.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.3.1\"\n78.40.84.134 - - [18/Oct/2018:01:09:35 +0000] \"GET /apps/cart.jsp?appID=9468 HTTP/1.0\" 200 5050 \"http://thomas-smith.biz/\" \"Mozilla/5.0 (X11; Linux x86_64; rv:1.9.5.20) Gecko/2012-05-17 06:37:48 Firefox/6.0\"\n30.224.7.147 - - 
[18/Oct/2018:01:13:04 +0000] \"GET /wp-content HTTP/1.0\" 200 4980 \"http://www.ayala-rodriguez.net/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_6_3; rv:1.9.4.20) Gecko/2012-09-14 05:10:58 Firefox/4.0\"\n190.241.3.20 - - [18/Oct/2018:01:14:10 +0000] \"GET /explore HTTP/1.0\" 200 4875 \"http://www.johnson.com/login.php\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/5320 (KHTML, like Gecko) Chrome/13.0.829.0 Safari/5320\"\n245.197.148.127 - - [18/Oct/2018:01:15:28 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4976 \"http://griffith-miller.org/home.php\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_7_3 rv:2.0; en-US) AppleWebKit/535.46.3 (KHTML, like Gecko) Version/5.0.4 Safari/535.46.3\"\n90.8.112.249 - - [18/Oct/2018:01:20:27 +0000] \"GET /apps/cart.jsp?appID=3243 HTTP/1.0\" 200 4991 \"http://holland-brown.com/terms/\" \"Opera/8.65.(Windows NT 5.0; it-IT) Presto/2.9.161 Version/10.00\"\n131.127.138.38 - - [18/Oct/2018:01:23:07 +0000] \"GET /app/main/posts HTTP/1.0\" 200 5060 \"http://ware-cole.net/wp-content/tag/main/login/\" \"Mozilla/5.0 (Windows 95; en-US; rv:1.9.2.20) Gecko/2012-01-02 22:56:38 Firefox/3.6.12\"\n164.26.165.230 - - [18/Oct/2018:01:24:10 +0000] \"DELETE /wp-admin HTTP/1.0\" 200 5028 \"http://harding-murphy.biz/author.html\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5351 (KHTML, like Gecko) Chrome/13.0.868.0 Safari/5351\"\n5.3.62.184 - - [18/Oct/2018:01:25:09 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4928 \"http://www.stafford-hill.biz/\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5350 (KHTML, like Gecko) Chrome/15.0.815.0 Safari/5350\"\n181.162.5.173 - - [18/Oct/2018:01:26:41 +0000] \"GET /list HTTP/1.0\" 200 5038 \"http://www.hall.com/posts/index/\" \"Mozilla/5.0 (Macintosh; PPC Mac OS X 10_7_3) AppleWebKit/5322 (KHTML, like Gecko) Chrome/15.0.880.0 Safari/5322\"\n98.153.76.19 - - [18/Oct/2018:01:28:10 +0000] \"DELETE /wp-admin HTTP/1.0\" 200 5018 \"http://www.shaw-cole.com/\" \"Opera/9.15.(X11; Linux i686; sl-SI) Presto/2.9.164 Version/10.00\"\n127.70.246.76 - - [18/Oct/2018:01:29:45 +0000] \"PUT /wp-admin HTTP/1.0\" 200 5071 \"http://www.french.net/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.6.20) Gecko/2013-12-07 23:58:36 Firefox/3.6.17\"\n244.88.20.30 - - [18/Oct/2018:01:30:27 +0000] \"PUT /posts/posts/explore HTTP/1.0\" 200 5029 \"http://watson.info/register.html\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_4 rv:5.0; sl-SI) AppleWebKit/534.44.6 (KHTML, like Gecko) Version/5.0.4 Safari/534.44.6\"\n36.196.205.161 - - [18/Oct/2018:01:31:44 +0000] \"POST /wp-content HTTP/1.0\" 200 4948 \"http://harris.com/app/terms/\" \"Mozilla/5.0 (X11; Linux x86_64; rv:1.9.6.20) Gecko/2014-10-08 23:33:35 Firefox/3.6.17\"\n176.75.22.168 - - [18/Oct/2018:01:32:19 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 5058 \"http://conley.biz/tags/login.htm\" \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/5312 (KHTML, like Gecko) Chrome/13.0.844.0 Safari/5312\"\n241.178.144.215 - - [18/Oct/2018:01:36:36 +0000] \"PUT /wp-content HTTP/1.0\" 200 5028 \"http://holden.com/login/\" \"Mozilla/5.0 (Windows CE) AppleWebKit/5330 (KHTML, like Gecko) Chrome/13.0.824.0 Safari/5330\"\n90.204.24.160 - - [18/Oct/2018:01:37:23 +0000] \"PUT /list HTTP/1.0\" 200 4969 \"http://www.proctor-simmons.info/categories/author/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.7.20) Gecko/2013-10-05 02:04:20 Firefox/3.6.6\"\n246.240.89.237 - - [18/Oct/2018:01:39:12 +0000] \"PUT /wp-content HTTP/1.0\" 200 4986 \"http://bailey.org/explore/wp-content/main.php\" \"Mozilla/5.0 (compatible; MSIE 9.0; Windows 98; 
Trident/5.1)\"\n5.76.9.164 - - [18/Oct/2018:01:40:38 +0000] \"DELETE /app/main/posts HTTP/1.0\" 200 5045 \"http://www.buck.info/categories/wp-content/homepage.htm\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/5362 (KHTML, like Gecko) Chrome/15.0.847.0 Safari/5362\"\n197.98.233.63 - - [18/Oct/2018:01:42:03 +0000] \"GET /explore HTTP/1.0\" 200 5034 \"http://www.martin-howard.com/explore/search/privacy.html\" \"Mozilla/5.0 (Windows CE; it-IT; rv:1.9.2.20) Gecko/2012-01-07 08:08:21 Firefox/12.0\"\n123.26.215.34 - - [18/Oct/2018:01:43:13 +0000] \"DELETE /search/tag/list HTTP/1.0\" 200 4997 \"http://frazier-schmidt.com/main/main/\" \"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_2 like Mac OS X; en-US) AppleWebKit/534.18.7 (KHTML, like Gecko) Version/3.0.5 Mobile/8B117 Safari/6534.18.7\"\n93.167.30.46 - - [18/Oct/2018:01:45:30 +0000] \"POST /posts/posts/explore HTTP/1.0\" 200 4914 \"http://www.stanley-evans.com/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.7.20) Gecko/2018-05-29 09:05:34 Firefox/15.0\"\n0.12.43.164 - - [18/Oct/2018:01:49:46 +0000] \"GET /explore HTTP/1.0\" 200 4924 \"http://allison.com/app/explore/app/main/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.5.20) Gecko/2013-09-24 01:40:33 Firefox/3.8\"\n28.33.105.197 - - [18/Oct/2018:01:50:50 +0000] \"GET /app/main/posts HTTP/1.0\" 200 4970 \"http://www.jennings.com/categories/homepage.html\" \"Mozilla/5.0 (Windows NT 5.2) AppleWebKit/5310 (KHTML, like Gecko) Chrome/14.0.876.0 Safari/5310\"\n199.171.27.50 - - [18/Oct/2018:01:54:52 +0000] \"GET /app/main/posts HTTP/1.0\" 200 4934 \"http://www.sanders-shah.net/tag/post.php\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5362 (KHTML, like Gecko) Chrome/15.0.897.0 Safari/5362\"\n134.36.90.225 - - [18/Oct/2018:01:56:10 +0000] \"DELETE /list HTTP/1.0\" 200 5053 \"http://smith-rodriguez.com/explore/privacy/\" \"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_0 like Mac OS X; it-IT) AppleWebKit/535.42.4 (KHTML, like Gecko) Version/4.0.5 Mobile/8B115 Safari/6535.42.4\"\n46.187.133.243 - - [18/Oct/2018:01:57:08 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4952 \"http://henson.net/categories/blog/author/\" \"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_2 like Mac OS X; it-IT) AppleWebKit/532.7.1 (KHTML, like Gecko) Version/4.0.5 Mobile/8B117 Safari/6532.7.1\"\n140.81.9.137 - - [18/Oct/2018:02:00:59 +0000] \"POST /explore HTTP/1.0\" 200 4927 \"http://www.simmons.org/privacy.php\" \"Mozilla/5.0 (Windows CE; sl-SI; rv:1.9.2.20) Gecko/2013-07-30 04:10:19 Firefox/3.8\"\n31.107.2.231 - - [18/Oct/2018:02:02:48 +0000] \"DELETE /list HTTP/1.0\" 200 5006 \"http://bray.biz/\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_8_1; rv:1.9.4.20) Gecko/2012-02-25 04:00:09 Firefox/3.6.15\"\n48.156.39.28 - - [18/Oct/2018:02:04:12 +0000] \"GET /apps/cart.jsp?appID=6250 HTTP/1.0\" 301 4988 \"http://www.lopez.com/\" \"Mozilla/5.0 (compatible; MSIE 6.0; Windows CE; Trident/3.1)\"\n231.240.140.141 - - [18/Oct/2018:02:05:25 +0000] \"GET /app/main/posts HTTP/1.0\" 200 4976 \"http://chandler.com/faq.html\" \"Mozilla/5.0 (Windows 98) AppleWebKit/5361 (KHTML, like Gecko) Chrome/13.0.807.0 Safari/5361\"\n197.49.239.55 - - [18/Oct/2018:02:07:56 +0000] \"PUT /posts/posts/explore HTTP/1.0\" 200 5053 \"http://smith.biz/posts/wp-content/posts/main/\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_8_5 rv:6.0; en-US) AppleWebKit/533.15.7 (KHTML, like Gecko) Version/4.0.3 Safari/533.15.7\"\n185.165.245.238 - - [18/Oct/2018:02:11:59 +0000] \"GET /list HTTP/1.0\" 200 5010 \"http://mercado.info/faq.html\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.6.20) 
Gecko/2011-04-30 23:25:27 Firefox/3.6.4\"\n65.96.205.50 - - [18/Oct/2018:02:12:34 +0000] \"GET /app/main/posts HTTP/1.0\" 200 5063 \"http://brown.net/list/category/category/faq/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.6.20) Gecko/2015-01-26 02:39:38 Firefox/3.8\"\n149.35.179.83 - - [18/Oct/2018:02:14:51 +0000] \"DELETE /wp-content HTTP/1.0\" 200 4893 \"http://www.adams-perkins.com/home/\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5342 (KHTML, like Gecko) Chrome/14.0.825.0 Safari/5342\"\n125.6.78.177 - - [18/Oct/2018:02:17:05 +0000] \"PUT /posts/posts/explore HTTP/1.0\" 404 4994 \"http://www.king.com/author.html\" \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/5320 (KHTML, like Gecko) Chrome/13.0.873.0 Safari/5320\"\n107.154.205.58 - - [18/Oct/2018:02:19:19 +0000] \"GET /apps/cart.jsp?appID=8180 HTTP/1.0\" 200 4996 \"http://www.morgan.com/category/app/author.html\" \"Mozilla/5.0 (Windows; U; Windows NT 5.01) AppleWebKit/533.34.5 (KHTML, like Gecko) Version/4.0 Safari/533.34.5\"\n169.176.221.189 - - [18/Oct/2018:02:22:57 +0000] \"GET /app/main/posts HTTP/1.0\" 200 5015 \"http://meadows.com/list/tag/app/post/\" \"Mozilla/5.0 (Windows NT 5.01) AppleWebKit/5322 (KHTML, like Gecko) Chrome/14.0.801.0 Safari/5322\"\n130.28.74.78 - - [18/Oct/2018:02:26:27 +0000] \"POST /app/main/posts HTTP/1.0\" 200 4963 \"http://www.ashley-trujillo.info/author.html\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_5; rv:1.9.4.20) Gecko/2016-03-23 15:04:20 Firefox/5.0\"\n133.76.165.208 - - [18/Oct/2018:02:27:00 +0000] \"GET /wp-admin HTTP/1.0\" 200 4978 \"http://www.frazier-schwartz.info/categories/app/app/post.jsp\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_7_9; rv:1.9.3.20) Gecko/2013-08-13 05:42:00 Firefox/14.0\"\n110.206.82.119 - - [18/Oct/2018:02:28:48 +0000] \"GET /list HTTP/1.0\" 301 4954 \"http://www.mitchell.biz/author.php\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; rv:1.9.2.20) Gecko/2012-10-23 18:30:25 Firefox/3.8\"\n168.198.62.27 - - [18/Oct/2018:02:30:12 +0000] \"GET /wp-content HTTP/1.0\" 200 4979 \"http://www.rodriguez.com/categories/post.php\" \"Mozilla/5.0 (Windows 98) AppleWebKit/5350 (KHTML, like Gecko) Chrome/13.0.884.0 Safari/5350\"\n14.117.101.228 - - [18/Oct/2018:02:31:00 +0000] \"GET /wp-admin HTTP/1.0\" 200 4952 \"http://stein.info/main.php\" \"Mozilla/5.0 (Windows 98) AppleWebKit/5320 (KHTML, like Gecko) Chrome/14.0.873.0 Safari/5320\"\n124.225.54.86 - - [18/Oct/2018:02:31:48 +0000] \"GET /app/main/posts HTTP/1.0\" 200 4970 \"http://kennedy.biz/category/app/posts/home.jsp\" \"Mozilla/5.0 (Windows NT 6.2; en-US; rv:1.9.1.20) Gecko/2012-05-31 15:55:28 Firefox/3.6.7\"\n155.191.142.109 - - [18/Oct/2018:02:36:19 +0000] \"PUT /explore HTTP/1.0\" 200 4982 \"http://farmer.com/category/search.php\" \"Mozilla/5.0 (Windows 98; sl-SI; rv:1.9.2.20) Gecko/2015-10-31 05:03:45 Firefox/3.8\"\n113.111.34.186 - - [18/Oct/2018:02:39:34 +0000] \"GET /explore HTTP/1.0\" 200 4922 \"http://www.powell.org/login/\" \"Mozilla/5.0 (Windows NT 5.0) AppleWebKit/5340 (KHTML, like Gecko) Chrome/14.0.857.0 Safari/5340\"\n4.87.205.98 - - [18/Oct/2018:02:44:29 +0000] \"GET /wp-admin HTTP/1.0\" 200 4982 \"http://www.santos.com/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.7.20) Gecko/2018-03-18 03:36:36 Firefox/12.0\"\n110.63.127.229 - - [18/Oct/2018:02:46:05 +0000] \"POST /list HTTP/1.0\" 200 4964 \"http://good.com/about.html\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4; rv:1.9.3.20) Gecko/2015-05-23 23:12:44 Firefox/3.8\"\n243.18.176.206 - - [18/Oct/2018:02:48:21 +0000] \"PUT /list HTTP/1.0\" 200 5043 
\"http://stephens-baldwin.com/tags/register/\" \"Mozilla/5.0 (Windows NT 5.2; en-US; rv:1.9.2.20) Gecko/2011-12-23 01:29:18 Firefox/3.8\"\n168.225.235.180 - - [18/Oct/2018:02:50:31 +0000] \"PUT /posts/posts/explore HTTP/1.0\" 404 5043 \"http://ortega.com/\" \"Mozilla/5.0 (Macintosh; PPC Mac OS X 10_8_3; rv:1.9.2.20) Gecko/2011-03-26 14:18:21 Firefox/7.0\"\n7.129.23.77 - - [18/Oct/2018:02:52:23 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4999 \"http://lewis-bruce.com/category/explore/home/\" \"Mozilla/5.0 (Windows 98) AppleWebKit/5332 (KHTML, like Gecko) Chrome/15.0.852.0 Safari/5332\"\n201.131.130.135 - - [18/Oct/2018:02:53:07 +0000] \"PUT /wp-content HTTP/1.0\" 200 4943 \"http://williams.com/\" \"Mozilla/5.0 (Windows 98; Win 9x 4.90; sl-SI; rv:1.9.1.20) Gecko/2014-01-14 15:51:52 Firefox/4.0\"\n160.40.4.98 - - [18/Oct/2018:02:54:00 +0000] \"POST /posts/posts/explore HTTP/1.0\" 200 5094 \"http://lucas-west.com/blog/category/privacy/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3; rv:1.9.5.20) Gecko/2018-01-10 05:44:15 Firefox/15.0\"\n233.25.14.15 - - [18/Oct/2018:02:57:06 +0000] \"DELETE /wp-content HTTP/1.0\" 404 4992 \"http://www.adams-clayton.biz/tags/search/wp-content/search/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3 rv:5.0; en-US) AppleWebKit/532.47.2 (KHTML, like Gecko) Version/4.0.2 Safari/532.47.2\"\n71.39.28.180 - - [18/Oct/2018:03:00:12 +0000] \"GET /list HTTP/1.0\" 200 4975 \"http://walker.com/\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.6.20) Gecko/2016-07-11 18:18:38 Firefox/3.8\"\n51.153.212.169 - - [18/Oct/2018:03:04:56 +0000] \"GET /app/main/posts HTTP/1.0\" 301 4933 \"http://chavez.com/list/categories/posts/terms/\" \"Mozilla/5.0 (Windows 98; Win 9x 4.90; sl-SI; rv:1.9.1.20) Gecko/2017-05-05 14:56:33 Firefox/3.6.11\"\n220.43.102.130 - - [18/Oct/2018:03:07:41 +0000] \"DELETE /apps/cart.jsp?appID=6069 HTTP/1.0\" 200 4936 \"http://douglas.com/homepage/\" \"Mozilla/5.0 (Windows NT 5.01; en-US; rv:1.9.2.20) Gecko/2016-07-30 00:30:02 Firefox/3.6.8\"\n139.218.49.46 - - [18/Oct/2018:03:11:02 +0000] \"PUT /apps/cart.jsp?appID=6207 HTTP/1.0\" 200 5043 \"http://perez.com/home/\" \"Mozilla/5.0 (Windows NT 5.01; en-US; rv:1.9.1.20) Gecko/2012-10-30 07:57:39 Firefox/11.0\"\n226.16.197.119 - - [18/Oct/2018:03:12:44 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4979 \"http://www.jones.com/main.asp\" \"Mozilla/5.0 (Macintosh; PPC Mac OS X 10_8_0; rv:1.9.6.20) Gecko/2011-01-20 10:45:37 Firefox/3.6.20\"\n40.119.68.10 - - [18/Oct/2018:03:14:45 +0000] \"DELETE /posts/posts/explore HTTP/1.0\" 200 4964 \"http://www.ortega.com/author.htm\" \"Mozilla/5.0 (X11; Linux x86_64; rv:1.9.5.20) Gecko/2011-07-22 13:30:52 Firefox/3.6.15\"\n168.208.53.165 - - [18/Oct/2018:03:16:06 +0000] \"GET /explore HTTP/1.0\" 200 5014 \"http://www.perez-miller.com/category/privacy.html\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0; rv:1.9.5.20) Gecko/2011-01-02 02:10:51 Firefox/3.8\"\n11.52.197.212 - - [18/Oct/2018:03:20:15 +0000] \"DELETE /explore HTTP/1.0\" 500 5042 \"http://www.rubio.info/post/\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_5 rv:3.0; it-IT) AppleWebKit/535.43.7 (KHTML, like Gecko) Version/4.0.5 Safari/535.43.7\"\n21.7.60.251 - - [18/Oct/2018:03:22:55 +0000] \"GET /list HTTP/1.0\" 301 5072 \"http://stevenson.org/index/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_7_3; rv:1.9.6.20) Gecko/2012-04-25 16:25:18 Firefox/10.0\"\n20.111.53.239 - - [18/Oct/2018:03:27:08 +0000] \"POST /apps/cart.jsp?appID=2130 HTTP/1.0\" 200 4990 \"http://www.hunt-raymond.com/main/search/\" \"Mozilla/5.0 
(Windows 95; en-US; rv:1.9.0.20) Gecko/2016-01-26 09:23:42 Firefox/3.8\"\n163.155.239.245 - - [18/Oct/2018:03:27:45 +0000] \"DELETE /posts/posts/explore HTTP/1.0\" 200 4939 \"http://adkins.com/categories/posts/search/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_7_1; rv:1.9.2.20) Gecko/2010-12-04 23:04:59 Firefox/3.8\"\n133.140.40.180 - - [18/Oct/2018:03:31:50 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 4979 \"http://www.malone.com/tags/about.php\" \"Opera/8.30.(Windows CE; sl-SI) Presto/2.9.189 Version/10.00\"\n199.152.210.117 - - [18/Oct/2018:03:34:19 +0000] \"GET /wp-admin HTTP/1.0\" 200 4986 \"http://www.camacho.com/explore/search/faq.php\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.7.20) Gecko/2018-09-05 13:26:24 Firefox/3.8\"\n180.22.162.153 - - [18/Oct/2018:03:38:22 +0000] \"GET /apps/cart.jsp?appID=2363 HTTP/1.0\" 200 4993 \"http://dean-cherry.com/homepage.php\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/5330 (KHTML, like Gecko) Chrome/14.0.850.0 Safari/5330\"\n67.123.236.154 - - [18/Oct/2018:03:40:26 +0000] \"PUT /search/tag/list HTTP/1.0\" 200 4972 \"http://powers.com/home.jsp\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_3; rv:1.9.3.20) Gecko/2010-02-12 23:31:10 Firefox/3.6.5\"\n70.12.70.204 - - [18/Oct/2018:03:45:02 +0000] \"DELETE /explore HTTP/1.0\" 200 4937 \"http://www.romero.com/tag/explore/main.html\" \"Mozilla/5.0 (X11; Linux i686; rv:1.9.6.20) Gecko/2016-05-05 13:52:25 Firefox/3.8\"\n85.226.90.231 - - [18/Oct/2018:03:49:37 +0000] \"GET /app/main/posts HTTP/1.0\" 200 4989 \"http://www.bradley-bailey.com/faq.asp\" \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/5351 (KHTML, like Gecko) Chrome/13.0.880.0 Safari/5351\"\n235.65.112.180 - - [18/Oct/2018:03:52:37 +0000] \"GET /search/tag/list HTTP/1.0\" 200 5030 \"http://www.mendoza.com/tags/blog/tag/category.html\" \"Mozilla/5.0 (Windows CE; en-US; rv:1.9.0.20) Gecko/2011-08-04 17:20:08 Firefox/14.0\"\n158.47.154.156 - - [18/Oct/2018:03:54:48 +0000] \"GET /list HTTP/1.0\" 200 4998 \"http://www.campos.com/search.php\" \"Mozilla/5.0 (compatible; MSIE 6.0; Windows CE; Trident/4.1)\"\n125.79.156.46 - - [18/Oct/2018:03:56:03 +0000] \"GET /search/tag/list HTTP/1.0\" 200 5010 \"http://www.golden.com/post.htm\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_7_1; rv:1.9.2.20) Gecko/2014-10-07 01:16:43 Firefox/3.6.10\"\n184.232.250.128 - - [18/Oct/2018:04:00:14 +0000] \"PUT /wp-content HTTP/1.0\" 200 4984 \"http://www.turner.info/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_4; rv:1.9.2.20) Gecko/2013-09-09 13:08:21 Firefox/15.0\"\n99.91.125.62 - - [18/Oct/2018:04:02:08 +0000] \"PUT /posts/posts/explore HTTP/1.0\" 200 5065 \"http://www.leblanc.com/\" \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/5322 (KHTML, like Gecko) Chrome/15.0.851.0 Safari/5322\"\n44.235.108.106 - - [18/Oct/2018:04:05:59 +0000] \"PUT /wp-content HTTP/1.0\" 200 5103 \"http://www.matthews.info/search/blog/main/category/\" \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/5350 (KHTML, like Gecko) Chrome/13.0.818.0 Safari/5350\"\n137.133.193.233 - - [18/Oct/2018:04:09:31 +0000] \"GET /wp-admin HTTP/1.0\" 200 5008 \"http://curtis.com/\" \"Mozilla/5.0 (Windows NT 5.1; en-US; rv:1.9.1.20) Gecko/2017-08-29 05:05:21 Firefox/3.6.5\"\n123.45.94.23 - - [18/Oct/2018:04:10:46 +0000] \"GET /posts/posts/explore HTTP/1.0\" 200 5048 \"http://www.white-miller.com/search.htm\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8 rv:2.0; it-IT) AppleWebKit/531.39.6 (KHTML, like Gecko) Version/5.0.1 Safari/531.39.6\"\n45.199.49.213 - - [18/Oct/2018:04:11:37 +0000] \"GET /wp-content 
HTTP/1.0\" 200 5078 \"http://www.shaffer.info/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_6_0; rv:1.9.4.20) Gecko/2014-11-11 14:14:32 Firefox/3.6.4\"\n8.115.73.60 - - [18/Oct/2018:04:16:16 +0000] \"GET /list HTTP/1.0\" 200 5013 \"http://moore.com/privacy.jsp\" \"Opera/9.90.(X11; Linux i686; en-US) Presto/2.9.190 Version/10.00\"\n8.36.203.85 - - [18/Oct/2018:04:20:02 +0000] \"DELETE /wp-content HTTP/1.0\" 200 4908 \"http://martin.com/search/faq/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_6_6; rv:1.9.4.20) Gecko/2012-12-19 23:52:21 Firefox/3.6.2\"\n204.50.213.48 - - [18/Oct/2018:04:20:55 +0000] \"GET /search/tag/list HTTP/1.0\" 200 5016 \"http://russell.com/post.html\" \"Mozilla/5.0 (Macintosh; PPC Mac OS X 10_7_6 rv:6.0; en-US) AppleWebKit/535.19.1 (KHTML, like Gecko) Version/5.0 Safari/535.19.1\"\n201.165.240.2 - - [18/Oct/2018:04:23:44 +0000] \"GET /search/tag/list HTTP/1.0\" 200 4885 \"http://www.reynolds-hunter.com/app/explore/home/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_1; rv:1.9.6.20) Gecko/2016-10-06 18:45:10 Firefox/3.6.11\"\n2.248.77.71 - - [18/Oct/2018:04:24:20 +0000] \"GET /search/tag/list HTTP/1.0\" 200 5014 \"http://www.erickson.com/categories/posts/list/search/\" \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_1; rv:1.9.2.20) Gecko/2010-07-10 10:14:15 Firefox/3.8\"\n180.166.108.32 - - [18/Oct/2018:04:27:51 +0000] \"GET /search/tag/list HTTP/1.0\" 200 4991 \"http://www.wall.info/privacy.jsp\" \"Mozilla/5.0 (Windows NT 4.0) AppleWebKit/5310 (KHTML, like Gecko) Chrome/14.0.800.0 Safari/5310\"\n112.68.225.108 - - [18/Oct/2018:04:29:45 +0000] \"GET /apps/cart.jsp?appID=1353 HTTP/1.0\" 404 5023 \"http://blair-miller.com/\" \"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_6_0; rv:1.9.4.20) Gecko/2011-10-19 08:56:36 Firefox/9.0\"\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/custom.log",
    "content": "2018-10-18 01:47:10 We need to rent a room for our party.\n2018-10-18 01:47:10 Yeah, I think it's a good environment for learning English.\n2018-10-18 01:47:10 Everyone was busy, so I went to the movie alone.\n2018-10-18 01:47:10 A purple pig and a green donkey flew a kite in the middle of the night and ended up sunburnt.\n2018-10-18 01:47:10 Yeah, I think it's a good environment for learning English.\n2018-10-18 01:47:10 I am never at home on Sundays.\n2018-10-18 01:47:10 There were white out conditions in the town; subsequently, the roads were impassable.\n2018-10-18 01:47:10 I hear that Nancy is very pretty.\n2018-10-18 01:47:10 There were white out conditions in the town; subsequently, the roads were impassable.\n2018-10-18 01:47:10 I am counting my calories, yet I really want dessert.\n2018-10-18 01:47:10 A purple pig and a green donkey flew a kite in the middle of the night and ended up sunburnt.\n2018-10-18 01:47:10 I am counting my calories, yet I really want dessert.\n2018-10-18 01:47:10 Cats are good pets, for they are clean and are not noisy.\n2018-10-18 01:47:10 Where do random thoughts come from?\n2018-10-18 01:47:10 I am never at home on Sundays.\n2018-10-18 01:47:10 The memory we used to share is no longer coherent.\n2018-10-18 01:47:10 She folded her handkerchief neatly.\n2018-10-18 01:47:10 The memory we used to share is no longer coherent.\n2018-10-18 01:47:10 She was too short to see over the fence.\n2018-10-18 01:47:10 Rock music approaches at high velocity.\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/customlog.conf",
    "content": "# This file is configured by the OMS service\n\n<source>\n  type sudo_tail\n  path /var/log/custom.log\n  pos_file /var/opt/microsoft/omsagent/state/CUSTOM_LOG_BLOB.Custom_Log_CL_<workspace-id>.pos\n  read_from_head true\n  run_interval 60\n  tag oms.blob.CustomLog.CUSTOM_LOG_BLOB.Custom_Log_CL_<workspace-id>.*\n  format none\n</source>\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/error.log",
    "content": "Version: '5.7.23-0ubuntu0.16.04.1-log'  socket: '/var/run/mysqld/mysqld.sock'  port: 3306  (Ubuntu)\n2018-10-15T19:54:50.675213Z 2 [Note] Access denied for user 'root'@'localhost' (using password: NO)\n2018-10-15T19:55:16.350986Z 0 [Note] Giving 7 client threads a chance to die gracefully\n2018-10-15T19:55:16.351028Z 0 [Note] Shutting down slave threads\n2018-10-15T19:55:18.351113Z 0 [Note] Forcefully disconnecting 4 remaining clients\n2018-10-15T19:55:18.351141Z 0 [Warning] /usr/sbin/mysqld: Forcing close of thread 3  user: 'zabbix'\n\n2018-10-15T19:55:18.351160Z 0 [Warning] /usr/sbin/mysqld: Forcing close of thread 4  user: 'zabbix'\n\n2018-10-15T19:55:18.351186Z 0 [Warning] /usr/sbin/mysqld: Forcing close of thread 5  user: 'zabbix'\n\n2018-10-15T19:55:18.351193Z 0 [Warning] /usr/sbin/mysqld: Forcing close of thread 9  user: 'zabbix'\n\n2018-10-15T19:55:18.351205Z 0 [Note] Event Scheduler: Purging the queue. 0 events\n2018-10-15T19:55:18.351518Z 0 [Note] Binlog end\n2018-10-15T19:55:18.352972Z 0 [Note] Shutting down plugin 'ngram'\n2018-10-15T19:55:18.352983Z 0 [Note] Shutting down plugin 'partition'\n2018-10-15T19:55:18.352986Z 0 [Note] Shutting down plugin 'BLACKHOLE'\n2018-10-15T19:55:18.352989Z 0 [Note] Shutting down plugin 'ARCHIVE'\n2018-10-15T19:55:18.353039Z 0 [Note] Shutting down plugin 'MEMORY'\n2018-10-15T19:55:18.353044Z 0 [Note] Shutting down plugin 'INNODB_SYS_VIRTUAL'\n2018-10-15T19:55:18.353048Z 0 [Note] Shutting down plugin 'INNODB_SYS_DATAFILES'\n2018-10-15T19:55:18.353051Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLESPACES'\n2018-10-15T19:55:18.353053Z 0 [Note] Shutting down plugin 'INNODB_SYS_FOREIGN_COLS'\n2018-10-15T19:55:18.353056Z 0 [Note] Shutting down plugin 'INNODB_SYS_FOREIGN'\n2018-10-15T19:55:18.353059Z 0 [Note] Shutting down plugin 'INNODB_SYS_FIELDS'\n2018-10-15T19:55:18.353062Z 0 [Note] Shutting down plugin 'INNODB_SYS_COLUMNS'\n2018-10-15T19:55:18.353065Z 0 [Note] Shutting down plugin 'INNODB_SYS_INDEXES'\n2018-10-15T19:55:18.353068Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLESTATS'\n2018-10-15T19:55:18.353071Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLES'\n2018-10-15T19:55:18.353073Z 0 [Note] Shutting down plugin 'INNODB_FT_INDEX_TABLE'\n2018-10-15T19:55:18.353076Z 0 [Note] Shutting down plugin 'INNODB_FT_INDEX_CACHE'\n2018-10-15T19:55:18.353079Z 0 [Note] Shutting down plugin 'INNODB_FT_CONFIG'\n2018-10-15T19:55:18.353082Z 0 [Note] Shutting down plugin 'INNODB_FT_BEING_DELETED'\n2018-10-15T19:55:18.353085Z 0 [Note] Shutting down plugin 'INNODB_FT_DELETED'\n2018-10-15T19:55:18.353087Z 0 [Note] Shutting down plugin 'INNODB_FT_DEFAULT_STOPWORD'\n2018-10-15T19:55:18.353091Z 0 [Note] Shutting down plugin 'INNODB_METRICS'\n2018-10-15T19:55:18.353094Z 0 [Note] Shutting down plugin 'INNODB_TEMP_TABLE_INFO'\n2018-10-15T19:55:18.353097Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_POOL_STATS'\n2018-10-15T19:55:18.353099Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_PAGE_LRU'\n2018-10-15T19:55:18.353102Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_PAGE'\n2018-10-15T19:55:18.353105Z 0 [Note] Shutting down plugin 'INNODB_CMP_PER_INDEX_RESET'\n2018-10-15T19:55:18.353108Z 0 [Note] Shutting down plugin 'INNODB_CMP_PER_INDEX'\n2018-10-15T19:55:18.353111Z 0 [Note] Shutting down plugin 'INNODB_CMPMEM_RESET'\n2018-10-15T19:55:18.353113Z 0 [Note] Shutting down plugin 'INNODB_CMPMEM'\n2018-10-15T19:55:18.353116Z 0 [Note] Shutting down plugin 'INNODB_CMP_RESET'\n2018-10-15T19:55:18.353119Z 0 [Note] Shutting down plugin 
'INNODB_CMP'\n2018-10-15T19:55:18.353122Z 0 [Note] Shutting down plugin 'INNODB_LOCK_WAITS'\n2018-10-15T19:55:18.353125Z 0 [Note] Shutting down plugin 'INNODB_LOCKS'\n2018-10-15T19:55:18.353128Z 0 [Note] Shutting down plugin 'INNODB_TRX'\n2018-10-15T19:55:18.353131Z 0 [Note] Shutting down plugin 'InnoDB'\n2018-10-15T19:55:18.354768Z 0 [Note] InnoDB: FTS optimize thread exiting.\n2018-10-15T19:55:18.354950Z 0 [Note] InnoDB: Starting shutdown...\n2018-10-15T19:55:18.455235Z 0 [Note] InnoDB: Dumping buffer pool(s) to /var/lib/mysql/ib_buffer_pool\n2018-10-15T19:55:18.455408Z 0 [Note] InnoDB: Buffer pool(s) dump completed at 181015 19:55:18\n2018-10-15T19:55:19.782226Z 0 [Note] InnoDB: Shutdown completed; log sequence number 15199598\n2018-10-15T19:55:19.784179Z 0 [Note] InnoDB: Removed temporary tablespace data file: \"ibtmp1\"\n2018-10-15T19:55:19.784526Z 0 [Note] Shutting down plugin 'MRG_MYISAM'\n2018-10-15T19:55:19.784782Z 0 [Note] Shutting down plugin 'MyISAM'\n2018-10-15T19:55:19.785078Z 0 [Note] Shutting down plugin 'CSV'\n2018-10-15T19:55:19.785417Z 0 [Note] Shutting down plugin 'PERFORMANCE_SCHEMA'\n2018-10-15T19:55:19.785694Z 0 [Note] Shutting down plugin 'sha256_password'\n2018-10-15T19:55:19.785937Z 0 [Note] Shutting down plugin 'mysql_native_password'\n2018-10-15T19:55:19.786432Z 0 [Note] Shutting down plugin 'binlog'\n2018-10-15T19:55:19.796160Z 0 [Note] /usr/sbin/mysqld: Shutdown complete\n\n2018-10-15T19:55:19.858798Z 0 [Warning] Changed limits: max_open_files: 1024 (requested 5000)\n2018-10-15T19:55:19.858828Z 0 [Warning] Changed limits: table_open_cache: 431 (requested 2000)\n2018-10-15T19:55:20.017175Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).\n2018-10-15T19:55:20.019931Z 0 [Note] /usr/sbin/mysqld (mysqld 5.7.23-0ubuntu0.16.04.1-log) starting as process 12803 ...\n2018-10-15T19:55:20.025911Z 0 [Note] InnoDB: PUNCH HOLE support available\n2018-10-15T19:55:20.026301Z 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins\n2018-10-15T19:55:20.026621Z 0 [Note] InnoDB: Uses event mutexes\n2018-10-15T19:55:20.026886Z 0 [Note] InnoDB: GCC builtin __atomic_thread_fence() is used for memory barrier\n2018-10-15T19:55:20.027229Z 0 [Note] InnoDB: Compressed tables use zlib 1.2.8\n2018-10-15T19:55:20.027559Z 0 [Note] InnoDB: Using Linux native AIO\n2018-10-15T19:55:20.028218Z 0 [Note] InnoDB: Number of pools: 1\n2018-10-15T19:55:20.028662Z 0 [Note] InnoDB: Using CPU crc32 instructions\n2018-10-15T19:55:20.031387Z 0 [Note] InnoDB: Initializing buffer pool, total size = 128M, instances = 1, chunk size = 128M\n2018-10-15T19:55:20.039710Z 0 [Note] InnoDB: Completed initialization of buffer pool\n2018-10-15T19:55:20.042206Z 0 [Note] InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority().\n2018-10-15T19:55:20.054579Z 0 [Note] InnoDB: Highest supported file format is Barracuda.\n2018-10-15T19:55:20.111399Z 0 [Note] InnoDB: Creating shared tablespace for temporary tables\n2018-10-15T19:55:20.111920Z 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ...\n2018-10-15T19:55:21.176656Z 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB.\n2018-10-15T19:55:21.177860Z 0 [Note] InnoDB: 96 redo rollback segment(s) found. 
96 redo rollback segment(s) are active.\n2018-10-15T19:55:21.178348Z 0 [Note] InnoDB: 32 non-redo rollback segment(s) are active.\n2018-10-15T19:55:21.180824Z 0 [Note] InnoDB: Waiting for purge to start\n2018-10-15T19:55:21.231391Z 0 [Note] InnoDB: 5.7.23 started; log sequence number 15199598\n2018-10-15T19:55:21.232458Z 0 [Note] Plugin 'FEDERATED' is disabled.\n2018-10-15T19:55:21.236600Z 0 [Note] InnoDB: Loading buffer pool(s) from /var/lib/mysql/ib_buffer_pool\n2018-10-15T19:55:21.242685Z 0 [Warning] Failed to set up SSL because of the following SSL library error: SSL context is not usable without certificate and private key\n2018-10-15T19:55:21.243032Z 0 [Note] Server hostname (bind-address): '127.0.0.1'; port: 3306\n2018-10-15T19:55:21.243445Z 0 [Note]   - '127.0.0.1' resolves to '127.0.0.1';\n2018-10-15T19:55:21.243731Z 0 [Note] Server socket created on IP: '127.0.0.1'.\n2018-10-15T19:55:21.249632Z 0 [Note] InnoDB: Buffer pool(s) load completed at 181015 19:55:21\n2018-10-15T19:55:21.254564Z 0 [Note] Event Scheduler: Loaded 0 events\n2018-10-15T19:55:21.255500Z 0 [Note] /usr/sbin/mysqld: ready for connections.\nVersion: '5.7.23-0ubuntu0.16.04.1-log'  socket: '/var/run/mysqld/mysqld.sock'  port: 3306  (Ubuntu)\n2018-10-15T19:55:21.856724Z 2 [Note] Access denied for user 'root'@'localhost' (using password: NO)\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/mysql-slow.log",
    "content": "/usr/sbin/mysqld, Version: 5.7.23-0ubuntu0.16.04.1-log ((Ubuntu)). started with:\nTcp port: 3306  Unix socket: /var/run/mysqld/mysqld.sock\nTime                 Id Command    Argument\n# Time: 2018-10-15T19:56:12.584806Z\n# User@Host: root[root] @ localhost []  Id:     8\n# Query_time: 10.000575  Lock_time: 0.000000 Rows_sent: 1  Rows_examined: 0\nuse test;\nSET timestamp=1539633372;\nselect sleep(10);\n# Time: 2018-10-15T19:56:36.398796Z\n# User@Host: root[root] @ localhost []  Id:     8\n# Query_time: 10.000546  Lock_time: 0.000000 Rows_sent: 1  Rows_examined: 0\nSET timestamp=1539633396;\nselect sleep(10);\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/mysql.log",
    "content": "2018-10-15T18:38:50.315829Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:50.863314Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:38:50.863721Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628730 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:38:50.864137Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:38:51.317472Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:52.318953Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:52.947568Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:52.947842Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:52.947973Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:53.320549Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:54.322093Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:55.323700Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:55.864344Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:38:55.864883Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where 
t.hostid=h.hostid and t.nextcheck<=1539628735 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:38:55.865129Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:38:55.948479Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:55.948664Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:55.948814Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:56.325397Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:57.326948Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:58.328609Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:38:58.949362Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:58.949594Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:58.949700Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:38:59.330164Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:00.331671Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:00.847135Z\t   17 Query\tselect m.maintenanceid,m.maintenance_type,m.active_since,tp.timeperiod_type,tp.every,tp.month,tp.dayofweek,tp.day,tp.start_time,tp.period,tp.start_date from maintenances m,maintenances_windows mw,timeperiods tp where m.maintenanceid=mw.maintenanceid and mw.timeperiodid=tp.timeperiodid and m.active_since<=1539628740 
and m.active_till>1539628740\n2018-10-15T18:39:00.847466Z\t   17 Query\tbegin\n2018-10-15T18:39:00.847520Z\t   17 Query\tselect hostid,host,maintenance_type,maintenance_from from hosts where status=0 and flags<>2 and maintenance_status=1\n2018-10-15T18:39:00.847687Z\t   17 Query\tcommit\n2018-10-15T18:39:00.865215Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:39:00.865899Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628740 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:00.866166Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:01.333227Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:01.950246Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:01.950520Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:01.950663Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:02.334772Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:03.336170Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:04.338531Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:04.951288Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:04.951515Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:04.951615Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is 
null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:05.340207Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:05.866136Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:39:05.866969Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628745 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:05.867202Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:06.341692Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:07.343211Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:07.952124Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:07.952332Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:07.952436Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:08.344797Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:09.346291Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:10.347130Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:10.867114Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:39:10.867965Z\t   16 Query\tselect 
h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628750 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:10.868197Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:10.953013Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:10.953242Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:10.953370Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:11.348383Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:12.348909Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:13.349494Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:13.953982Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:13.954400Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:13.954532Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:14.350097Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:14.444846Z\t   12 Query\tDROP DATABASE test\n2018-10-15T18:39:14.576240Z\t   12 Query\tSELECT DATABASE()\n2018-10-15T18:39:15.310599Z\t   12 Query\tCREATE DATABASE test\n2018-10-15T18:39:15.350757Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on 
a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:15.868143Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:39:15.869085Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628755 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:15.869995Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:16.352828Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:16.683314Z\t   12 Query\tSELECT DATABASE()\n2018-10-15T18:39:16.684005Z\t   12 Init DB\ttest\n2018-10-15T18:39:16.685263Z\t   12 Query\tshow databases\n2018-10-15T18:39:16.685810Z\t   12 Query\tshow tables\n2018-10-15T18:39:16.955061Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:16.955820Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:16.956073Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:17.355676Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:17.769543Z\t   12 Query\tCREATE TABLE IF NOT EXISTS data (id INT AUTO_INCREMENT, title VARCHAR(255) NOT NULL, description TEXT, PRIMARY KEY (id))\n2018-10-15T18:39:18.357242Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:19.358825Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:19.956729Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:19.957576Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by 
actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:19.958050Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:20.360434Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:20.870503Z\t   21 Query\tselect taskid,type,clock,ttl from task where status in (1,2) order by taskid\n2018-10-15T18:39:20.871246Z\t   16 Query\tselect h.hostid,h.host,h.name,t.httptestid,t.name,t.agent,t.authentication,t.http_user,t.http_password,t.http_proxy,t.retries,t.ssl_cert_file,t.ssl_key_file,t.ssl_key_password,t.verify_peer,t.verify_host,t.delay from httptest t,hosts h where t.hostid=h.hostid and t.nextcheck<=1539628760 and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:20.871544Z\t   16 Query\tselect min(t.nextcheck) from httptest t,hosts h where t.hostid=h.hostid and mod(t.httptestid,1)=0 and t.status=0 and h.proxy_hostid is null and h.status=0 and (h.maintenance_status=0 or h.maintenance_type=0)\n2018-10-15T18:39:21.361831Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:22.363301Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n2018-10-15T18:39:22.958623Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:22.958924Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where itemid is not null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:22.959097Z\t   28 Query\tselect escalationid,actionid,triggerid,eventid,r_eventid,nextcheck,esc_step,status,itemid,acknowledgeid from escalations where triggerid is null and itemid is null order by actionid,triggerid,itemid,escalationid\n2018-10-15T18:39:23.364725Z\t   33 Query\tselect a.alertid,a.mediatypeid,a.sendto,a.subject,a.message,a.status,a.retries,e.source,e.object,e.objectid from alerts a left join events e on a.eventid=e.eventid where alerttype=0 and a.status=3 order by a.alertid\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/oms_extension_run_script.py",
    "content": "import datetime\nimport os\nimport os.path\nimport platform\nimport re\nimport subprocess\nimport sys\nimport time\n\nif \"check_output\" not in dir(subprocess): # duck punch it in!\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Run command with arguments and return its output as a byte string.\n        Backported from Python 2.7 as it's implemented as pure python on stdlib.\n        >>> check_output(['/usr/bin/python', '--version'])\n        Python 2.6.2\n        \"\"\"\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            error = subprocess.CalledProcessError(retcode, cmd)\n            error.logput = output\n            raise error\n        return output\n\n    subprocess.check_output = check_output\n\n# Create directory and copy files\nif not os.path.isdir('/home/scratch/'):\n    os.system('mkdir /home/scratch/ \\\n            && cp /tmp/*.py /home/scratch/ \\\n            && cp /tmp/*.log /home/scratch/ \\\n            && cp /tmp/*.conf /home/scratch/')\n\nout_file = '/home/scratch/omsresults.log'\nopen_file = open(out_file, 'w+')\n\ndef main():\n    # Determine the operation being executed\n    vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension()\n    linux_detect_installer()\n\n    if len(sys.argv) == 2:\n        option = sys.argv[1]\n        if re.match('^([-/]*)(preinstall)', option):\n            install_additional_packages()\n        elif re.match('^([-/]*)(postinstall)', option):\n            detect_workspace_id()\n            config_start_oms_services()\n            restart_services()\n            result_commands()\n            service_control_commands()\n            write_html()\n            dist_status()\n        elif re.match('^([-/]*)(status)', option):\n            result_commands()\n            service_control_commands()\n            write_html()\n            dist_status()\n        elif re.match('^([-/]*)(injectlogs)', option):\n            time.sleep(120)\n            inject_logs()\n        elif re.match('^([-/]*)(copyomslogs)', option):\n            detect_workspace_id()\n            copy_oms_logs()\n        elif re.match('^([-/]*)(copyextlogs)', option):\n            copy_extension_logs()\n    else:\n        print(\"No operation specified. 
run with 'preinstall', 'postinstall', 'status', 'injectlogs', 'copyomslogs' or 'copyextlogs'\")\n\ndef is_vm_supported_for_extension():\n\n    global vm_supported, vm_dist, vm_ver\n    supported_dists = {'redhat' : ['6', '7'], # CentOS\n                       'centos' : ['6', '7'], # CentOS\n                       'red hat' : ['6', '7'], # Oracle, RHEL\n                       'oracle' : ['6', '7'], # Oracle\n                       'debian' : ['8', '9'], # Debian\n                       'ubuntu' : ['14.04', '16.04', '18.04'], # Ubuntu\n                       'suse' : ['12'], 'sles' : ['15']} # SLES\n\n    try:\n        vm_dist, vm_ver, vm_id = platform.linux_distribution()\n    except AttributeError:\n        vm_dist, vm_ver, vm_id = platform.dist()\n\n    if not vm_dist and not vm_ver: # SLES 15\n        with open('/etc/os-release', 'r') as fp:\n            for line in fp:\n                if line.startswith('ID='):\n                    vm_dist = line.split('=')[1]\n                    vm_dist = vm_dist.split('-')[0]\n                    vm_dist = vm_dist.replace('\\\"', '').replace('\\n', '')\n                elif line.startswith('VERSION_ID='):\n                    vm_ver = line.split('=')[1]\n                    vm_ver = vm_ver.split('.')[0]\n                    vm_ver = vm_ver.replace('\\\"', '').replace('\\n', '')\n\n    vm_supported = False\n\n    # Find this VM distribution in the supported list\n    for supported_dist in supported_dists.keys():\n        if not vm_dist.lower().startswith(supported_dist):\n            continue\n\n        # Check if this VM distribution version is supported\n        vm_ver_split = vm_ver.split('.')\n        for supported_ver in supported_dists[supported_dist]:\n            supported_ver_split = supported_ver.split('.')\n\n            vm_ver_match = True\n            for idx, supported_ver_num in enumerate(supported_ver_split):\n                try:\n                    supported_ver_num = int(supported_ver_num)\n                    vm_ver_num = int(vm_ver_split[idx])\n                except IndexError:\n                    vm_ver_match = False\n                    break\n                if vm_ver_num != supported_ver_num:\n                    vm_ver_match = False\n                    break\n            if vm_ver_match:\n                vm_supported = True\n                break\n\n        if vm_supported:\n            break\n\n    return vm_supported, vm_dist, vm_ver\n\ndef replace_items(infile, old_word, new_word):\n    \"\"\"Replace old_word with new_word in file infile.\"\"\"\n    if not os.path.isfile(infile):\n        print(\"Error on replace_items, not a regular file: \" + infile)\n        sys.exit(1)\n\n    with open(infile, 'r') as f:\n        content = f.read()\n    with open(infile, 'w') as f:\n        f.write(content.replace(old_word, new_word))\n\ndef detect_workspace_id():\n    \"\"\"Detect the workspace id where the agent is onboarded.\"\"\"\n    global workspace_id\n    x = subprocess.check_output('/opt/microsoft/omsagent/bin/omsadmin.sh -l', shell=True)\n    try:\n        workspace_id = re.search('[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', x).group(0)\n    except AttributeError:\n        workspace_id = None\n\ndef linux_detect_installer():\n    \"\"\"Check what installer (dpkg or rpm) should be used.\"\"\"\n    global INSTALLER\n    INSTALLER = None\n    if vm_supported and (vm_dist.startswith('Ubuntu') or vm_dist.startswith('debian')):\n        INSTALLER = 'APT'\n    elif vm_supported and (vm_dist.startswith('CentOS') or vm_dist.startswith('Oracle') 
or vm_dist.startswith('Red Hat')):\n        INSTALLER = 'YUM'\n    elif vm_supported and vm_dist.startswith('SUSE Linux'):\n        INSTALLER = 'ZYPPER'\n\ndef install_additional_packages():\n    \"\"\"Install the additional packages needed by the tests.\"\"\"\n    if INSTALLER == 'APT':\n        os.system('apt-get -y update && apt-get -y install wget apache2 git \\\n                && service apache2 start')\n    elif INSTALLER == 'YUM':\n        os.system('yum update -y && yum install -y wget httpd git \\\n                && service httpd start')\n    elif INSTALLER == 'ZYPPER':\n        os.system('zypper --non-interactive update && zypper --non-interactive install wget apache2 git \\\n                && service apache2 start')\n\ndef enable_dsc():\n    \"\"\"Enable DSC\"\"\"\n    os.system('/opt/microsoft/omsconfig/Scripts/OMS_MetaConfigHelper.py --enable')\n\ndef disable_dsc():\n    \"\"\"Disable DSC\"\"\"\n    os.system('/opt/microsoft/omsconfig/Scripts/OMS_MetaConfigHelper.py --disable')\n    Pending_mof = '/etc/opt/omi/conf/omsconfig/configuration/Pending.mof'\n    Current_mof = '/etc/opt/omi/conf/omsconfig/configuration/Current.mof'\n    if os.path.isfile(Pending_mof):\n        os.remove(Pending_mof)\n    if os.path.isfile(Current_mof):\n        os.remove(Current_mof)\n\ndef copy_config_files():\n    \"\"\"Convert, copy, and set permissions for agent configuration files.\"\"\"\n    os.system('cat /home/scratch/perf.conf >> /etc/opt/microsoft/omsagent/{0}/conf/omsagent.conf \\\n            && cp /home/scratch/rsyslog-oms.conf /etc/opt/omi/conf/omsconfig/rsyslog-oms.conf \\\n            && cp /home/scratch/rsyslog-oms.conf /etc/rsyslog.d/95-omsagent.conf \\\n            && chown omsagent:omiusers /etc/rsyslog.d/95-omsagent.conf \\\n            && chmod 644 /etc/rsyslog.d/95-omsagent.conf \\\n            && cp /home/scratch/customlog.conf /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/customlog.conf \\\n            && chown omsagent:omiusers /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/customlog.conf \\\n            && cp /etc/opt/microsoft/omsagent/sysconf/omsagent.d/apache_logs.conf /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/apache_logs.conf \\\n            && chown omsagent:omiusers /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/apache_logs.conf \\\n            && cp /etc/opt/microsoft/omsagent/sysconf/omsagent.d/mysql_logs.conf /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/mysql_logs.conf \\\n            && chown omsagent:omiusers /etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/mysql_logs.conf'.format(workspace_id))\n    replace_items('/etc/opt/microsoft/omsagent/{0}/conf/omsagent.conf'.format(workspace_id), '<workspace-id>', workspace_id)\n    replace_items('/etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/customlog.conf'.format(workspace_id), '<workspace-id>', workspace_id)\n\ndef apache_mysql_conf():\n    \"\"\"Configure Apache and MySQL, set up empty log files, and add permissions.\"\"\"\n    apache_conf_file = '/etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/apache_logs.conf'.format(workspace_id)\n    mysql_conf_file = '/etc/opt/microsoft/omsagent/{0}/conf/omsagent.d/mysql_logs.conf'.format(workspace_id)\n    apache_access_conf_path_string = '/usr/local/apache2/logs/access_log /var/log/apache2/access.log /var/log/httpd/access_log /var/log/apache2/access_log'\n    apache_error_conf_path_string = '/usr/local/apache2/logs/error_log /var/log/apache2/error.log /var/log/httpd/error_log /var/log/apache2/error_log'\n    os.system('chown omsagent:omiusers {0}'.format(apache_conf_file))\n    os.system('chown 
omsagent:omiusers {0}'.format(mysql_conf_file))\n\n    os.system('mkdir -p /var/log/mysql \\\n            && touch /var/log/mysql/mysql.log /var/log/mysql/error.log /var/log/mysql/mysql-slow.log \\\n            && touch /var/log/custom.log \\\n            && chmod +r /var/log/mysql/* \\\n            && chmod +rx /var/log/mysql \\\n            && chmod +r /var/log/custom.log')\n\n    if INSTALLER == 'APT':\n        replace_items(apache_conf_file, apache_access_conf_path_string, '/var/log/apache2/access.log')\n        replace_items(apache_conf_file, apache_error_conf_path_string, '/var/log/apache2/error.log')\n        os.system('mkdir -p /var/log/apache2 \\\n                && touch /var/log/apache2/access.log /var/log/apache2/error.log \\\n                && chmod +r /var/log/apache2/* \\\n                && chmod +rx /var/log/apache2')\n    elif INSTALLER == 'YUM':\n        replace_items(apache_conf_file, apache_access_conf_path_string, '/var/log/httpd/access_log')\n        replace_items(apache_conf_file, apache_error_conf_path_string, '/var/log/httpd/error_log')\n        os.system('mkdir -p /var/log/httpd \\\n                && touch /var/log/httpd/access_log /var/log/httpd/error_log \\\n                && chmod +r /var/log/httpd/* \\\n                && chmod +rx /var/log/httpd')\n    elif INSTALLER == 'ZYPPER':\n        replace_items(apache_conf_file, apache_access_conf_path_string, '/var/log/apache2/access_log')\n        replace_items(apache_conf_file, apache_error_conf_path_string, '/var/log/apache2/error_log')\n        os.system('mkdir -p /var/log/apache2 \\\n                && touch /var/log/apache2/access_log /var/log/apache2/error_log \\\n                && chmod +r /var/log/apache2/* \\\n                && chmod +rx /var/log/apache2')\n\ndef inject_logs():\n    \"\"\"Inject logs after the agent is running to simulate real Apache/MySQL/custom log output.\"\"\"\n\n    # set apache timestamps to current time to ensure they are searchable with 1 hour period in log analytics\n    now = datetime.datetime.utcnow().strftime('[%d/%b/%Y:%H:%M:%S +0000]')\n    os.system(r\"sed -i 's|\\(\\[.*\\]\\)|{0}|' /home/scratch/apache_access.log\".format(now))\n\n    if INSTALLER == 'APT':\n        os.system('cat /home/scratch/apache_access.log >> /var/log/apache2/access.log \\\n                && chown root:root /var/log/apache2/access.log \\\n                && chmod 644 /var/log/apache2/access.log')\n    elif INSTALLER == 'YUM':\n        os.system('cat /home/scratch/apache_access.log >> /var/log/httpd/access_log \\\n                && chown root:root /var/log/httpd/access_log \\\n                && chmod 644 /var/log/httpd/access_log')\n    elif INSTALLER == 'ZYPPER':\n        os.system('cat /home/scratch/apache_access.log >> /var/log/apache2/access_log \\\n                && chown root:root /var/log/apache2/access_log \\\n                && chmod 644 /var/log/apache2/access_log')\n\n    os.system('cat /home/scratch/mysql.log >> /var/log/mysql/mysql.log \\\n            && cat /home/scratch/error.log >> /var/log/mysql/error.log \\\n            && cat /home/scratch/mysql-slow.log >> /var/log/mysql/mysql-slow.log \\\n            && cat /home/scratch/custom.log >> /var/log/custom.log')\n\ndef config_start_oms_services():\n    \"\"\"Orchestrate overall configuration prior to agent start.\"\"\"\n    os.system('/opt/omi/bin/omiserver -d')\n    disable_dsc()\n    copy_config_files()\n    apache_mysql_conf()\n\ndef restart_services():\n    \"\"\"Restart rsyslog, OMI, and OMS.\"\"\"\n    
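# Added descriptive note (assumption): the pause below presumably lets prior configuration writes settle before the services are bounced.\n    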
time.sleep(10)\n    os.system('service rsyslog restart \\\n                && /opt/omi/bin/service_control restart \\\n                && /opt/microsoft/omsagent/bin/service_control restart')\n\n\ndef append_file(filename, destFile):\n    f = open(filename, 'r')\n    destFile.write(f.read())\n    f.close()\n\ndef exec_command(cmd):\n    \"\"\"Run the provided command, check, and return its output.\"\"\"\n    try:\n        out = subprocess.check_output(cmd, shell=True)\n        return out\n    except subprocess.CalledProcessError as e:\n        print(e.returncode)\n        return e.returncode\n\ndef write_log_output(log, out):\n    \"\"\"Save command output to the log file.\"\"\"\n    if not isinstance(out, str):\n        out = str(out)\n    log.write(out + '\\n')\n    log.write('-' * 80)\n    log.write('\\n')\n\ndef write_log_command(log, cmd):\n    \"\"\"Print command and save command to log file.\"\"\"\n    print(cmd)\n    log.write(cmd + '\\n')\n    log.write('=' * 40)\n    log.write('\\n')\n\ndef check_pkg_status(pkg):\n    \"\"\"Check pkg install status and return output and derived status.\"\"\"\n    if INSTALLER == 'APT':\n        cmd = 'dpkg -s {0}'.format(pkg)\n        output = exec_command(cmd)\n        if (os.system('{0} | grep deinstall > /dev/null 2>&1'.format(cmd)) == 0 or\n                os.system('{0} > /dev/null 2>&1'.format(cmd)) != 0):\n            status = 'Not Installed'\n        else:\n            status = 'Install Ok'\n    elif INSTALLER == 'YUM' or INSTALLER == 'ZYPPER':\n        cmd = 'rpm -qi {0}'.format(pkg)\n        output = exec_command(cmd)\n        if os.system('{0} > /dev/null 2>&1'.format(cmd)) == 0:\n            status = 'Install Ok'\n        else:\n            status = 'Not Installed'\n\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, output)\n    return (output, status)\n\ndef result_commands():\n    \"\"\"Determine and store status of agent.\"\"\"\n    global waagentOut, onboardStatus, omiRunStatus, psefomsagent, omsagentRestart, omiRestart\n    global omiInstallOut, omsagentInstallOut, omsconfigInstallOut, scxInstallOut, omiInstallStatus, omsagentInstallStatus, omsconfigInstallStatus, scxInstallStatus\n    cmd = 'waagent --version'\n    waagentOut = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, waagentOut)\n    cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -l'\n    onboardStatus = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, onboardStatus)\n    cmd = 'scxadmin -status'\n    omiRunStatus = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, omiRunStatus)\n\n    omiInstallOut, omiInstallStatus = check_pkg_status('omi')\n    omsagentInstallOut, omsagentInstallStatus = check_pkg_status('omsagent')\n    omsconfigInstallOut, omsconfigInstallStatus = check_pkg_status('omsconfig')\n    scxInstallOut, scxInstallStatus = check_pkg_status('scx')\n\n    # OMS agent process check\n    cmd = 'ps -ef | egrep \"omsagent|omi\"'\n    psefomsagent = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, psefomsagent)\n\n    time.sleep(10)\n    # OMS agent restart\n    cmd = '/opt/microsoft/omsagent/bin/service_control restart'\n    omsagentRestart = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, omsagentRestart)\n\n    # OMI agent restart\n    cmd = '/opt/omi/bin/service_control restart'\n    omiRestart = exec_command(cmd)\n    
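# Added descriptive note: record the OMI restart command and its output; both feed the HTML report assembled in write_html().\n    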
write_log_command(open_file, cmd)\n    write_log_output(open_file, omiRestart)\n\ndef service_control_commands():\n    \"\"\"Determine and store results of various service commands.\"\"\"\n    global serviceStop, serviceDisable, serviceEnable, serviceStart\n\n    # OMS stop (shutdown the agent)\n    cmd = '/opt/microsoft/omsagent/bin/service_control stop'\n    serviceStop = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, serviceStop)\n\n    # OMS disable (disable agent from starting upon system start)\n    cmd = '/opt/microsoft/omsagent/bin/service_control disable'\n    serviceDisable = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, serviceDisable)\n\n    # OMS enable (enable agent to start upon system start)\n    cmd = '/opt/microsoft/omsagent/bin/service_control enable'\n    serviceEnable = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, serviceEnable)\n\n    # OMS start (start the agent)\n    cmd = '/opt/microsoft/omsagent/bin/service_control start'\n    serviceStart = exec_command(cmd)\n    write_log_command(open_file, cmd)\n    write_log_output(open_file, serviceStart)\n\ndef write_html():\n    \"\"\"Use stored command results to create an HTML report of the test results.\"\"\"\n    os.system('rm -f /home/scratch/omsresults.html')\n    html_file = '/home/scratch/omsresults.html'\n    f = open(html_file, 'w+')\n    message = \"\"\"\n<div class=\"text\" style=\"white-space: pre-wrap\" >\n\n<table>\n  <caption><h4>OMS Install Results</h4></caption>\n  <tr>\n    <th>Package</th>\n    <th>Status</th>\n    <th>Output</th>\n  </tr>\n  <tr>\n    <td>OMI</td>\n    <td>{0}</td>\n    <td>{1}</td>\n  </tr>\n  <tr>\n    <td>OMSAgent</td>\n    <td>{2}</td>\n    <td>{3}</td>\n  </tr>\n  <tr>\n    <td>OMSConfig</td>\n    <td>{4}</td>\n    <td>{5}</td>\n  </tr>\n  <tr>\n    <td>SCX</td>\n    <td>{6}</td>\n    <td>{7}</td>\n  </tr>\n</table>\n\n<table>\n  <caption><h4>OMS Command Outputs</h4></caption>\n  <tr>\n    <th>Command</th>\n    <th>Output</th>\n  </tr>\n  <tr>\n    <td>waagent --version</td>\n    <td>{8}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/omsadmin.sh -l</td>\n    <td>{9}</td>\n  </tr>\n  <tr>\n    <td>scxadmin -status</td>\n    <td>{10}</td>\n  </tr>\n  <tr>\n    <td>ps -ef | egrep \"omsagent|omi\"</td>\n    <td>{11}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/service_control restart</td>\n    <td>{12}</td>\n  </tr>\n  <tr>\n    <td>/opt/omi/bin/service_control restart</td>\n    <td>{13}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/service_control stop</td>\n    <td>{14}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/service_control disable</td>\n    <td>{15}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/service_control enable</td>\n    <td>{16}</td>\n  </tr>\n  <tr>\n    <td>/opt/microsoft/omsagent/bin/service_control start</td>\n    <td>{17}</td>\n  </tr>\n</table>\n</div>\n\"\"\".format(omiInstallStatus, omiInstallOut, omsagentInstallStatus, omsagentInstallOut, omsconfigInstallStatus, omsconfigInstallOut, scxInstallStatus, scxInstallOut, waagentOut, onboardStatus, omiRunStatus, psefomsagent, omsagentRestart, omiRestart, serviceStop, serviceDisable, serviceEnable, serviceStart)\n\n    f.write(message)\n    f.close()\n\ndef dist_status():\n    f = open('/home/scratch/omsresults.status', 'w+')\n    if os.system('/opt/microsoft/omsagent/bin/omsadmin.sh -l') == 0:\n        detect_workspace_id()\n        
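# Added descriptive note: re-read the onboarding status and compare it against the workspace id detected above.\n        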
x_out = subprocess.check_output('/opt/microsoft/omsagent/bin/omsadmin.sh -l', shell=True)\n        if x_out.rstrip() == \"No Workspace\":\n            status_message = \"Onboarding Failed\"\n        elif re.search('[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', x_out).group(0) == workspace_id:\n            status_message = \"Agent Found\"\n        else:\n            status_message = \"Onboarding Failed\"\n    else:\n        status_message = \"Agent Not Found\"\n    f.write(status_message)\n    f.close()\n\ndef sorted_dir(folder):\n    def getmtime(name):\n        path = os.path.join(folder, name)\n        return os.path.getmtime(path)\n\n    return sorted(os.listdir(folder), key=getmtime, reverse=True)\n\ndef copy_oms_logs():\n    omslogfile = \"\"\n    split_name = vm_dist.split(' ')\n    split_ver = vm_ver.split('.')\n    if vm_dist.startswith('Red Hat'):\n        omslogfile = '/home/scratch/{0}-omsagent.log'.format((split_name[0]+split_name[1]).lower() + split_ver[0])\n    else:\n        omslogfile = '/home/scratch/{0}-omsagent.log'.format(split_name[0].lower() + split_ver[0])\n    omslogfileOpen = open(omslogfile, 'a+')\n    omsagent_file = '/var/opt/microsoft/omsagent/{0}/log/omsagent.log'.format(workspace_id)\n    write_log_command(omslogfileOpen, '\\n OmsAgent Logs:\\n')\n    append_file(omsagent_file, omslogfileOpen)\n    omslogfileOpen.close()\n\ndef copy_extension_logs():\n    extlogfile = \"\"\n    split_name = vm_dist.split(' ')\n    split_ver = vm_ver.split('.')\n    if vm_dist.startswith('Red Hat'):\n        extlogfile = '/home/scratch/{0}-extnwatcher.log'.format((split_name[0]+split_name[1]).lower() + split_ver[0])\n    else:\n        extlogfile = '/home/scratch/{0}-extnwatcher.log'.format(split_name[0].lower() + split_ver[0])\n\n    extlogfileOpen = open(extlogfile, 'a+')\n    oms_azure_ext_dir = '/var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/'\n    ext_contents = sorted_dir(oms_azure_ext_dir)\n    if ext_contents[0].startswith('extension') or ext_contents[0].startswith('watcher'):\n        write_log_command(extlogfileOpen, '\\n Extension Logs:\\n')\n        append_file(oms_azure_ext_dir + '/extension.log', extlogfileOpen)\n        write_log_command(extlogfileOpen, '\\n Watcher Logs:\\n')\n        append_file(oms_azure_ext_dir + '/watcher.log', extlogfileOpen)\n    else:\n        write_log_command(extlogfileOpen, '\\n Extension Logs:\\n')\n        append_file(oms_azure_ext_dir + ext_contents[0] + '/extension.log', extlogfileOpen)\n        write_log_command(extlogfileOpen, '\\n Watcher Logs:\\n')\n        append_file(oms_azure_ext_dir + ext_contents[0] + '/watcher.log', extlogfileOpen)\n    extlogfileOpen.close()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/perf.conf",
    "content": "\n<source>\n  type oms_omi\n  object_name \"Logical Disk\"\n  instance_regex \".*\"\n  counter_name_regex \"(% Used Inodes|Free Megabytes|% Used Space|Disk Transfers/sec|Disk Reads/sec|Disk Writes/sec)\"\n  interval 10s\n  omi_mapping_path /etc/opt/microsoft/omsagent/<workspace-id>/conf/omsagent.d/omi_mapping.json\n</source>\n\n<source>\n  type oms_omi\n  object_name \"Processor\"\n  instance_regex \".*\"\n  counter_name_regex \"(% Processor Time|% Privileged Time)\"\n  interval 10s\n  omi_mapping_path /etc/opt/microsoft/omsagent/<workspace-id>/conf/omsagent.d/omi_mapping.json\n</source>\n\n<source>\n  type oms_omi\n  object_name \"Network\"\n  instance_regex \".*\"\n  counter_name_regex \"(Total Bytes Transmitted|Total Bytes Received)\"\n  interval 10s\n  omi_mapping_path /etc/opt/microsoft/omsagent/<workspace-id>/conf/omsagent.d/omi_mapping.json\n</source>\n\n<source>\n  type oms_omi\n  object_name \"Memory\"\n  instance_regex \".*\"\n  counter_name_regex \"(Available MBytes Memory|% Used Memory|% Used Swap Space)\"\n  interval 10s\n  omi_mapping_path /etc/opt/microsoft/omsagent/<workspace-id>/conf/omsagent.d/omi_mapping.json\n</source>\n"
  },
  {
    "path": "OmsAgent/extension-test/omsfiles/rsyslog-oms.conf",
    "content": "\n# OMS Syslog collection for workspace a0d166ba-98b9-402e-b805-172ed62150a4\ndaemon.=alert;daemon.=crit;daemon.=debug;daemon.=emerg;daemon.=err;daemon.=info;daemon.=notice;daemon.=warning\t@127.0.0.1:25224\nkern.=alert;kern.=crit;kern.=debug;kern.=emerg;kern.=err;kern.=info;kern.=notice;kern.=warning\t@127.0.0.1:25224\nsyslog.=alert;syslog.=crit;syslog.=debug;syslog.=emerg;syslog.=err;syslog.=info;syslog.=notice;syslog.=warning\t@127.0.0.1:25224\ncron.=alert;cron.=crit;cron.=debug;cron.=emerg;cron.=err;cron.=info;cron.=notice;cron.=warning\t@127.0.0.1:25224"
  },
  {
    "path": "OmsAgent/extension-test/parameters.json",
    "content": "{\n  \"resource\": \"https://management.azure.com\",\n  \"authority host url\": \"https://login.microsoftonline.com\",\n  \"resource group\": \"<resource-group-name>\",\n  \"location\": \"<location>\",\n  \"username\": \"<username>\",\n  \"ssh private\": \"<ssh-private-keyfile-path>\",\n  \"nsg resource group\": \"<nsg-resource-group>\",\n  \"nsg\": \"<nsg>\",\n  \"size\": \"<size>\",\n  \"workspace\": \"<workspace-name>\",\n  \"key vault\": \"<key-vault-name>\",\n  \"old version\": \"<old-extesion-version>\"\n}\n"
  },
  {
    "path": "OmsAgent/extension-test/verify_e2e.py",
    "content": "'''Verify end-to-end data transmission.'''\n\nimport json\nimport os\nimport re\nimport sys\nimport subprocess\n\nimport adal\nimport requests\n\nENDPOINT = ('https://management.azure.com/subscriptions/{}/resourcegroups/'\n            '{}/providers/Microsoft.OperationalInsights/workspaces/{}/api/'\n            'query?api-version=2017-01-01-preview')\n\ndef check_e2e(hostname, timespan = 'PT30M'):\n    '''\n    Verify data from computer with provided hostname is\n    present in the Log Analytics workspace specified in\n    parameters.json, append results to e2eresults.json\n    '''\n    global success_count\n    global success_sources\n    global failed_sources\n    success_count = 0\n    failed_sources = []\n    success_sources = []\n\n    with open('{0}/parameters.json'.format(os.getcwd()), 'r') as f:\n        parameters = f.read()\n        if re.search(r'\"<.*>\"', parameters):\n            print('Please replace placeholders in parameters.json')\n            exit()\n        parameters = json.loads(parameters)\n\n    key_vault = parameters['key vault']\n    tenant_id = str(json.loads(subprocess.check_output('az keyvault secret show --name tenant-id --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\n    app_id = str(json.loads(subprocess.check_output('az keyvault secret show --name app-id --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\n    app_secret = str(json.loads(subprocess.check_output('az keyvault secret show --name app-secret --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\n    authority_url = parameters['authority host url'] + '/' + tenant_id\n    context = adal.AuthenticationContext(authority_url)\n    token = context.acquire_token_with_client_credentials(\n        parameters['resource'],\n        app_id,\n        app_secret)\n\n    head = {'Authorization': 'Bearer ' + token['accessToken']}\n    subscription = str(json.loads(subprocess.check_output('az keyvault secret show --name subscription-id --vault-name {0}'.format(key_vault), shell=True))[\"value\"])\n    resource_group = parameters['resource group']\n    workspace = parameters['workspace']\n    url = ENDPOINT.format(subscription, resource_group, workspace)\n\n    sources = ['Heartbeat', 'Syslog', 'Perf', 'ApacheAccess_CL', 'MySQL_CL', 'Custom_Log_CL']\n    distro = hostname.split('-')[0]\n    results = {}\n    results[distro] = {}\n\n    print('Verifying data from computer {}'.format(hostname))\n    for s in sources:\n        query = '%s | where Computer == \\'%s\\' | take 1' % (s, hostname)\n        r = requests.post(url, headers=head, json={'query':query, 'timespan':timespan})\n\n        if r.status_code == requests.codes.ok:\n            r = (json.loads(r.text)['Tables'])[0]\n            if len(r['Rows']) < 1:\n                results[distro][s] = 'Failure: no logs'\n                failed_sources.append(s)\n            else:\n                results[distro][s] = 'Success'\n                success_count += 1\n                success_sources.append(s)\n        else:\n            results[distro][s] = 'Failure: {} {}'.format(r.status_code, r.text)\n\n    results[distro] = [results[distro]]\n    print(results)\n    return results\n\ndef main():\n    '''Check for data with given hostname.'''\n    if len(sys.argv) == 2:\n        check_e2e(sys.argv[1])\n    else:\n        print('Hostname not provided')\n        exit()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OmsAgent/keys/dscgpgkey.asc",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v1.4.7 (GNU/Linux)\n\nmQENBFcDALYBCADAKoZhZlJxGNGWzqV+1OG1xiQeoowKhssGAKvd+buXCGISZJwT\nLXZqIcIiLP7pqdcZWtE9bSc7yBY2MalDp9Liu0KekywQ6VVX1T72NPf5Ev6x6DLV\n7aVWsCzUAF+eb7DC9fPuFLEdxmOEYoPjzrQ7cCnSV4JQxAqhU4T6OjbvRazGl3ag\nOeizPXmRljMtUUttHQZnRhtlzkmwIrUivbfFPD+fEoHJ1+uIdfOzZX8/oKHKLe2j\nH632kvsNzJFlROVvGLYAk2WRcLu+RjjggixhwiB+Mu/A8Tf4V6b+YppS44q8EvVr\nM+QvY7LNSOffSO6Slsy9oisGTdfE39nC7pVRABEBAAG0NU1pY3Jvc29mdCAoUmVs\nZWFzZSBTaWduaW5nKSA8ZHNjZ3Bna2V5QG1pY3Jvc29mdC5jb20+iQE1BBMBAgAf\nBQJXAwC2AhsDBgsJCAcDAgQVAggDAxYCAQIeAQIXgAAKCRAgVBo93jISlLZYB/44\nDIa5AX9csM1N0+kddBHb23NSRkEFMlD+rTjiTk/Nsrh8RghPlHlXEd/Rpxf2c+xJ\nTjPrpdL0dHzou5ZEdTVtCeVCV0YA2cZk+RfhthHnX5M1m0suu5HgSEHfKyqlfJwZ\nuYapagLoE4jXbQnw9UJgdSpa8OFjOcyZ9oNCn9IHG3W7JAV1+upUBKM/iwHTuVrQ\nyrbYBlqVRWi4s3nDpqEZMBSq1KJucHIt2uOqAlz9hRUXjWNsD+Ff+Nn1EvkDdzn5\nKrRUgA9bSp6FPBEluIO/QFA6aTW4MrujCHCrpiDPxFGg7WTOXS8tg5AJ/d/l/pOp\n5/E3CO1YTCgEMl34eOdU\n=JQx7\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "OmsAgent/keys/msgpgkey.asc",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v1.4.7 (GNU/Linux)\n\nmQENBFcDBSwBCADAKoZhZlJxGNGWzqV+1OG1xiQeoowKhssGAKvd+buXCGISZJwT\nLXZqIcIiLP7pqdcZWtE9bSc7yBY2MalDp9Liu0KekywQ6VVX1T72NPf5Ev6x6DLV\n7aVWsCzUAF+eb7DC9fPuFLEdxmOEYoPjzrQ7cCnSV4JQxAqhU4T6OjbvRazGl3ag\nOeizPXmRljMtUUttHQZnRhtlzkmwIrUivbfFPD+fEoHJ1+uIdfOzZX8/oKHKLe2j\nH632kvsNzJFlROVvGLYAk2WRcLu+RjjggixhwiB+Mu/A8Tf4V6b+YppS44q8EvVr\nM+QvY7LNSOffSO6Slsy9oisGTdfE39nC7pVRABEBAAG0NE1pY3Jvc29mdCAoUmVs\nZWFzZSBTaWduaW5nKSA8bXNncGdrZXlAbWljcm9zb2Z0LmNvbT6JATUEEwECAB8F\nAlcDBSwCGwMGCwkIBwMCBBUCCAMDFgIBAh4BAheAAAoJEMTsSeVEvEF40uoIAJdJ\nyxhQLo/VntUHUrTita63CbUCDw1AAb3ltgXPIfSSnhotEb8KQrJjghu8XO3/Swre\ngeB6DuYm77tUIHOoA3SiOXi67EfhwM1iaRDzorf+U/59R0evQ57IWrA/g4Ceh0CJ\npicFwLUe0BKKVgxtTvOxPa08P1znA5IVWR6fruqHyy9TbYYSYYV7B+Cw3KS+JCzw\nfV/nH0F9slgxgcwhzezk1b0glGfCuiswnK7nHxHYW7B+vjfRd+Seq8lM1CYozbe5\n6TPbfgyisiEsZDulEU0jpGa2q1UwnKaP1A7mgTxRgLmmg/EzC3MTzvSqvQI6Xvme\nnHX/CNyXbumiyqsH3Tw=\n=yTH1\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "OmsAgent/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.EnterpriseCloud.Monitoring</ProviderNameSpace>\n  <Type>OmsAgentForLinux</Type>\n  <Version>1.13.19</Version>\n  <Label>Microsoft Operations Management Suite Agent for Linux</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Operations Management Suite Agent for Linux</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Microsoft/OMS-Agent-for-Linux/blob/master/LICENSE</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Microsoft/OMS-Agent-for-Linux</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "OmsAgent/omsagent.py",
    "content": "#!/usr/bin/env python\n#\n# OmsAgentForLinux Extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport os.path\nimport signal\nimport pwd\nimport grp\nimport re\nimport traceback\nimport time\nimport platform\nimport subprocess\nimport json\nimport base64\nimport inspect\nimport watcherutil\nimport shutil\n\nfrom threading import Thread\n\ntry:\n    from Utils.WAAgentUtil import waagent\n    import Utils.HandlerUtil as HUtil\nexcept Exception as e:\n    # These utils have checks around the use of them; this is not an exit case\n    print('Importing utils failed with error: {0}'.format(e))\n\nif sys.version_info[0] == 3:\n    import urllib.request as urllib\n    from urllib.parse import urlparse\n    import urllib.error as urlerror\n\nelif sys.version_info[0] == 2:\n    import urllib2 as urllib\n    from urlparse import urlparse\n    import urllib2 as urlerror\n\n# This monkey patch duplicates the one made in the waagent import above.\n# It is necessary because on 2.6, the waagent monkey patch appears to be overridden\n# by the python-future subprocess.check_output backport.\nif sys.version_info < (2,7):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode = returncode\n            self.cmd = cmd\n            self.output = output\n\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n    subprocess.check_output = check_output\n    subprocess.CalledProcessError = CalledProcessError\n\n# Global Variables\nProceedOnSigningVerificationFailure = True\nPackagesDirectory = 'packages'\nkeysDirectory = 'keys'\n# Below file version will be replaced during OMS-Build time.\nBundleFileName = 'omsagent-0.0.0-0.universal.x64.sh'\nGUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'\nGUIDOnlyRegex = r'^' + GUIDRegex + '$'\nSCOMCertIssuerRegex = r'^[\\s]*Issuer:[\\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'\nSCOMPort = 1270\nPostOnboardingSleepSeconds = 5\nInitialRetrySleepSeconds = 30\nIsUpgrade = False\n\n# Paths\nOMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'\nOMSAgentServiceScript = 
'/opt/microsoft/omsagent/bin/service_control'\nOMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'\nOMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'\nEtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'\nVarOMSAgentPath = '/var/opt/microsoft/omsagent/'\nSCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'\nExtensionStateSubdirectory = 'state'\n\n# Commands\n# Always use upgrade - will handle install if scx, omi are not installed or upgrade if they are.\nInstallCommandTemplate = '{0} --upgrade {1}'\nUninstallCommandTemplate = '{0} --remove'\nWorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)\nOnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'\n\nRestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)\nDisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)\n\nInstallExtraPackageCommandApt = 'apt-get -y update && apt-get -y install {0}'\nSkipDigestCmdTemplate = '{0} --noDigest'\n\n# Cloud Environments\nPublicCloudName     = \"AzurePublicCloud\"\nFairfaxCloudName    = \"AzureUSGovernmentCloud\"\nMooncakeCloudName   = \"AzureChinaCloud\"\nUSNatCloudName      = \"USNat\" # EX\nUSSecCloudName      = \"USSec\" # RX\nDefaultCloudName    = PublicCloudName # Fallback\n\nCloudDomainMap = {\n    PublicCloudName:   \"opinsights.azure.com\",\n    FairfaxCloudName:  \"opinsights.azure.us\",\n    MooncakeCloudName: \"opinsights.azure.cn\",\n    USNatCloudName:    \"opinsights.azure.eaglex.ic.gov\",\n    USSecCloudName:    \"opinsights.azure.microsoft.scloud\"\n}\n\n# Error codes\nDPKGLockedErrorCode = 55 #56, temporary as it excludes from SLA\nInstallErrorCurlNotInstalled = 55 #64, temporary as it excludes from SLA\nEnableErrorOMSReturned403 = 5\nEnableErrorOMSReturnedNon200 = 6\nEnableErrorResolvingHost = 7\nEnableErrorOnboarding = 8\nEnableCalledBeforeSuccessfulInstall = 52 # since install is a missing dependency\nUnsupportedOpenSSL = 55 #60, temporary as it excludes from SLA\nUnsupportedGpg = 55\n# OneClick error codes\nOneClickErrorCode = 40\nManagedIdentityExtMissingErrorCode = 41\nManagedIdentityExtErrorCode = 42\nMetadataAPIErrorCode = 43\nOMSServiceOneClickErrorCode = 44\nMissingorInvalidParameterErrorCode = 11\nUnwantedMultipleConnectionsErrorCode = 10\nCannotConnectToOMSErrorCode = 55\nUnsupportedOperatingSystem = 51\n\n# Configuration\nHUtilObject = None\nSettingsSequenceNumber = None\nHandlerEnvironment = None\nSettingsDict = None\n\n# OneClick Constants\nManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'\nGUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'\nOAuthTokenResource = 'https://management.core.windows.net/'\nOMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'\nAutoManagedWorkspaceCreationSleepSeconds = 20\n\n# agent permissions\nAgentUser='omsagent'\nAgentGroup='omiusers'\n\n\n\"\"\"\nWhat needs to be packaged to make the signing work:\n    keys\n        dscgpgkey.asc\n        msgpgkey.asc\n    packages\n        omsagent-*.universal.x64.asc\n        omsagent-*.universal.x64.sha256sums\n\"\"\"\ndef verifyShellBundleSigningAndChecksum():\n    cert_directory = os.path.join(os.getcwd(), PackagesDirectory)\n    keys_directory = os.path.join(os.getcwd(), keysDirectory)\n    # import GPG key\n    dscGPGKeyFilePath = os.path.join(keys_directory, 'dscgpgkey.asc')\n    if not os.path.isfile(dscGPGKeyFilePath):\n        raise Exception(\"Unable to find the dscgpgkey.asc file at \" + dscGPGKeyFilePath)\n\n    
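# Added descriptive note: the DSC public key is imported first; the keyring built from it is what the gpg --verify call below checks the .asc signature against.\n    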
importGPGKeyCommand = \"sh ImportGPGkey.sh \" + dscGPGKeyFilePath\n    exit_code, output = run_command_with_retries_output(importGPGKeyCommand, retries = 0, retry_check = retry_skip, check_error = False)\n\n    # Check that we can find the keyring file\n    keyringFilePath = os.path.join(keys_directory, 'keyring.gpg')\n    if not os.path.isfile(keyringFilePath):\n        raise Exception(\"Unable to find the Extension keyring file at \" + keyringFilePath)\n\n    # Check that we can find the asc file\n    bundleFileName, file_ext = os.path.splitext(BundleFileName)\n    ascFilePath = os.path.join(cert_directory, bundleFileName + \".asc\")\n    if not os.path.isfile(ascFilePath):\n        raise Exception(\"Unable to find the OMS shell bundle asc file at \" + ascFilePath)\n\n    # check that we can find the SHA256 sums file\n    sha256SumsFilePath = os.path.join(cert_directory, bundleFileName + \".sha256sums\")\n    if not os.path.isfile(sha256SumsFilePath):\n        raise Exception(\"Unable to find the OMS shell bundle SHA256 sums file at \" + sha256SumsFilePath)\n\n    # Verify the SHA256 sums file with the keyring and asc files\n    verifySha256SumsCommand = \"HOME=\" + keysDirectory + \" gpg --no-default-keyring --keyring \" + keyringFilePath + \" --verify \" + ascFilePath  + \" \" + sha256SumsFilePath\n    exit_code, output = run_command_with_retries_output(verifySha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)\n    if exit_code != 0:\n        raise Exception(\"Failed to verify SHA256 sums file at \" + sha256SumsFilePath)\n\n    # Perform SHA256 sums to verify shell bundle\n    hutil_log_info(\"Perform SHA256 sums to verify shell bundle\")\n    performSha256SumsCommand = \"cd %s; sha256sum -c %s\" % (cert_directory, sha256SumsFilePath)\n    exit_code, output = run_command_with_retries_output(performSha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)\n    if exit_code != 0:\n        raise Exception(\"Failed to verify shell bundle with the SHA256 sums file at \" + sha256SumsFilePath)\n\ndef main():\n    \"\"\"\n    Main method\n    Parse out operation from argument, invoke the operation, and finish.\n    \"\"\"\n    init_waagent_logger()\n    waagent_log_info('OmsAgentForLinux started to handle.')\n    global IsUpgrade\n\n    # Determine the operation being executed\n    operation = None\n    try:\n        option = sys.argv[1]\n        if re.match('^([-/]*)(disable)', option):\n            operation = 'Disable'\n        elif re.match('^([-/]*)(uninstall)', option):\n            operation = 'Uninstall'\n        elif re.match('^([-/]*)(install)', option):\n            operation = 'Install'\n        elif re.match('^([-/]*)(enable)', option):\n            operation = 'Enable'\n        elif re.match('^([-/]*)(update)', option):\n            operation = 'Update'\n            IsUpgrade = True\n        elif re.match('^([-/]*)(telemetry)', option):\n            operation = 'Telemetry'\n    except Exception as e:\n        waagent_log_error(str(e))\n\n    if operation is None:\n        log_and_exit('Unknown', 1, 'No valid operation provided')\n\n    # Set up for exit code and any error messages\n    exit_code = 0\n    message = '{0} succeeded'.format(operation)\n\n    # Clean status file to mitigate diskspace issues on small VMs\n    status_files = [\n            \"/var/opt/microsoft/omsconfig/status/dscperformconsistency\",\n            \"/var/opt/microsoft/omsconfig/status/dscperforminventory\",\n            
\"/var/opt/microsoft/omsconfig/status/dscsetlcm\",\n            \"/var/opt/microsoft/omsconfig/status/omsconfighost\"\n        ]\n    for sf in status_files:\n        if os.path.isfile(sf):\n            if sf.startswith(\"/var/opt/microsoft/omsconfig/status\"):\n                try:\n                    os.remove(sf)\n                except Exception as e:\n                    hutil_log_info('Error removing telemetry status file before installation: {0}'.format(sf))\n                    hutil_log_info('Exception info: {0}'.format(traceback.format_exc()))\n\n    exit_code = check_disk_space_availability()\n    if exit_code != 0:\n        message = '{0} failed due to low disk space'.format(operation)\n        log_and_exit(operation, exit_code, message)\n\n    exit_if_gpg_unavailable(operation)\n\n    # Invoke operation\n    try:\n        global HUtilObject\n        HUtilObject = parse_context(operation)\n\n        # Verify shell bundle signing\n        try:\n            hutil_log_info(\"Start signing verification\")\n            verifyShellBundleSigningAndChecksum()\n            hutil_log_info(\"ShellBundle signing verification succeeded\")\n        except Exception as ex:\n            errmsg = \"ShellBundle signing verification failed with '%s'\" % ex.message\n            if ProceedOnSigningVerificationFailure:\n                hutil_log_error(errmsg)\n            else:\n                log_and_exit(operation, errmsg)\n\n        # invoke operation\n        exit_code, output = operations[operation]()\n\n        # Exit code 1 indicates a general problem that doesn't have a more\n        # specific error code; it often indicates a missing dependency\n        if exit_code == 1 and operation == 'Install':\n            message = 'Install failed with exit code 1. Please check that ' \\\n                      'dependencies are installed. For details, check logs ' \\\n                      'in /var/log/azure/Microsoft.EnterpriseCloud.' \\\n                      'Monitoring.OmsAgentForLinux'\n        elif exit_code == 127 and operation == 'Install':\n            # happens if shell bundle couldn't be extracted due to low space or missing dependency\n            exit_code = 52 # since it is a missing dependency\n            message = 'Install failed with exit code 127. Please check that ' \\\n                      'dependencies are installed. For details, check logs ' \\\n                      'in /var/log/azure/Microsoft.EnterpriseCloud.' 
\\\n                      'Monitoring.OmsAgentForLinux'\n        elif exit_code == DPKGLockedErrorCode and operation == 'Install':\n            message = 'Install failed with exit code {0} because the ' \\\n                      'package manager on the VM is currently locked: ' \\\n                      'please wait and try again'.format(DPKGLockedErrorCode)\n        elif exit_code != 0:\n            message = '{0} failed with exit code {1} {2}'.format(operation,\n                                                             exit_code, output)\n    except OmsAgentForLinuxException as e:\n        exit_code = e.error_code\n        message = e.get_error_message(operation)\n    except Exception as e:\n        exit_code = 1\n        message = '{0} failed with error: {1}\\n' \\\n                  'Stacktrace: {2}'.format(operation, e,\n                                           traceback.format_exc())\n\n    # Finish up and log messages\n    log_and_exit(operation, exit_code, message)\n\ndef check_disk_space_availability():\n    \"\"\"\n    Check if there is the required space on the machine.\n    \"\"\"\n    try:\n        if get_free_space_mb(\"/var\") < 500 or get_free_space_mb(\"/etc\") < 500 or get_free_space_mb(\"/opt\") < 500:\n            # 52 is the exit code for missing dependency i.e. disk space\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            return 52\n        else:\n            return 0\n    except:\n        print('Failed to check disk usage.')\n        return 0\n\n\ndef get_free_space_mb(dirname):\n    \"\"\"\n    Get the free space in MB in the directory path.\n    \"\"\"\n    st = os.statvfs(dirname)\n    return (st.f_bavail * st.f_frsize) // (1024 * 1024)\n\ndef stop_telemetry_process():\n    pids_filepath = os.path.join(os.getcwd(),'omstelemetry.pid')\n\n    # kill existing telemetry watcher\n    if os.path.exists(pids_filepath):\n        with open(pids_filepath, \"r\") as f:\n            for pid in f.readlines():\n                # Verify the pid actually belongs to omsagent.\n                cmd_file = os.path.join(\"/proc\", str(pid.strip(\"\\n\")), \"cmdline\")\n                if os.path.exists(cmd_file):\n                    with open(cmd_file, \"r\") as pidf:\n                        cmdline = pidf.readlines()\n                        if cmdline[0].find(\"omsagent.py\") >= 0 and cmdline[0].find(\"-telemetry\") >= 0:\n                            kill_cmd = \"kill \" + pid\n                            run_command_and_log(kill_cmd)\n        run_command_and_log(\"rm \"+pids_filepath)\n\ndef start_telemetry_process():\n    \"\"\"\n    Start telemetry process that performs periodic monitoring activities\n    :return: None\n\n    \"\"\"\n    stop_telemetry_process()\n\n    #start telemetry watcher\n    omsagent_filepath = os.path.join(os.getcwd(),'omsagent.py')\n    args = ['python{0}'.format(sys.version_info[0]), omsagent_filepath, '-telemetry']\n    log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')\n    hutil_log_info('start watcher process '+str(args))\n    subprocess.Popen(args, stdout=log, stderr=log)\n\ndef telemetry():\n    pids_filepath = os.path.join(os.getcwd(), 'omstelemetry.pid')\n    py_pid = os.getpid()\n    with open(pids_filepath, 'w') as f:\n        f.write(str(py_pid) + '\\n')\n\n    if HUtilObject is not None:\n        watcher = watcherutil.Watcher(HUtilObject.error, HUtilObject.log)\n\n        watcher_thread = Thread(target = watcher.watch)\n       
 self_mon_thread = Thread(target = watcher.monitor_health)\n\n        watcher_thread.start()\n        self_mon_thread.start()\n\n        watcher_thread.join()\n        self_mon_thread.join()\n\n    return 0, \"\"\n\ndef prepare_update():\n    \"\"\"\n    Copy / move configuration directory to the backup\n    \"\"\"\n\n    # First check if backup directory was previously created for given workspace.\n    # If it is created with all the files, we need not move the files again.\n\n    public_settings, _ = get_settings()\n    workspaceId = public_settings.get('workspaceId')\n    etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)\n    etc_move_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)\n    if (not os.path.isdir(etc_move_path)):\n        shutil.move(etc_remove_path, etc_move_path)\n\n    return 0, \"\"\n\ndef restore_state(workspaceId):\n    \"\"\"\n    Copy / move state from backup to the expected location.\n    \"\"\"\n    try:\n        etc_backup_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)\n        etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)\n        if (os.path.isdir(etc_backup_path) and not os.path.isdir(etc_final_path)):\n            shutil.move(etc_backup_path, etc_final_path)\n    except Exception as e:\n        hutil_log_error(\"Error while restoring the state. Exception : \"+traceback.format_exc())\n\n\ndef install():\n    \"\"\"\n    Ensure that this VM distro and version are supported.\n    Install the OMSAgent shell bundle, using retries.\n    Note: install operation times out from WAAgent at 15 minutes, so do not\n    wait longer.\n    \"\"\"\n    exit_if_vm_not_supported('Install')\n\n    public_settings, protected_settings = get_settings()\n    if public_settings is None:\n        raise ParameterMissingException('Public configuration must be ' \\\n                                        'provided')\n    workspaceId = public_settings.get('workspaceId')\n    check_workspace_id(workspaceId)\n\n    # Restore any state that was backed up for the given workspace during a previous update.\n    restore_state(workspaceId)\n\n    # In the case where a SCOM connection is already present, we should not\n    # create conflicts by installing the OMSAgent packages\n    stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')\n    if (stopOnMultipleConnections is not None\n            and stopOnMultipleConnections is True):\n        detect_multiple_connections(workspaceId)\n\n    package_directory = os.path.join(os.getcwd(), PackagesDirectory)\n    bundle_path = os.path.join(package_directory, BundleFileName)\n\n    os.chmod(bundle_path, 100)\n    skipDockerProviderInstall = public_settings.get(\n        'skipDockerProviderInstall')\n\n    if (skipDockerProviderInstall is not None\n            and skipDockerProviderInstall is True):\n        cmd = InstallCommandTemplate.format(\n            bundle_path, '--skip-docker-provider-install')\n    else:\n        cmd = InstallCommandTemplate.format(bundle_path, '')\n\n    noDigest = public_settings.get(\n        'noDigest')\n\n    if (noDigest is not None\n            and noDigest is True):\n        cmd = SkipDigestCmdTemplate.format(cmd)\n\n    hutil_log_info('Running command \"{0}\"'.format(cmd))\n\n    # Retry, since install can fail due to concurrent package operations\n    exit_code, output = run_command_with_retries_output(cmd, retries = 10,\n                                         retry_check = retry_if_dpkg_locked_or_curl_is_not_found,\n                             
            final_check = final_check_if_dpkg_locked)\n\n    return exit_code, output\n\ndef check_kill_process(pstring):\n    for line in os.popen(\"ps ax | grep \" + pstring + \" | grep -v grep\"):\n        fields = line.split()\n        pid = fields[0]\n        os.kill(int(pid), signal.SIGKILL)\n\ndef uninstall():\n    \"\"\"\n    Uninstall the OMSAgent shell bundle.\n    This is a somewhat soft uninstall. It is not a purge.\n    Note: uninstall operation times out from WAAgent at 5 minutes\n    \"\"\"\n    package_directory = os.path.join(os.getcwd(), PackagesDirectory)\n    bundle_path = os.path.join(package_directory, BundleFileName)\n    global IsUpgrade\n\n    os.chmod(bundle_path, 100)\n    cmd = UninstallCommandTemplate.format(bundle_path)\n    hutil_log_info('Running command \"{0}\"'.format(cmd))\n\n    # Retry, since uninstall can fail due to concurrent package operations\n    try:\n        exit_code, output = run_command_with_retries_output(cmd, retries = 5,\n                                            retry_check = retry_if_dpkg_locked_or_curl_is_not_found,\n                                            final_check = final_check_if_dpkg_locked)\n    except Exception as e:\n        # try to force clean the installation\n        try:\n            check_kill_process(\"omsagent\")\n            exit_code = 0\n            output = 'Uninstall forced by killing omsagent processes after error: {0}'.format(e)\n        except Exception as ex:\n            exit_code = 1\n            output = 'Uninstall failed with error: {0}\\n' \\\n                    'Stacktrace: {1}'.format(ex, traceback.format_exc())\n\n    if IsUpgrade:\n        IsUpgrade = False\n    else:\n        remove_workspace_configuration()\n\n    return exit_code, output\n\ndef enable():\n    \"\"\"\n    Onboard the OMSAgent to the specified OMS workspace.\n    This includes enabling the OMS process on the VM.\n    This call will return non-zero or throw an exception if\n    the settings provided are incomplete or incorrect.\n    Note: enable operation times out from WAAgent at 5 minutes\n    \"\"\"\n    exit_if_vm_not_supported('Enable')\n\n    public_settings, protected_settings = get_settings()\n\n    if public_settings is None:\n        raise ParameterMissingException('Public configuration must be ' \\\n                                        'provided')\n    if protected_settings is None:\n        raise ParameterMissingException('Private configuration must be ' \\\n                                        'provided')\n\n    vmResourceId = protected_settings.get('vmResourceId')\n\n    # If vmResourceId is not provided in private settings, get it from metadata API\n    if vmResourceId is None or not vmResourceId:\n        vmResourceId = get_vmresourceid_from_metadata()\n        hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))\n\n    if vmResourceId is None:\n        hutil_log_info('This may be a classic VM')\n\n    enableAutomaticManagement = public_settings.get('enableAutomaticManagement')\n\n    if (enableAutomaticManagement is not None\n           and enableAutomaticManagement is True):\n        hutil_log_info('enableAutomaticManagement is set to true; the ' \\\n                       'workspace ID and key will be determined by the OMS ' \\\n                       'service.')\n\n        workspaceInfo = retrieve_managed_workspace(vmResourceId)\n        if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo\n                or 'WorkspaceKey' not in workspaceInfo):\n            raise OneClickException('Workspace info was not determined')\n        else:\n            # Note: do NOT 
log workspace keys!\n            hutil_log_info('Managed workspaceInfo has been retrieved')\n            workspaceId = workspaceInfo['WorkspaceId']\n            workspaceKey = workspaceInfo['WorkspaceKey']\n            try:\n                check_workspace_id_and_key(workspaceId, workspaceKey)\n            except InvalidParameterError as e:\n                raise OMSServiceOneClickException('Received invalid ' \\\n                                                  'workspace info: ' \\\n                                                  '{0}'.format(e))\n\n    else:\n        workspaceId = public_settings.get('workspaceId')\n        workspaceKey = protected_settings.get('workspaceKey')\n        check_workspace_id_and_key(workspaceId, workspaceKey)\n\n    # Check if omsadmin script is available\n    if not os.path.exists(OMSAdminPath):\n        log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,\n                     'OMSAgent onboarding script {0} does not exist. Enable ' \\\n                     'cannot be called before install.'.format(OMSAdminPath))\n\n    vmResourceIdParam = ''\n    if vmResourceId:\n        vmResourceIdParam = '-a {0}'.format(vmResourceId)\n\n    proxy = protected_settings.get('proxy')\n    proxyParam = ''\n    if proxy is not None:\n        proxyParam = '-p {0}'.format(proxy)\n\n    # get domain from protected settings\n    domain = protected_settings.get('domain')\n    if domain is None:\n        # detect opinsights domain using IMDS\n        domain = get_azure_cloud_domain()\n    else:\n        hutil_log_info(\"Domain retrieved from protected settings '{0}'\".format(domain))\n\n    domainParam = ''\n    if domain:\n        domainParam = '-d {0}'.format(domain)\n\n    optionalParams = '{0} {1} {2}'.format(domainParam, proxyParam, vmResourceIdParam)\n    onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,\n                                                          workspaceId,\n                                                          workspaceKey,\n                                                          optionalParams)\n\n    hutil_log_info('Handler initiating onboarding.')\n    exit_code, output = run_command_with_retries_output(onboard_cmd, retries = 5,\n                                         retry_check = retry_onboarding,\n                                         final_check = raise_if_no_internet,\n                                         check_error = True, log_cmd = False)\n\n    # now ensure the permissions and ownership are set recursively\n    try:\n        workspaceId = public_settings.get('workspaceId')\n        etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)\n        if (os.path.isdir(etc_final_path)):\n            uid = pwd.getpwnam(AgentUser).pw_uid\n            gid = grp.getgrnam(AgentGroup).gr_gid\n            os.chown(etc_final_path, uid, gid)\n            os.system('chmod {1} {0}'.format(etc_final_path, 750))\n\n            for root, dirs, files in os.walk(etc_final_path):\n                for d in dirs:\n                    os.chown(os.path.join(root, d), uid, gid)\n                    os.system('chmod {1} {0}'.format(os.path.join(root, d), 750))\n                for f in files:\n                    os.chown(os.path.join(root, f), uid, gid)\n                    os.system('chmod {1} {0}'.format(os.path.join(root, f), 640))\n    except:\n        hutil_log_info('Failed to set permissions for OMS directories, could potentially have issues uploading.')\n\n    if exit_code == 0:\n        # Create a marker file to denote the workspace that was\n        # onboarded using 
the extension. This will allow supporting\n        # multi-homing through the extension like Windows does\n        extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,\n                                             'conf/.azure_extension_marker')\n        if os.path.exists(extension_marker_path):\n            hutil_log_info('Extension marker file {0} already ' \\\n                           'created'.format(extension_marker_path))\n        else:\n            try:\n                open(extension_marker_path, 'w').close()\n                hutil_log_info('Created extension marker file ' \\\n                               '{0}'.format(extension_marker_path))\n            except IOError:\n                try:\n                    open(extension_marker_path, 'w+').close()\n                    hutil_log_info('Created extension marker file ' \\\n                               '{0}'.format(extension_marker_path))\n                except IOError as ex:\n                    hutil_log_error('Error creating {0} with error: ' \\\n                                '{1}'.format(extension_marker_path, ex))\n                    # we are having some kind of permissions issue creating the marker file\n                    output = \"Couldn't create marker file\"\n                    exit_code = 52 # treat as a missing dependency / permissions issue\n\n        # Sleep to prevent bombarding the processes, then restart all processes\n        # to resolve any issues with auto-started processes from --upgrade\n        time.sleep(PostOnboardingSleepSeconds)\n        if HUtilObject and HUtilObject.is_seq_smaller():\n            log_output = \"Current sequence number {0} is smaller than or equal to the sequence number of the most recently executed configuration; skipping omsagent process restart.\".format(HUtilObject._context._seq_no)\n            hutil_log_info(log_output)\n        else:\n            hutil_log_info('Restart omsagent service via service_control script.')\n            run_command_and_log(RestartOMSAgentServiceCommand)\n            # start telemetry process if enable is successful\n            start_telemetry_process()\n\n        # save sequence number\n        HUtilObject.save_seq()\n\n    return exit_code, output\n\ndef remove_workspace_configuration():\n    \"\"\"\n    This is needed to distinguish between extension removal and extension upgrade.\n    It's a workaround for the waagent upgrade routine calling 'remove' on the old\n    version before calling 'upgrade' on the new extension version.\n    In the upgrade case the workspace configuration must persist, whereas in the\n    remove case all of the files must be removed.\n    This method removes all files/folders from the workspace paths under Etc and Var.\n    \"\"\"\n\n    public_settings, _ = get_settings()\n    workspaceId = public_settings.get('workspaceId')\n    etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)\n    var_remove_path = os.path.join(VarOMSAgentPath, workspaceId)\n\n    shutil.rmtree(etc_remove_path, True)\n    shutil.rmtree(var_remove_path, True)\n    hutil_log_info('Removed OMS workspace configuration under the etc and var directories')\n\ndef is_arc_installed():\n    \"\"\"\n    Check if the system is on an Arc machine\n    \"\"\"\n    # Using systemctl to check this since Arc only supports VMs that have systemd\n    check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')\n    return check_arc == 0\n\ndef get_arc_endpoint():\n    \"\"\"\n    Find the endpoint for Arc Hybrid IMDS\n    \"\"\"\n    
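# The azcmagent drop-in read below typically contains a line of the form\n    #   Environment=\"IMDS_ENDPOINT=http://localhost:40342\"\n    # (the port shown is illustrative); the split() calls extract the value\n    # between IMDS_ENDPOINT= and the closing quote.\n    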
endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'\n    endpoint = ''\n    try:\n        with open(endpoint_filepath, 'r') as f:\n            data = f.read()\n        endpoint = data.split(\"\\\"IMDS_ENDPOINT=\")[1].split(\"\\\"\\n\")[0]\n    except:\n        hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))\n    return endpoint\n\ndef get_imds_endpoint():\n    \"\"\"\n    Find the endpoint for IMDS, whether Arc or not\n    \"\"\"\n    azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'\n    if (is_arc_installed()):\n        hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')\n        imds_endpoint = get_arc_endpoint()\n        if imds_endpoint:\n            imds_endpoint += '/metadata/instance?api-version=2019-08-15'\n        else:\n            # Fall back to the traditional IMDS endpoint; the cloud domain and VM\n            # resource id detection logic are resilient to failed queries to IMDS\n            imds_endpoint = azure_imds_endpoint\n            hutil_log_info('Falling back to default Azure IMDS endpoint')\n    else:\n        imds_endpoint = azure_imds_endpoint\n\n    hutil_log_info('Using IMDS endpoint \"{0}\"'.format(imds_endpoint))\n    return imds_endpoint\n\ndef get_vmresourceid_from_metadata():\n    imds_endpoint = get_imds_endpoint()\n    req = urllib.Request(imds_endpoint)\n    req.add_header('Metadata', 'True')\n\n    try:\n        response = json.loads(urllib.urlopen(req).read())\n\n        if ('compute' not in response or response['compute'] is None):\n            return None # classic vm\n\n        if response['compute']['vmScaleSetName']:\n            return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/virtualMachines/{3}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['vmScaleSetName'],response['compute']['name'])\n        else:\n            return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])\n\n    except urlerror.HTTPError as e:\n        hutil_log_error('Request to Metadata service URL ' \\\n                        'failed with an HTTPError: {0}'.format(e))\n        hutil_log_info('Response from Metadata service: ' \\\n                       '{0}'.format(e.read()))\n        return None\n    except:\n        hutil_log_error('Unexpected error from Metadata service')\n        return None\n\ndef get_azure_environment_from_imds():\n    imds_endpoint = get_imds_endpoint()\n    req = urllib.Request(imds_endpoint)\n    req.add_header('Metadata', 'True')\n\n    try:\n        response = json.loads(urllib.urlopen(req).read())\n\n        if ('compute' not in response or response['compute'] is None):\n            return None # classic vm\n\n        if ('azEnvironment' not in response['compute'] or response['compute']['azEnvironment'] is None):\n            return None # classic vm\n\n        return response['compute']['azEnvironment']\n    except urlerror.HTTPError as e:\n        hutil_log_error('Request to Metadata service URL ' \\\n                        'failed with an HTTPError: {0}'.format(e))\n        hutil_log_info('Response from Metadata service: ' \\\n                       '{0}'.format(e.read()))\n        return None\n    except:\n        hutil_log_error('Unexpected error from Metadata service')\n    
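    # The environment could not be determined; callers such as\n        # get_azure_cloud_domain() treat None as unknown and fall back to the\n        # default cloud domain.\n    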
    return None\n\ndef get_azure_cloud_domain():\n    try:\n        environment = get_azure_environment_from_imds()\n\n        if environment:\n            for cloud, domain in CloudDomainMap.items():\n                if environment.lower() == cloud.lower():\n                    hutil_log_info('Detected cloud environment \"{0}\" via IMDS. The domain \"{1}\" will be used.'.format(cloud, domain))\n                    return domain\n\n        hutil_log_info('Unknown cloud environment \"{0}\"'.format(environment))\n    except Exception as e:\n        hutil_log_error('Failed to detect cloud environment: {0}'.format(e))\n\n    hutil_log_info('Falling back to default domain \"{0}\"'.format(CloudDomainMap[DefaultCloudName]))\n    return CloudDomainMap[DefaultCloudName]\n\ndef retrieve_managed_workspace(vm_resource_id):\n    \"\"\"\n    EnableAutomaticManagement has been set to true; the\n    ManagedIdentity extension and the VM Resource ID are also\n    required for the OneClick scenario\n    Using these and the Metadata API, we will call the OMS service\n    to determine what workspace ID and key to onboard to\n    \"\"\"\n    # Check for OneClick scenario requirements:\n    if not os.path.exists(ManagedIdentityExtListeningURLPath):\n        raise ManagedIdentityExtMissingException\n\n    # Determine the Tenant ID using the Metadata API\n    tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)\n\n    # Retrieve an OAuth token using the ManagedIdentity extension\n    if tenant_id is not None:\n        hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))\n        access_token = get_access_token(tenant_id, OAuthTokenResource)\n    else:\n        return None\n\n    # Query OMS service for the workspace info for onboarding\n    if tenant_id is not None and access_token is not None:\n        return get_workspace_info_from_oms(vm_resource_id, tenant_id,\n                                           access_token)\n    else:\n        return None\n\n\ndef disable():\n    \"\"\"\n    Disable all OMS workspace processes on the VM.\n    Note: disable operation times out from WAAgent at 15 minutes\n    \"\"\"\n    # stop the telemetry process\n    stop_telemetry_process()\n\n    # Check if the service control script is available\n    if not os.path.exists(OMSAgentServiceScript):\n        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does ' \\\n                                   'not exist. 
Disable cannot be called ' \\\n                                   'before install.'.format(OMSAgentServiceScript))\n        return 1\n\n    exit_code, output = run_command_and_log(DisableOMSAgentServiceCommand)\n    return exit_code, output\n\n\n# Dictionary of operations strings to methods\noperations = {'Disable' : disable,\n              'Uninstall' : uninstall,\n              'Install' : install,\n              'Enable' : enable,\n              # For the Update call we only prepare the update by backing up some\n              # state, since omsagent.py->install() will be called every time an\n              # upgrade is done due to upgradeMode = \"UpgradeWithInstall\" set in\n              # HandlerManifest\n              'Update' : prepare_update,\n              'Telemetry' : telemetry\n}\n\n\ndef parse_context(operation):\n    \"\"\"\n    Initialize a HandlerUtil object for this operation.\n    If the required modules have not been imported, this will return None.\n    \"\"\"\n    hutil = None\n    if ('Utils.WAAgentUtil' in sys.modules\n            and 'Utils.HandlerUtil' in sys.modules):\n        try:\n            logFileName = 'extension.log'\n            if operation == 'Telemetry':\n                logFileName = 'watcher.log'\n\n            hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)\n            hutil.do_parse_context(operation)\n        # parse_context may throw KeyError if a necessary JSON key is not\n        # present in settings\n        except KeyError as e:\n            waagent_log_error('Unable to parse context with error: ' \\\n                              '{0}'.format(e))\n            raise ParameterMissingException\n    return hutil\n\n\ndef is_vm_supported_for_extension():\n    \"\"\"\n    Checks if the VM this extension is running on is supported by OMSAgent.\n    Return values of platform.linux_distribution() vary widely in format (e.g.\n    '7.3.1611' is returned for a VM with CentOS 7), so only the leading digits\n    given in the supported list must match.\n    Only the supported distros of OMSAgent-for-Linux are allowed to utilize\n    this VM extension; all other distros will get error code 51\n    \"\"\"\n    supported_dists = {'redhat' : ['7', '8', '9'], 'red hat' : ['7', '8', '9'], 'rhel' : ['7', '8', '9'], # Red Hat\n                       'centos' : ['7', '8'], # CentOS\n                       'oracle' : ['7', '8'], 'ol': ['7', '8'], # Oracle\n                       'debian' : ['8', '9', '10', '11'], # Debian\n                       'ubuntu' : ['14.04', '16.04', '18.04', '20.04', '22.04'], # Ubuntu\n                       'suse' : ['12', '15'], 'sles' : ['12', '15'], # SLES\n                       'opensuse' : ['15'], # openSUSE\n                       'rocky' : ['8', '9'], # Rocky\n                       'alma' : ['8', '9'], # Alma\n                       'amzn' : ['2'] # AWS\n    }\n\n    vm_dist, vm_ver, vm_supported = '', '', False\n    parse_manually = False\n\n    # platform.linux_distribution() and platform.dist() are deprecated and were\n    # removed from newer Python versions, so only call them on older interpreters\n    if sys.version_info < (3,7):\n        try:\n            vm_dist, vm_ver, vm_id = platform.linux_distribution()\n        except AttributeError:\n            try:\n                vm_dist, vm_ver, vm_id = platform.dist()\n            except AttributeError:\n                hutil_log_info(\"Falling back to /etc/os-release distribution parsing\")\n\n        # Some Python versions *IF BUILT LOCALLY* (e.g. 3.5) return string responses (e.g. 
'bullseye/sid') from platform.dist().\n        # That causes an exception in the int() probe below, so switch to\n        # manual parsing in that case\n        try:\n            int(vm_ver.split('.')[0])\n        except:\n            parse_manually = True\n    else:\n        parse_manually = True\n\n    # Fallback if either of the above platform commands fail, or we switch to manual parsing\n    if (not vm_dist and not vm_ver) or parse_manually:\n        try:\n            with open('/etc/os-release', 'r') as fp:\n                for line in fp:\n                    if line.startswith('ID='):\n                        vm_dist = line.split('=')[1]\n                        vm_dist = vm_dist.split('-')[0]\n                        vm_dist = vm_dist.replace('\\\"', '').replace('\\n', '')\n                    elif line.startswith('VERSION_ID='):\n                        vm_ver = line.split('=')[1]\n                        vm_ver = vm_ver.replace('\\\"', '').replace('\\n', '')\n        except:\n            return vm_supported, 'Indeterminate operating system', ''\n\n    # Find this VM distribution in the supported list\n    for supported_dist in list(supported_dists.keys()):\n        if not vm_dist.lower().startswith(supported_dist):\n            continue\n\n        # Check if this VM distribution version is supported\n        vm_ver_split = vm_ver.split('.')\n        for supported_ver in supported_dists[supported_dist]:\n            supported_ver_split = supported_ver.split('.')\n\n            # If vm_ver is at least as precise (at least as many digits) as\n            # supported_ver and matches all the supported_ver digits, then\n            # this VM is guaranteed to be supported\n            vm_ver_match = True\n            for idx, supported_ver_num in enumerate(supported_ver_split):\n                try:\n                    supported_ver_num = int(supported_ver_num)\n                    vm_ver_num = int(vm_ver_split[idx])\n                except (IndexError, ValueError):\n                    # IndexError: vm_ver has fewer components than supported_ver;\n                    # ValueError: a version component is not numeric\n                    vm_ver_match = False\n                    break\n                if vm_ver_num != supported_ver_num:\n                    vm_ver_match = False\n                    break\n            if vm_ver_match:\n                vm_supported = True\n                break\n\n        if vm_supported:\n            break\n\n    return vm_supported, vm_dist, vm_ver\n\n\ndef exit_if_vm_not_supported(operation):\n    \"\"\"\n    Check if this VM distro and version are supported by the OMSAgent.\n    If this VM is not supported, log the proper error code and exit.\n    \"\"\"\n    vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension()\n    if not vm_supported:\n        log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \\\n                                    '{0} {1}'.format(vm_dist, vm_ver))\n    return 0\n\n\ndef exit_if_openssl_unavailable(operation):\n    \"\"\"\n    Check if the openssl commandline interface is available to use\n    If not, throw error to return UnsupportedOpenSSL error code\n    \"\"\"\n    exit_code, output = run_get_output('which openssl', True, False)\n    if exit_code != 0:\n        log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')\n    return 0\n\n\ndef exit_if_gpg_unavailable(operation):\n    \"\"\"\n    Check if gpg is available to use\n    If not, attempt to install it\n    If the install fails, throw an error to return the UnsupportedGpg error code\n    \"\"\"\n    # Check if VM is Debian (Debian 10 doesn't have gpg)\n    
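# Flow: probe for gpg with 'which gpg'; if absent, install it via the apt\n    # template InstallExtraPackageCommandApt, and exit with UnsupportedGpg if\n    # that install fails.\n    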
vm_supp, vm_dist, _ = is_vm_supported_for_extension()\n    if vm_supp and vm_dist.lower().startswith('debian'):\n        # Check if GPG already on VM\n        check_exit_code, _ = run_get_output('which gpg', True, False)\n        if check_exit_code != 0:\n            # GPG not on VM, attempt to install\n            hutil_log_info('GPG not found, attempting to install')\n            exit_code, output = run_get_output(InstallExtraPackageCommandApt.format('gpg'))\n            if exit_code != 0:\n                log_and_exit(operation, UnsupportedGpg, 'GPG could not be installed: {0}'.format(output))\n            else:\n                hutil_log_info('GPG successfully installed')\n        else:\n            hutil_log_info('GPG already present on VM')\n    return 0\n\n\ndef check_workspace_id_and_key(workspace_id, workspace_key):\n    \"\"\"\n    Validate formats of workspace_id and workspace_key\n    \"\"\"\n    check_workspace_id(workspace_id)\n\n    # Validate that workspace_key is of the correct format (base64-encoded)\n    if workspace_key is None:\n        raise ParameterMissingException('Workspace key must be provided')\n\n    try:\n        encoded_key = base64.b64encode(base64.b64decode(workspace_key))\n        if sys.version_info >= (3,): # in python 3, base64.b64encode will return bytes, so decode to str for comparison\n            encoded_key = encoded_key.decode()\n    except Exception:\n        # base64.b64decode raises TypeError on Python 2 and binascii.Error on\n        # Python 3 when the key is not valid base64\n        raise InvalidParameterError('Workspace key is invalid')\n\n    if encoded_key != workspace_key:\n        raise InvalidParameterError('Workspace key is invalid')\n\n\ndef check_workspace_id(workspace_id):\n    \"\"\"\n    Validate that workspace_id matches the GUID regex\n    \"\"\"\n    if workspace_id is None:\n        raise ParameterMissingException('Workspace ID must be provided')\n\n    search = re.compile(GUIDOnlyRegex, re.M)\n    if not search.match(workspace_id):\n        raise InvalidParameterError('Workspace ID is invalid')\n\n\ndef detect_multiple_connections(workspace_id):\n    \"\"\"\n    If the VM already has a workspace/SCOM configured, then we should\n    disallow a new connection when stopOnMultipleConnections is used\n\n    Throw an exception in these cases:\n    - The workspace with the given workspace_id has not been onboarded\n      to the VM, but at least one other workspace has been\n    - The workspace with the given workspace_id has not been onboarded\n      to the VM, and the VM is connected to SCOM\n\n    If the extension operation is connecting to an already-configured\n    workspace, it is not a stopping case\n    \"\"\"\n    other_connection_exists = False\n    if os.path.exists(OMSAdminPath):\n        exit_code, utfoutput = run_get_output(WorkspaceCheckCommand,\n                                           chk_err = False)\n\n        # output may contain unicode characters not supported by ascii;\n        # e.g., without conversion it generates: UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 18: ordinal not in range(128)\n        # (the default encoding in python < 3 is ascii)\n        if sys.version_info < (3,):\n            output = utfoutput.decode('utf8').encode('utf8')\n        else:\n            output = utfoutput\n\n        if output.strip().lower() != 'no workspace':\n            for line in output.split('\\n'):\n                if workspace_id in line:\n                    hutil_log_info('The workspace to be enabled has already ' \\\n                                   'been 
configured on the VM before; ' \\\n                                   'continuing despite ' \\\n                                   'stopOnMultipleConnections flag')\n                    return\n                else:\n                    # Note: if scom workspace dir is created, a line containing\n                    # \"Workspace(SCOM Workspace): scom\" will be here\n                    # If any other line is here, it may start sending data later\n                    other_connection_exists = True\n    else:\n        for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):\n            for sub_dir in sub_dirs:\n                sub_dir_name = os.path.basename(sub_dir)\n                workspace_search = re.compile(GUIDOnlyRegex, re.M)\n                if sub_dir_name == workspace_id:\n                    hutil_log_info('The workspace to be enabled has already ' \\\n                                   'been configured on the VM before; ' \\\n                                   'continuing despite ' \\\n                                   'stopOnMultipleConnections flag')\n                    return\n                elif (workspace_search.match(sub_dir_name)\n                        or sub_dir_name == 'scom'):\n                    other_connection_exists = True\n\n    if other_connection_exists:\n        err_msg = ('This machine is already connected to another Log ' \\\n                   'Analytics workspace. Please set ' \\\n                   'stopOnMultipleConnections to false in public ' \\\n                   'settings or remove this property so this machine ' \\\n                   'can connect to new workspaces; note that it will ' \\\n                   'then be billed once for each workspace it ' \\\n                   'reports to. ' \\\n                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')\n        # This exception will get caught by the main method\n        raise UnwantedMultipleConnectionsException(err_msg)\n    else:\n        detect_scom_connection()\n\n\ndef detect_scom_connection():\n    \"\"\"\n    If these two conditions are met, we can assume the VM is being\n    monitored by SCOM:\n    1. SCOMPort is open and omiserver is listening on it\n    2. scx certificate is signed by SCOM server\n\n    To determine this, check for the following two conditions:\n    1. SCOMPort is open and omiserver is listening on it:\n       /etc/omi/conf/omiserver.conf can be parsed to\n       determine it.\n    2. scx certificate is signed by SCOM server: scom cert\n       is present @ /etc/opt/omi/ssl/omi-host-<hostname>.pem\n       (/etc/opt/microsoft/scx/ssl/scx.pem is a symlink to\n       this). 
If the VM is monitored by SCOM then the issuer\n       field of the certificate will have a value like\n       CN=SCX-Certificate/title=<GUID>, DC=<SCOM server hostname>\n       (e.g. CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,\n       DC=NEB-OM-1502733)\n\n    Otherwise, if a scom configuration directory has been\n    created, we assume SCOM is in use\n    \"\"\"\n    scom_port_open = None # None until determined; we return early once this is known to be False\n    cert_signed_by_scom = False\n\n    if os.path.exists(OMSAdminPath):\n        scom_port_open = detect_scom_using_omsadmin()\n        if scom_port_open is False:\n            return\n\n    # If omsadmin.sh option is not available, use omiconfigeditor\n    if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)\n            and os.path.exists(OMIServerConfPath)):\n        scom_port_open = detect_scom_using_omiconfigeditor()\n        if scom_port_open is False:\n            return\n\n    # If omiconfigeditor option is not available, directly parse omiserver.conf\n    if scom_port_open is None and os.path.exists(OMIServerConfPath):\n        scom_port_open = detect_scom_using_omiserver_conf()\n        if scom_port_open is False:\n            return\n\n    if scom_port_open is None:\n        hutil_log_info('SCOM port could not be determined to be open')\n        return\n\n    # Parse the certificate to determine if SCOM issued it\n    if os.path.exists(SCOMCertPath):\n        exit_if_openssl_unavailable('Install')\n        cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)\n        cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,\n                                                     log_cmd = False)\n        if cert_exit_code == 0:\n            issuer_re = re.compile(SCOMCertIssuerRegex, re.M)\n            if issuer_re.search(cert_output):\n                hutil_log_info('SCOM cert exists and is signed by SCOM server')\n                cert_signed_by_scom = True\n            else:\n                hutil_log_info('SCOM cert exists but is not signed by SCOM ' \\\n                               'server')\n        else:\n            hutil_log_error('Error reading SCOM cert; cert could not be ' \\\n                            'determined to be signed by SCOM server')\n    else:\n        hutil_log_info('SCOM cert does not exist')\n\n    if scom_port_open and cert_signed_by_scom:\n        err_msg = ('This machine may already be connected to a System ' \\\n                   'Center Operations Manager server. Please set ' \\\n                   'stopOnMultipleConnections to false in public settings ' \\\n                   'or remove this property to allow connection to the Log ' \\\n                   'Analytics workspace. 
' \\\n                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')\n        raise UnwantedMultipleConnectionsException(err_msg)\n\n\ndef detect_scom_using_omsadmin():\n    \"\"\"\n    This method assumes that OMSAdminPath exists; if packages have not\n    been installed yet, this may not exist\n    Returns True if omsadmin.sh indicates that SCOM port is open\n    \"\"\"\n    omsadmin_cmd = '{0} -o'.format(OMSAdminPath)\n    exit_code, output = run_get_output(omsadmin_cmd, False, False)\n    # Guard against older omsadmin.sh versions\n    if ('illegal option' not in output.lower()\n            and 'unknown option' not in output.lower()):\n        if exit_code == 0:\n            hutil_log_info('According to {0}, SCOM port is ' \\\n                           'open'.format(omsadmin_cmd))\n            return True\n        elif exit_code == 1:\n            hutil_log_info('According to {0}, SCOM port is not ' \\\n                           'open'.format(omsadmin_cmd))\n    return False\n\n\ndef detect_scom_using_omiconfigeditor():\n    \"\"\"\n    This method assumes that the relevant files exist\n    Returns True if omiconfigeditor indicates that SCOM port is open\n    \"\"\"\n    omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,\n                                                  SCOMPort, OMIServerConfPath)\n    exit_code, output = run_get_output(omi_cmd, False, False)\n    # Guard against older omiconfigeditor versions\n    if ('illegal option' not in output.lower()\n            and 'unknown option' not in output.lower()):\n        if exit_code == 0:\n            hutil_log_info('According to {0}, SCOM port is ' \\\n                           'open'.format(omi_cmd))\n            return True\n        elif exit_code == 1:\n            hutil_log_info('According to {0}, SCOM port is not ' \\\n                           'open'.format(omi_cmd))\n    return False\n\n\ndef detect_scom_using_omiserver_conf():\n    \"\"\"\n    This method assumes that the relevant files exist\n    Returns True if omiserver.conf indicates that SCOM port is open\n    \"\"\"\n    with open(OMIServerConfPath, 'r') as omiserver_file:\n        omiserver_txt = omiserver_file.read()\n\n    httpsport_search = r'^[\\s]*httpsport[\\s]*=(.*)$'\n    httpsport_re = re.compile(httpsport_search, re.M)\n    httpsport_matches = httpsport_re.search(omiserver_txt)\n    if (httpsport_matches is not None and\n            httpsport_matches.group(1) is not None):\n        ports = httpsport_matches.group(1)\n        ports = ports.replace(',', ' ')\n        ports_list = ports.split(' ')\n        if str(SCOMPort) in ports_list:\n            hutil_log_info('SCOM port is listed in ' \\\n                           '{0}'.format(OMIServerConfPath))\n            return True\n        else:\n            hutil_log_info('SCOM port is not listed in ' \\\n                           '{0}'.format(OMIServerConfPath))\n    else:\n        hutil_log_info('SCOM port is not listed in ' \\\n                       '{0}'.format(OMIServerConfPath))\n    return False\n\n\ndef run_command_and_log(cmd, check_error = True, log_cmd = True):\n    \"\"\"\n    Run the provided shell command and log its output, including stdout and\n    stderr.\n    The output should not contain any PII, but the command might. 
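For example, the onboarding command built in enable() embeds the workspace key on its command line. 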
In this case,\n    log_cmd should be set to False.\n    \"\"\"\n    exit_code, output = run_get_output(cmd, check_error, log_cmd)\n    if log_cmd:\n        hutil_log_info('Output of command \"{0}\": \\n{1}'.format(cmd.rstrip(), output))\n    else:\n        hutil_log_info('Output: \\n{0}'.format(output))\n\n    # For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\n    if exit_code == 17:\n        if \"Failed dependencies:\" in output:\n            # 52 is the exit code for missing dependency\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        elif \"waiting for transaction lock\" in output or \"dpkg: error processing package systemd\" in output or \"dpkg-deb\" in output or \"dpkg:\" in output:\n            # 52 is the exit code for missing dependency\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"There seems to be an issue with your package manager (dpkg or rpm) being in a locked state. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        elif \"Errors were encountered while processing:\" in output:\n            # 52 is the exit code for missing dependency\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"There seems to be an issue while processing triggers in systemd. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        elif \"Cannot allocate memory\" in output:\n            # 52 is the exit code for missing dependency\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"There seems to be insufficient memory for the installation. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n    elif exit_code == 19:\n        if \"rpmdb\" in output or \"cannot open Packages database\" in output or \"dpkg (subprocess): cannot set security execution context for maintainer script\" in output or \"is locked by another process\" in output:\n            # OMI (19) happens to be the first package we install, so rpmdb failures here indicate a system issue\n            # 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"There seems to be an issue with your package manager (dpkg or rpm) being in a locked state. 
For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        elif \"libc6 is not installed\" in output or \"libpam-runtime is not installed\" in output or \"exited with status 52\" in output or \"/bin/sh is needed\" in output:\n            # OMI (19) happens to be the first package we install, so missing core dependencies here indicate a system issue\n            # 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n    elif exit_code == 33:\n        if \"Permission denied\" in output:\n            # Enable failures\n            # 52 is the exit code for missing dependency.\n            # DSC metaconfig generation failure due to permissions.\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"Installation failed due to insufficient permissions. Please ensure the omsagent user is part of the sudoers file and has sufficient permissions, and that the omsconfig MetaConfig.mof can be generated. For details, check logs in /var/opt/microsoft/omsconfig/omsconfig.log and /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n    elif exit_code == 18:\n        # Install failures\n        # DSC install failure\n        # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n        output = \"Installation failed because the omsconfig package could not be installed. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n    elif exit_code == 5:\n        if \"Reason: InvalidWorkspaceKey\" in output or \"Reason: MissingHeader\" in output:\n            # Enable failures\n            # 53 is the exit code for configuration errors\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 53\n            output = \"Installation failed due to an incorrect workspace key. Please check that the workspace key is correct. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n    elif exit_code == 8:\n        if \"Check the correctness of the workspace ID and shared key\" in output or \"internet connectivity\" in output:\n            # Enable failures\n            # 53 is the exit code for configuration errors\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 53\n            output = \"Installation failed due to a curl error while onboarding. Please check the internet connectivity or the workspace key. 
For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n\n    if exit_code != 0 and exit_code != 52:\n        if \"dpkg:\" in output or \"dpkg :\" in output or \"rpmdb:\" in output or \"rpm.lock\" in output or \"locked by another process\" in output:\n            # rpmdb failures indicate a system issue\n            # 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"There seems to be an issue with your package manager (dpkg or rpm) being in a locked state while installing one of the omsagent bundle dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        if \"conflicts with file from package\" in output or \"Failed dependencies:\" in output or \"Please install curl\" in output or \"is needed by\" in output or \"check_version_installable\" in output or \"Error: curl was not installed\" in output or \"Please install the ctypes package\" in output or \"gpg is not installed\" in output:\n            # Missing dependencies indicate a system issue\n            # 52 is the exit code for missing dependency i.e. rpmdb, libc6 or libpam-runtime\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n        if \"Permission denied\" in output:\n            # Install/Enable failures\n            # 52 is the exit code for missing dependency.\n            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr\n            exit_code = 52\n            output = \"Installation failed due to insufficient permissions. Please ensure the omsagent user is part of the sudoers file and has sufficient permissions to install and onboard. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log\"\n\n    return exit_code, output\n\n\ndef run_command_with_retries(cmd, retries, retry_check, final_check = None,\n                             check_error = True, log_cmd = True,\n                             initial_sleep_time = InitialRetrySleepSeconds,\n                             sleep_increase_factor = 1):\n    \"\"\"\n    Caller provides a method, retry_check, to use to determine if a retry\n    should be performed. 
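(See retry_if_dpkg_locked_or_curl_is_not_found below for a concrete example.) 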
This must be a function with two parameters:\n    exit_code and output\n    The final_check can be provided as a method to perform a final check after\n    retries have been exhausted\n    Logic used: will retry up to retries times with initial_sleep_time in\n    between tries\n    If the retry_check returns True for retry_verbosely, we will try cmd with\n    the standard -v verbose flag added\n    \"\"\"\n    try_count = 0\n    sleep_time = initial_sleep_time\n    run_cmd = cmd\n    run_verbosely = False\n\n    while try_count <= retries:\n        if run_verbosely:\n            run_cmd = cmd + ' -v'\n        exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)\n        should_retry, retry_message, run_verbosely = retry_check(exit_code,\n                                                                 output)\n        if not should_retry:\n            break\n        try_count += 1\n        hutil_log_info(retry_message)\n        time.sleep(sleep_time)\n        sleep_time *= sleep_increase_factor\n\n    if final_check is not None:\n        exit_code = final_check(exit_code, output)\n\n    return exit_code\n\ndef run_command_with_retries_output(cmd, retries, retry_check, final_check = None,\n                             check_error = True, log_cmd = True,\n                             initial_sleep_time = InitialRetrySleepSeconds,\n                             sleep_increase_factor = 1):\n    \"\"\"\n    Caller provides a method, retry_check, to use to determine if a retry\n    should be performed. This must be a function with two parameters:\n    exit_code and output\n    The final_check can be provided as a method to perform a final check after\n    retries have been exhausted\n    Logic used: will retry up to retries times with initial_sleep_time in\n    between tries\n    If the retry_check returns True for retry_verbosely, we will try cmd with\n    the standard -v verbose flag added\n    \"\"\"\n    try_count = 0\n    sleep_time = initial_sleep_time\n    run_cmd = cmd\n    run_verbosely = False\n\n    while try_count <= retries:\n        if run_verbosely:\n            run_cmd = cmd + ' -v'\n        exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)\n        should_retry, retry_message, run_verbosely = retry_check(exit_code,\n                                                                 output)\n        if not should_retry:\n            break\n        try_count += 1\n        hutil_log_info(retry_message)\n        time.sleep(sleep_time)\n        sleep_time *= sleep_increase_factor\n\n    if final_check is not None:\n        exit_code = final_check(exit_code, output)\n\n    return exit_code, output\n\n\ndef is_dpkg_locked(exit_code, output):\n    \"\"\"\n    If dpkg is locked, the output will contain a message similar to 'dpkg\n    status database is locked by another process'\n    \"\"\"\n    if exit_code != 0:\n        dpkg_locked_search = r'^.*dpkg.+lock.*$'\n        dpkg_locked_re = re.compile(dpkg_locked_search, re.M)\n        if dpkg_locked_re.search(output):\n            return True\n    return False\n\n\ndef was_curl_found(exit_code, output):\n    \"\"\"\n    Returns False if exit_code indicates that curl was not installed; this can\n    occur when package lists need to be updated, or when some archives are\n    out-of-date\n    \"\"\"\n    if exit_code == InstallErrorCurlNotInstalled:\n        return False\n    return True\n\ndef retry_skip(exit_code, output):\n    \"\"\"\n    Skip retries\n    \"\"\"\n    return False, '', False\n\ndef 
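_example_retry_check(exit_code, output):\n    \"\"\"\n    Illustrative sketch only: this helper is not wired into the handler. It\n    shows the shape of a retry_check callable accepted by\n    run_command_with_retries and run_command_with_retries_output above, which\n    must take (exit_code, output) and return a\n    (should_retry, retry_message, retry_verbosely) tuple. The output string\n    matched here is hypothetical.\n    \"\"\"\n    if exit_code != 0 and 'temporarily unavailable' in output.lower():\n        return True, 'Retrying: resource was temporarily unavailable.', False\n    return False, '', False\n\ndef 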
retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):\n    \"\"\"\n    Some commands fail because the package manager is locked (apt-get/dpkg\n    only); this will allow retries on failing commands.\n    Sometimes curl's dependencies (i.e. libcurl) are not installed; if this\n    is the case on a VM with apt-get, 'apt-get -f install' should be run\n    Sometimes curl is not installed and is also not found in the package list;\n    if this is the case on a VM with apt-get, update the package list\n    \"\"\"\n    retry_verbosely = False\n    dpkg_locked = is_dpkg_locked(exit_code, output)\n    curl_found = was_curl_found(exit_code, output)\n    apt_get_exit_code, apt_get_output = run_get_output('which apt-get',\n                                                       chk_err = False,\n                                                       log_cmd = False)\n    if dpkg_locked:\n        return True, 'Retrying command because package manager is locked.', \\\n               retry_verbosely\n    elif (not curl_found and apt_get_exit_code == 0 and\n            ('apt-get -f install' in output\n            or 'unmet dependencies' in output.lower())):\n        hutil_log_info('Installing all dependencies of curl:')\n        run_command_and_log('apt-get -f install')\n        return True, 'Retrying command because curl and its dependencies ' \\\n                     'needed to be installed', retry_verbosely\n    elif not curl_found and apt_get_exit_code == 0:\n        hutil_log_info('Updating package lists to make curl available')\n        run_command_and_log('apt-get update')\n        return True, 'Retrying command because package lists needed to be ' \\\n                     'updated', retry_verbosely\n    else:\n        return False, '', False\n\n\ndef final_check_if_dpkg_locked(exit_code, output):\n    \"\"\"\n    If dpkg is still locked after the retries, we want to return a specific\n    error code\n    \"\"\"\n    dpkg_locked = is_dpkg_locked(exit_code, output)\n    if dpkg_locked:\n        exit_code = DPKGLockedErrorCode\n    return exit_code\n\n\ndef retry_onboarding(exit_code, output):\n    \"\"\"\n    Retry under any of these conditions:\n    - If the onboarding request returns 403: this may indicate that the agent\n      GUID and certificate should be re-generated\n    - If the onboarding request returns a different non-200 code: the OMS\n      service may be temporarily unavailable\n    - If the onboarding curl command returns an unaccounted-for error code,\n      we should retry with verbose logging\n    \"\"\"\n    retry_verbosely = False\n\n    if exit_code == EnableErrorOMSReturned403:\n        return True, 'Retrying the onboarding command to attempt generating ' \\\n                     'a new agent ID and certificate.', retry_verbosely\n    elif exit_code == EnableErrorOMSReturnedNon200:\n        return True, 'Retrying; the OMS service may be temporarily ' \\\n                     'unavailable.', retry_verbosely\n    elif exit_code == EnableErrorOnboarding:\n        return True, 'Retrying with verbose logging.', True\n    return False, '', False\n\n\ndef raise_if_no_internet(exit_code, output):\n    \"\"\"\n    Raise the CannotConnectToOMSException exception if the onboarding\n    script returns the error code indicating that the OMS service can't be\n    resolved\n    \"\"\"\n    if exit_code == EnableErrorResolvingHost:\n        raise CannotConnectToOMSException\n    return exit_code\n\n\ndef get_settings():\n    \"\"\"\n    Retrieve the configuration for this extension 
operation\n    \"\"\"\n    global SettingsDict\n    public_settings = None\n    protected_settings = None\n\n    if HUtilObject is not None:\n        public_settings = HUtilObject.get_public_settings()\n        protected_settings = HUtilObject.get_protected_settings()\n    elif SettingsDict is not None:\n        public_settings = SettingsDict['public_settings']\n        protected_settings = SettingsDict['protected_settings']\n    else:\n        SettingsDict = {}\n        handler_env = get_handler_env()\n        try:\n            config_dir = str(handler_env['handlerEnvironment']['configFolder'])\n        except:\n            config_dir = os.path.join(os.getcwd(), 'config')\n\n        seq_no = get_latest_seq_no()\n        settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))\n        h_settings = None\n        try:\n            with open(settings_path, 'r') as settings_file:\n                settings_txt = settings_file.read()\n            settings = json.loads(settings_txt)\n            h_settings = settings['runtimeSettings'][0]['handlerSettings']\n            public_settings = h_settings['publicSettings']\n            SettingsDict['public_settings'] = public_settings\n        except:\n            hutil_log_error('Unable to load handler settings from ' \\\n                            '{0}'.format(settings_path))\n\n        if (h_settings is not None\n                and 'protectedSettings' in h_settings\n                and 'protectedSettingsCertThumbprint' in h_settings\n                and h_settings['protectedSettings'] is not None\n                and h_settings['protectedSettingsCertThumbprint'] is not None):\n            encoded_settings = h_settings['protectedSettings']\n            settings_thumbprint = h_settings['protectedSettingsCertThumbprint']\n            encoded_cert_path = os.path.join('/var/lib/waagent',\n                                             '{0}.crt'.format(\n                                                       settings_thumbprint))\n            encoded_key_path = os.path.join('/var/lib/waagent',\n                                            '{0}.prv'.format(\n                                                      settings_thumbprint))\n            decoded_settings = base64.standard_b64decode(encoded_settings)\n            decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \\\n                          '-inkey {1}'.format(encoded_cert_path,\n                                              encoded_key_path)\n\n            output = None\n            try:\n                session = subprocess.Popen([decrypt_cmd], shell = True,\n                                           stdin = subprocess.PIPE,\n                                           stderr = subprocess.STDOUT,\n                                           stdout = subprocess.PIPE)\n                output = session.communicate(decoded_settings)\n            except OSError:\n                pass\n            protected_settings_str = output[0] if output is not None else None\n\n            if protected_settings_str is None:\n                log_and_exit('Enable', 1, 'Failed decrypting ' \\\n                                          'protectedSettings')\n            protected_settings = ''\n            try:\n                protected_settings = json.loads(protected_settings_str)\n            except:\n                hutil_log_error('JSON exception decoding protected settings')\n            SettingsDict['protected_settings'] = protected_settings\n\n    return public_settings, protected_settings\n\n\ndef update_status_file(operation, exit_code, exit_status, message):\n    \"\"\"\n    Mimic HandlerUtil method 
do_status_report in case the hutil object is not\n    available\n    Write status to the status file\n    \"\"\"\n    handler_env = get_handler_env()\n    try:\n        extension_version = str(handler_env['version'])\n        status_dir = str(handler_env['handlerEnvironment']['statusFolder'])\n    except:\n        extension_version = \"1.0\"\n        status_dir = os.path.join(os.getcwd(), 'status')\n\n    status_txt = [{\n        \"version\" : extension_version,\n        \"timestampUTC\" : time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n        \"status\" : {\n            \"name\" : \"Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux\",\n            \"operation\" : operation,\n            \"status\" : exit_status,\n            \"code\" : exit_code,\n            \"formattedMessage\" : {\n                \"lang\" : \"en-US\",\n                \"message\" : message\n            }\n        }\n    }]\n\n    status_json = json.dumps(status_txt)\n\n    # Find the most recently changed config file and then use the\n    # corresponding status file\n    latest_seq_no = get_latest_seq_no()\n\n    status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))\n    status_tmp = '{0}.tmp'.format(status_path)\n    with open(status_tmp, 'w+') as tmp_file:\n        tmp_file.write(status_json)\n    os.rename(status_tmp, status_path)\n\n\ndef get_handler_env():\n    \"\"\"\n    Set and retrieve the contents of HandlerEnvironment.json as JSON\n    \"\"\"\n    global HandlerEnvironment\n    if HandlerEnvironment is None:\n        handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')\n        try:\n            with open(handler_env_path, 'r') as handler_env_file:\n                handler_env_txt = handler_env_file.read()\n            handler_env = json.loads(handler_env_txt)\n            if isinstance(handler_env, list):\n                handler_env = handler_env[0]\n            HandlerEnvironment = handler_env\n        except Exception as e:\n            waagent_log_error(str(e))\n    return HandlerEnvironment\n\n\ndef get_latest_seq_no():\n    \"\"\"\n    Determine the latest operation settings number to use\n    \"\"\"\n    global SettingsSequenceNumber\n    if SettingsSequenceNumber is None:\n        handler_env = get_handler_env()\n        try:\n            config_dir = str(handler_env['handlerEnvironment']['configFolder'])\n        except:\n            config_dir = os.path.join(os.getcwd(), 'config')\n\n        latest_seq_no = -1\n        cur_seq_no = -1\n        latest_time = None\n        try:\n            for dir_name, sub_dirs, file_names in os.walk(config_dir):\n                for file_name in file_names:\n                    file_basename = os.path.basename(file_name)\n                    match = re.match(r'[0-9]{1,10}\\.settings', file_basename)\n                    if match is None:\n                        continue\n                    cur_seq_no = int(file_basename.split('.')[0])\n                    # join with dir_name (not config_dir) so files found in\n                    # nested directories resolve to a valid path\n                    file_path = os.path.join(dir_name, file_name)\n                    cur_time = os.path.getmtime(file_path)\n                    if latest_time is None or cur_time > latest_time:\n                        latest_time = cur_time\n                        latest_seq_no = cur_seq_no\n        except:\n            pass\n        if latest_seq_no < 0:\n            latest_seq_no = 0\n        SettingsSequenceNumber = latest_seq_no\n\n    return SettingsSequenceNumber\n\n\ndef run_get_output(cmd, chk_err = False, log_cmd = True):\n    \"\"\"\n    Mimic waagent method RunGetOutput 
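(returning an exit code plus combined stdout/stderr) 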
in case waagent is not available\n    Run shell command and return exit code and output\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        # WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput\n        # If checking the number of parameters fails, pass 2\n        try:\n            sig = inspect.signature(waagent.RunGetOutput)\n            params = sig.parameters\n            waagent_params = len(params)\n        except:\n            try:\n                spec = inspect.getargspec(waagent.RunGetOutput)\n                params = spec.args\n                waagent_params = len(params)\n            except:\n                waagent_params = 2\n        if waagent_params >= 3:\n            exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)\n        else:\n            exit_code, output = waagent.RunGetOutput(cmd, chk_err)\n    else:\n        try:\n            output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,\n                                             shell = True)\n            output = output.decode('latin-1')\n            exit_code = 0\n        except subprocess.CalledProcessError as e:\n            exit_code = e.returncode\n            output = e.output.decode('latin-1')\n\n    output = output.encode('utf-8', 'ignore')\n\n    # On python 3, encode returns a byte object, so we must decode back to a string\n    if sys.version_info >= (3,):\n        output = output.decode()\n\n    return exit_code, output.strip()\n\n\ndef get_tenant_id_from_metadata_api(vm_resource_id):\n    \"\"\"\n    Retrieve the Tenant ID using the Metadata API of the VM resource ID\n    Since we have not authenticated, the Metadata API will throw a 401, but\n    the headers of the 401 response will contain the tenant ID\n    \"\"\"\n    tenant_id = None\n    metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)\n    metadata_request = urllib.Request(metadata_endpoint)\n    try:\n        # This request should fail with code 401\n        metadata_response = urllib.urlopen(metadata_request)\n        hutil_log_info('Request to Metadata API did not fail as expected; ' \\\n                       'attempting to use headers from response to ' \\\n                       'determine Tenant ID')\n        metadata_headers = metadata_response.headers\n    except urlerror.HTTPError as e:\n        metadata_headers = e.headers\n\n    if metadata_headers is not None and 'WWW-Authenticate' in metadata_headers:\n        auth_header = metadata_headers['WWW-Authenticate']\n        auth_header_regex = r'authorization_uri=\\\"https:\\/\\/login\\.windows\\.net/(' + GUIDRegex + ')\\\"'\n        auth_header_search = re.compile(auth_header_regex)\n        auth_header_matches = auth_header_search.search(auth_header)\n        if not auth_header_matches:\n            raise MetadataAPIException('The WWW-Authenticate header in the ' \\\n                                       'response does not contain expected ' \\\n                                       'authorization_uri format')\n        else:\n            tenant_id = auth_header_matches.group(1)\n    else:\n        raise MetadataAPIException('Expected information from Metadata API ' \\\n                                   'is not present')\n\n    return tenant_id\n\n\ndef get_metadata_api_endpoint(vm_resource_id):\n    \"\"\"\n    Extrapolate Metadata API endpoint from VM Resource ID\n    Example VM resource ID: 
/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C\n    Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01\n    \"\"\"\n    # Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets\n    # (all pieces are raw strings so the backslash escapes reach the regex engine)\n    vm_resource_id_regex = r'^\\/subscriptions\\/(' + GUIDRegex + r')\\/' \\\n                            r'resourceGroups\\/([^\\/]+)\\/providers\\/Microsoft' \\\n                            r'\\.(?:Classic){0,1}Compute\\/(?:virtualMachines|' \\\n                            r'availabilitySets|virtualMachineScaleSets)' \\\n                            r'\\/[^\\/]+$'\n    vm_resource_id_search = re.compile(vm_resource_id_regex, re.M)\n    vm_resource_id_matches = vm_resource_id_search.search(vm_resource_id)\n    if not vm_resource_id_matches:\n        raise InvalidParameterError('VM Resource ID is invalid')\n    else:\n        subscription_id = vm_resource_id_matches.group(1)\n        resource_group = vm_resource_id_matches.group(2)\n\n    metadata_url = 'https://management.azure.com/subscriptions/{0}' \\\n                   '/resourceGroups/{1}'.format(subscription_id,\n                                                resource_group)\n    metadata_data = urlparse.urlencode({'api-version' : '2016-09-01'})\n    metadata_endpoint = '{0}?{1}'.format(metadata_url, metadata_data)\n    return metadata_endpoint\n\n\ndef get_access_token(tenant_id, resource):\n    \"\"\"\n    Retrieve an OAuth token by sending an OAuth2 token exchange\n    request to the local URL that the ManagedIdentity extension is\n    listening to\n    \"\"\"\n    # Extract the endpoint that the ManagedIdentity extension is listening on\n    with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:\n        listening_settings_txt = listening_file.read()\n    try:\n        listening_settings = json.loads(listening_settings_txt)\n        listening_url = listening_settings['url']\n    except:\n        raise ManagedIdentityExtException('Could not extract listening URL ' \\\n                                          'from settings file')\n\n    # Send an OAuth token exchange request\n    oauth_data = {'authority' : 'https://login.microsoftonline.com/' \\\n                                '{0}'.format(tenant_id),\n                  'resource' : resource\n    }\n    oauth_request = urllib.Request(listening_url + '/oauth2/token',\n                                    urlparse.urlencode(oauth_data))\n    oauth_request.add_header('Metadata', 'true')\n    try:\n        oauth_response = urllib.urlopen(oauth_request)\n        oauth_response_txt = oauth_response.read()\n    except urlerror.HTTPError as e:\n        hutil_log_error('Request to ManagedIdentity extension listening URL ' \\\n                        'failed with an HTTPError: {0}'.format(e))\n        hutil_log_info('Response from ManagedIdentity extension: ' \\\n                       '{0}'.format(e.read()))\n        raise ManagedIdentityExtException('Request to listening URL failed ' \\\n                                          'with HTTPError {0}'.format(e))\n    except:\n        raise ManagedIdentityExtException('Unexpected error from request to ' \\\n                                          'listening URL')\n\n    try:\n        oauth_response_json = json.loads(oauth_response_txt)\n    except:\n        raise ManagedIdentityExtException('Error parsing JSON from 
' \\\n                                          'listening URL response')\n\n    if (oauth_response_json is not None\n            and 'access_token' in oauth_response_json):\n        return oauth_response_json['access_token']\n    else:\n        raise ManagedIdentityExtException('Could not retrieve access token ' \\\n                                          'in the listening URL response')\n\n\ndef get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):\n    \"\"\"\n    Send a request to the OMS service with the VM information to\n    determine the workspace the OMSAgent should onboard to\n    \"\"\"\n    oms_data = {'ResourceId' : vm_resource_id,\n                'TenantId' : tenant_id,\n                'JwtToken' : access_token\n    }\n    oms_request_json = json.dumps(oms_data)\n    oms_request = urllib.Request(OMSServiceValidationEndpoint)\n    oms_request.add_header('Content-Type', 'application/json')\n\n    retries = 5\n    initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds\n    sleep_increase_factor = 1\n    try_count = 0\n    sleep_time = initial_sleep_time\n\n    # Workspace may not be provisioned yet; sleep and retry if\n    # provisioning has been accepted\n    while try_count <= retries:\n        try:\n            oms_response = urllib.urlopen(oms_request, oms_request_json)\n            oms_response_txt = oms_response.read()\n        except urlerror.HTTPError as e:\n            hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))\n            hutil_log_info('Response from OMS: {0}'.format(e.read()))\n            raise OMSServiceOneClickException('ValidateMachineIdentity ' \\\n                                              'request returned an error ' \\\n                                              'HTTP code: {0}'.format(e))\n        except:\n            raise OMSServiceOneClickException('Unexpected error from ' \\\n                                              'ValidateMachineIdentity ' \\\n                                              'request')\n\n        should_retry = retry_get_workspace_info_from_oms(oms_response)\n        if not should_retry:\n            # TESTED\n            break\n        elif try_count == retries:\n            # TESTED\n            hutil_log_error('Retries for ValidateMachineIdentity request ran ' \\\n                            'out: required workspace information cannot be ' \\\n                            'extracted')\n            raise OneClickException('Workspace provisioning did not complete ' \\\n                                    'within the allotted time')\n\n        # TESTED\n        try_count += 1\n        time.sleep(sleep_time)\n        sleep_time *= sleep_increase_factor\n\n    if not oms_response_txt:\n        raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \\\n                                          'response is empty; required ' \\\n                                          'workspace information cannot be ' \\\n                                          'extracted')\n    try:\n        oms_response_json = json.loads(oms_response_txt)\n    except:\n        raise OMSServiceOneClickException('Error parsing JSON from ' \\\n                                          'ValidateMachineIdentity response')\n\n    if (oms_response_json is not None and 'WorkspaceId' in oms_response_json\n            and 'WorkspaceKey' in oms_response_json):\n        return oms_response_json\n    else:\n        hutil_log_error('Could not retrieve both workspace ID and key from ' \\\n                    
    'the OMS service response {0}; cannot determine ' \\\n                        'workspace ID and key'.format(oms_response_json))\n        raise OMSServiceOneClickException('Required workspace information ' \\\n                                          'was not found in the ' \\\n                                          'ValidateMachineIdentity response')\n\n\ndef retry_get_workspace_info_from_oms(oms_response):\n    \"\"\"\n    Return True to retry if the response from OMS for the\n    ValidateMachineIdentity request indicates that the request has\n    been accepted, but the managed workspace is still being\n    provisioned\n    \"\"\"\n    try:\n        oms_response_http_code = oms_response.getcode()\n    except:\n        hutil_log_error('Unable to get HTTP code from OMS response')\n        return False\n\n    if (oms_response_http_code == 202 or oms_response_http_code == 204\n                                      or oms_response_http_code == 404):\n        hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \\\n                       'because workspace is still being provisioned; HTTP ' \\\n                       'code from OMS is {0}'.format(oms_response_http_code))\n        return True\n    else:\n        hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \\\n                       '{0}'.format(oms_response_http_code))\n        return False\n\n\ndef init_waagent_logger():\n    \"\"\"\n    Initialize waagent logger\n    If waagent has not been imported, catch the exception\n    \"\"\"\n    try:\n        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)\n    except Exception as e:\n        print('Unable to initialize waagent log because of exception ' \\\n              '{0}'.format(e))\n\n\ndef waagent_log_info(message):\n    \"\"\"\n    Log informational message, being cautious of possibility that waagent may\n    not be imported\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        waagent.Log(message)\n    else:\n        print('Info: {0}'.format(message))\n\n\ndef waagent_log_error(message):\n    \"\"\"\n    Log error message, being cautious of possibility that waagent may not be\n    imported\n    \"\"\"\n    if 'Utils.WAAgentUtil' in sys.modules:\n        waagent.Error(message)\n    else:\n        print('Error: {0}'.format(message))\n\n\ndef hutil_log_info(message):\n    \"\"\"\n    Log informational message, being cautious of possibility that hutil may\n    not be imported and configured\n    \"\"\"\n    if HUtilObject is not None:\n        HUtilObject.log(message)\n    else:\n        print('Info: {0}'.format(message))\n\n\ndef hutil_log_error(message):\n    \"\"\"\n    Log error message, being cautious of possibility that hutil may not be\n    imported and configured\n    \"\"\"\n    if HUtilObject is not None:\n        HUtilObject.error(message)\n    else:\n        print('Error: {0}'.format(message))\n\n\ndef log_and_exit(operation, exit_code = 1, message = ''):\n    \"\"\"\n    Log the exit message and perform the exit\n    \"\"\"\n    if exit_code == 0:\n        waagent_log_info(message)\n        hutil_log_info(message)\n        exit_status = 'success'\n    else:\n        waagent_log_error(message)\n        hutil_log_error(message)\n        exit_status = 'failed'\n\n    if HUtilObject is not None:\n        HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),\n                            message)\n    else:\n        update_status_file(operation, str(exit_code), exit_status, message)\n        
sys.exit(exit_code)\n\n\n# Exceptions\n# If these exceptions are expected to be caught by the main method, they\n# include an error_code field with an integer with which to exit from main\n\nclass OmsAgentForLinuxException(Exception):\n    \"\"\"\n    Base exception class for all exceptions; as such, its error code is the\n    basic error code traditionally returned in Linux: 1\n    \"\"\"\n    error_code = 1\n    def get_error_message(self, operation):\n        \"\"\"\n        Return a descriptive error message based on this type of exception\n        \"\"\"\n        return '{0} failed with exit code {1}'.format(operation,\n                                                      self.error_code)\n\n\nclass ParameterMissingException(OmsAgentForLinuxException):\n    \"\"\"\n    There is a missing parameter for the OmsAgentForLinux Extension\n    \"\"\"\n    error_code = MissingorInvalidParameterErrorCode\n    def get_error_message(self, operation):\n        return '{0} failed due to a missing parameter: {1}'.format(operation,\n                                                                   self)\n\n\nclass InvalidParameterError(OmsAgentForLinuxException):\n    \"\"\"\n    There is an invalid parameter for the OmsAgentForLinux Extension\n    ex. Workspace ID does not match GUID regex\n    \"\"\"\n    error_code = MissingorInvalidParameterErrorCode\n    def get_error_message(self, operation):\n        return '{0} failed due to an invalid parameter: {1}'.format(operation,\n                                                                    self)\n\n\nclass UnwantedMultipleConnectionsException(OmsAgentForLinuxException):\n    \"\"\"\n    This VM is already connected to a different Log Analytics workspace\n    and stopOnMultipleConnections is set to true\n    \"\"\"\n    error_code = UnwantedMultipleConnectionsErrorCode\n    def get_error_message(self, operation):\n        return '{0} failed due to multiple connections: {1}'.format(operation,\n                                                                    self)\n\n\nclass CannotConnectToOMSException(OmsAgentForLinuxException):\n    \"\"\"\n    The OMSAgent cannot connect to the OMS service\n    \"\"\"\n    error_code = CannotConnectToOMSErrorCode # error code to indicate no internet access\n    def get_error_message(self, operation):\n        return 'The agent could not connect to the Microsoft Operations ' \\\n               'Management Suite service. Please check that the system ' \\\n               'either has Internet access, or that a valid HTTP proxy has ' \\\n               'been configured for the agent. Please also check the ' \\\n               'correctness of the workspace ID.'\n\n\nclass OneClickException(OmsAgentForLinuxException):\n    \"\"\"\n    A generic exception for OneClick-related issues\n    \"\"\"\n    error_code = OneClickErrorCode\n    def get_error_message(self, operation):\n        return 'Encountered an issue related to the OneClick scenario: ' \\\n               '{0}'.format(self)\n\n\nclass ManagedIdentityExtMissingException(OneClickException):\n    \"\"\"\n    This extension being present is required for the OneClick scenario\n    \"\"\"\n    error_code = ManagedIdentityExtMissingErrorCode\n    def get_error_message(self, operation):\n        return 'The ManagedIdentity extension is required to be installed ' \\\n               'for Automatic Management to be enabled. 
Please set ' \\\n               'EnableAutomaticManagement to false in public settings or ' \\\n               'install the ManagedIdentityExtensionForLinux Azure VM ' \\\n               'extension.'\n\n\nclass ManagedIdentityExtException(OneClickException):\n    \"\"\"\n    Thrown when we encounter an issue with ManagedIdentityExtensionForLinux\n    \"\"\"\n    error_code = ManagedIdentityExtErrorCode\n    def get_error_message(self, operation):\n        return 'Encountered an issue with the ManagedIdentity extension: ' \\\n               '{0}'.format(self)\n\n\nclass MetadataAPIException(OneClickException):\n    \"\"\"\n    Thrown when we encounter an issue with the Metadata API\n    \"\"\"\n    error_code = MetadataAPIErrorCode\n    def get_error_message(self, operation):\n        return 'Encountered an issue with the Metadata API: {0}'.format(self)\n\n\nclass OMSServiceOneClickException(OneClickException):\n    \"\"\"\n    Thrown when prerequisites were satisfied but the managed workspace\n    information could not be retrieved from the OMS service\n    \"\"\"\n    error_code = OMSServiceOneClickErrorCode\n    def get_error_message(self, operation):\n        return 'Encountered an issue with the OMS service: ' \\\n               '{0}'.format(self)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "OmsAgent/omsagent.version",
    "content": "# Do NOT update the values here; CDPx will use the ones\n# defined in Build-OMS-Agent-for-Linux/omsagent.version\n\nOMS_VERSION_MAJOR=0\nOMS_VERSION_MINOR=0\nOMS_VERSION_PATCH_EXTENSION=0\nOMS_VERSION_PATCH_SHELL_BUNDLE=0\nOMS_VERSION_BUILDNR_SHELL_BUNDLE=0\nOMS_VERSION_DATE=0\n\nOMS_EXTENSION_VERSION=\"$OMS_VERSION_MAJOR.$OMS_VERSION_MINOR.$OMS_VERSION_PATCH_EXTENSION\"\nOMS_SHELL_BUNDLE_VERSION=\"$OMS_VERSION_MAJOR.$OMS_VERSION_MINOR.$OMS_VERSION_PATCH_SHELL_BUNDLE-$OMS_VERSION_BUILDNR_SHELL_BUNDLE\"\n"
  },
  {
    "path": "OmsAgent/omsagent_shim.sh",
    "content": "#!/usr/bin/env bash\n\n# The entry point for the OMS extension through which the correct python version (if any) is used to invoke omsagent.py.\n# We default to python2 and always invoke with the versioned python command to accomodate the RHEL 8+ python strategy.\n# Control arguments passed to the shim are redirected to omsagent.py without validation.\n\nCOMMAND=\"./omsagent.py\"\nPYTHON=\"\"\nARG=\"$@\"\n\nfunction find_python() {\n    local python_exec_command=$1\n\n    if command -v python2 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python2\"\n    elif command -v python3 >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python3\"\n    fi\n}\n\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ]\nthen\n    echo \"No Python interpreter found, which is an OMS extension dependency. Please install either Python 2 or 3.\" >&2\n    exit 52 # Missing Dependency\nelse\n    ${PYTHON} --version 2>&1\nfi\n\nPYTHONPATH=${PYTHONPATH} ${PYTHON} ${COMMAND} ${ARG}\nexit $?\n"
  },
  {
    "path": "OmsAgent/packaging.sh",
    "content": "#! /bin/bash\nset -e\nsource omsagent.version\n\nusage()\n{\n    local basename=`basename $0`\n    echo \"usage: ./$basename <path to omsagent-<version>.universal.x64{.sh, .sha256sums, .asc}> [path for zip output]\"\n}\n\ninput_path=$1\noutput_path=$2\nPACKAGE_NAME=\"oms$OMS_EXTENSION_VERSION.zip\"\nif [[ \"$1\" == \"--help\" ]]; then\n    usage\n    exit 0\nelif [[ ! -d $input_path ]]; then\n    echo \"OMS files path '$input_path' not found\"\n    usage\n    exit 1\nfi\n\nif [[ \"$output_path\" == \"\" ]]; then\n    output_path=\"../\"\nfi\n\n# Packaging starts here\ncp -r ../Utils .\ncp ../Common/WALinuxAgent-2.0.16/waagent .\n\n# cleanup packages\n# copy shell bundle to packages/\ncp $input_path/omsagent-$OMS_SHELL_BUNDLE_VERSION.universal.x64.* packages/\n# sync the file copy\nsync\n\nif [[ -f $output_path/$PACKAGE_NAME ]]; then\n    echo \"Removing existing $PACKAGE_NAME ...\"\n    rm -f $output_path/$PACKAGE_NAME\nfi\n\necho \"Packaging extension $PACKAGE_NAME to $output_path\"\nexcluded_files=\"omsagent.version packaging.sh apply_version.sh update_version.sh\"\nzip -r $output_path/$PACKAGE_NAME * -x $excluded_files \"./test/*\" \"./extension-test/*\" \"./references\"\n\n# cleanup newly added dir or files\nrm -rf Utils/ waagent\n"
  },
  {
    "path": "OmsAgent/references",
    "content": "Utils/\n"
  },
  {
    "path": "OmsAgent/test/MockUtil.py",
    "content": "#!/usr/bin/env python\n#\n#OmsAgent extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass MockUtil():\n    def __init__(self, test):\n        self.test = test\n\n    def get_log_dir(self):\n        return \"/tmp\"\n\n    def log(self, msg):\n        print(msg)\n\n    def error(self, msg):\n        print(msg)\n\n    def get_seq_no(self):\n        return \"0\"\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.test.assertNotEqual(None, message)\n\n    def do_exit(self,exit_code,operation,status,code,message):\n        self.test.assertNotEqual(None, message)\n"
  },
  {
    "path": "OmsAgent/test/env.py",
    "content": "#!/usr/bin/env python\n#\n#OmsAgent extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport os\n\n# append installer directory to sys.path\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n"
  },
  {
    "path": "OmsAgent/test/test_install.py",
    "content": "#!/usr/bin/env python\n#\n# OmsAgent extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport env\nimport omsagent as oa\nimport os\nfrom MockUtil import MockUtil\n\nos.chdir(env.root)\n\n\nclass TestInstall(unittest.TestCase):\n    def test_install(self):\n        hutil = MockUtil(self)\n        oa.install(hutil)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "OmsAgent/update_version.sh",
    "content": "#! /bin/bash\nset -x\n\nif [[ \"$1\" == \"--help\" ]]; then\n    echo \"update_version.sh <MAJOR> <MINOR> <PATCH> <BUILDNR>\"\n    exit 0\nfi\n\nUPDATE_DATE=`date +%Y%m%d`\nOMS_BUILDVERSION_MAJOR=$1\nOMS_BUILDVERSION_MINOR=$2\nOMS_BUILDVERSION_PATCH=$3\nOMS_BUILDVERSION_BUILDNR=$4\n\nif [[ \"$OMS_BUILDVERSION_MAJOR\" == \"\" ]]; then\n    echo \"MAJOR version is empty\"\n    exit 1\nfi\n\nif [[ \"$OMS_BUILDVERSION_MINOR\" == \"\" ]]; then\n    echo \"MINOR version is empty\"\n    exit 1\nfi\n\nif [[ \"$OMS_BUILDVERSION_PATCH\" == \"\" ]]; then\n    echo \"PATH version is empty\"\n    exit 1\nfi\n\nif [[ \"$OMS_BUILDVERSION_BUILDNR\" == \"\" ]]; then\n    echo \"BUILDNR version is empty\"\n    exit 1\nfi\n\n\nsed -i \"s/^OMS_VERSION_MAJOR=.*$/OMS_VERSION_MAJOR=$OMS_BUILDVERSION_MAJOR/\" omsagent.version\nsed -i \"s/^OMS_VERSION_MINOR=.*$/OMS_VERSION_MINOR=$OMS_BUILDVERSION_MINOR/\" omsagent.version\nsed -i \"s/^OMS_VERSION_PATCH_EXTENSION=.*$/OMS_VERSION_PATCH_EXTENSION=$OMS_BUILDVERSION_PATCH/\" omsagent.version\nsed -i \"s/^OMS_VERSION_PATCH_SHELL_BUNDLE=.*$/OMS_VERSION_PATCH_SHELL_BUNDLE=$OMS_BUILDVERSION_PATCH/\" omsagent.version\nsed -i \"s/^OMS_VERSION_BUILDNR_SHELL_BUNDLE=.*$/OMS_VERSION_BUILDNR_SHELL_BUNDLE=$OMS_BUILDVERSION_BUILDNR/\" omsagent.version\nsed -i \"s/^OMS_VERSION_DATE=.*$/OMS_VERSION_DATE=$UPDATE_DATE/\" omsagent.version\n"
  },
  {
    "path": "OmsAgent/watcherutil.py",
    "content": "#!/usr/bin/env python\n#\n# OmsAgentForLinux Extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport os\nimport io\nimport datetime\nfrom datetime import datetime, timedelta\nimport time\nimport string\nimport traceback\nimport shutil\nimport sys\nimport json\nimport uuid\nfrom threading import Thread\nimport re\nimport hashlib\nfrom omsagent import run_command_and_log\nfrom omsagent import RestartOMSAgentServiceCommand\n\n\"\"\"\n    Write now hardcode memory threshold to watch for to 20 %.\n    If agent is using more than 20% of memory it is definitely very high.\n    In future we may want to set it based on customer configuration.\n\"\"\"\n\n# Constants.\nMemoryThresholdToWatchFor = 20\nOmsAgentPidFile = \"/var/opt/microsoft/omsagent/run/omsagent.pid\"\nOmsAgentLogFile = \"/var/opt/microsoft/omsagent/log/omsagent.log\"\nreg_ex = re.compile('([0-9]{4}-[0-9]{2}-[0-9]{2}.*)\\[(\\w+)\\]:(.*)')\nmaxMessageSize = 100\nOMSExtensionVersion = '1.13.19'\n\"\"\"\nWe can add to the list below with more error messages to identify non recoverable errors.\n\"\"\"\nErrorStatements = [\"Errono::ENOSPC error=\", \"Fatal error, can not clear buffer file\", \"No space left on the device\"]\n\nclass SelfMonitorInfo(object):\n    \"\"\"\n        Class to hold self mon info for omsagent.\n    \"\"\"\n    def __init__(self):\n        self._consecutive_error_count = 0\n        self._last_reset_success = True\n        self._error_count = 0\n        self._memory_used_in_percent = 0\n        self._consecutive_high_memory_usage = 0\n\n    def reset(self):\n        self._consecutive_error_count = 0\n        self._consecutive_high_memory_usage = 0\n        self._memory_used_in_percent = 0\n\n    def reset_error_info(self):\n        self._consecutive_error_count = 0\n\n    def increment_heartbeat_missing_count(self):\n        self._consecutive_error_count += 1\n\n    def crossed_error_threshold(self):\n        if (self._consecutive_error_count > 3):\n            return True\n        else:\n            return False\n\n    def corssed_memory_threshold(self):\n        if (self._consecutive_high_memory_usage > 3):\n            return True\n        else:\n            return False\n\n    def increment_high_memory_count(self):\n        self._consecutive_high_memory_usage += 1\n\n    def reset_high_memory_count(self):\n        self._consecutive_high_memory_usage = 0\n\n    def current_status(self):\n        \"\"\"\n            Python 2.6 does not support enum.\n        \"\"\"\n        if (self._consecutive_error_count == 0 and self._consecutive_high_memory_usage == 0):\n            return \"Green\"\n        elif (self._consecutive_error_count < 3 and self._consecutive_high_memory_usage < 3):\n            return \"Yellow\"\n        else:\n            return \"Red\"\n\nclass LogFileMarker(object):\n    \"\"\"\n        Class to hold omsagent log file marker information.\n    \"\"\"\n    def __init__(self):\n        self._last_pos = 0\n  
      self._last_crc = \"\"\n\n    def reset_marker(self):\n        self._last_pos = 0\n        self._last_crc = \"\"\n\nclass Watcher(object):\n    \"\"\"\n    A class that handles periodic monitoring activities.\n    \"\"\"\n\n    def __init__(self, hutil_error, hutil_log):\n        \"\"\"\n        Constructor.\n        :param hutil_error: Error logging function (e.g., hutil.error). This is not a stream.\n        :param hutil_log: Normal logging function (e.g., hutil.log). This is not a stream.\n        \"\"\"\n        self._hutil_error = hutil_error\n        self._hutil_log = hutil_log\n        self._consecutive_error_count = 0\n        self._consecutive_restarts_due_to_error = 0\n\n    def write_waagent_event(self, event):\n        # Keep the offset numeric so it can be incremented if the file name collides.\n        offset = int(time.time() * 1000000)\n\n        temp_fn = '/var/lib/waagent/events/'+str(uuid.uuid4())\n        with open(temp_fn,'w+') as fh:\n            fh.write(event)\n\n        fn_template = '/var/lib/waagent/events/{}.tld'\n        fn = fn_template.format(offset)\n        while os.path.isfile(fn):\n            offset += 1\n            fn = fn_template.format(offset)\n\n        shutil.move(temp_fn, fn)\n\n        self._hutil_log(fn)\n\n    def create_telemetry_event(self, operation, operation_success, message, duration):\n        template = \"\"\" {{\n            \"eventId\": 1,\n            \"providerId\": \"69B669B9-4AF8-4C50-BDC4-6006FA76E975\",\n            \"parameters\": [\n                        {{\n                        \"name\": \"Name\",\n                        \"value\": \"Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux\"\n                }},\n                        {{\n\n            \t\t\t\"name\": \"Version\",\n            \t\t\t\"value\": \\\"\"\"\" + OMSExtensionVersion + \"\"\"\\\"\n             \t\t}},\n\n                        {{\n                        \"name\": \"Operation\",\n                        \"value\": \"{}\"\n                }},\n                        {{\n                        \"name\": \"OperationSuccess\",\n                        \"value\": {}\n                }},\n                        {{\n                        \"name\": \"Message\",\n                        \"value\": \"{}\"\n                }},\n                        {{\n                        \"name\": \"Duration\",\n                        \"value\": {}\n                }}\n            ]\n            }}\"\"\"\n\n        operation_success_as_string = str(operation_success).lower()\n        # Escape embedded double quotes so the message stays valid inside the JSON template.\n        formatted_message = message.replace(\"\\n\", \"\\\\n\").replace(\"\\t\", \"\\\\t\").replace('\"', '\\\\\"')\n\n        return template.format(operation, operation_success_as_string, formatted_message, duration)\n\n    def upload_telemetry(self):\n        status_files = [\n                \"/var/opt/microsoft/omsagent/log/ODSIngestion.status\",\n                \"/var/opt/microsoft/omsagent/log/ODSIngestionBlob.status\",\n                \"/var/opt/microsoft/omsagent/log/ODSIngestionAPI.status\",\n                \"/var/opt/microsoft/omsconfig/status/dscperformconsistency\",\n                \"/var/opt/microsoft/omsconfig/status/dscperforminventory\",\n                \"/var/opt/microsoft/omsconfig/status/dscsetlcm\",\n                \"/var/opt/microsoft/omsconfig/status/omsconfighost\"\n            ]\n        for sf in status_files:\n            if os.path.isfile(sf):\n                mod_time = os.path.getmtime(sf)\n                curr_time = int(time.time())\n                if (curr_time - mod_time < 300):\n                    with open(sf) as 
json_file:\n                        try:\n                            status_data = json.load(json_file)\n                            operation = status_data[\"operation\"]\n                            operation_success = status_data[\"success\"]\n                            # Truncating the message to prevent flooding the system\n                            message = status_data[\"message\"][:maxMessageSize]\n\n                            event = self.create_telemetry_event(operation,operation_success,message,\"300000\")\n                            self._hutil_log(\"Writing telemetry event: \"+event)\n                            self.write_waagent_event(event)\n                            self._hutil_log(\"Successfully processed telemetry status file: \"+sf)\n\n                        except Exception:\n                            self._hutil_log(\"Error parsing telemetry status file: \"+sf)\n                            self._hutil_log(\"Exception info: \"+traceback.format_exc())\n                    if sf.startswith(\"/var/opt/microsoft/omsconfig/status\"):\n                        try:\n                            self._hutil_log(\"Cleaning up: \" + sf)\n                            os.remove(sf)\n                        except Exception:\n                            self._hutil_log(\"Error removing telemetry status file: \" + sf)\n                            self._hutil_log(\"Exception info: \" + traceback.format_exc())\n                else:\n                    self._hutil_log(\"Telemetry status file not updated in last 5 mins: \"+sf)\n            else:\n                self._hutil_log(\"Telemetry status file does not exist: \"+sf)\n        pass\n\n    def watch(self):\n        \"\"\"\n        Main loop performing various monitoring activities periodically.\n        Currently iterates every 5 minutes, and other periodic activities might be\n        added in the loop later.\n        :return: None\n        \"\"\"\n        self._hutil_log('started watcher thread')\n        while True:\n            self._hutil_log('watcher thread waking')\n\n            self.upload_telemetry()\n\n            # Sleep 5 minutes\n            self._hutil_log('watcher thread sleeping')\n            time.sleep(60 * 5)\n\n        pass\n\n    def monitor_heartbeat(self, self_mon_info, log_file_marker):\n        \"\"\"\n            Monitor heartbeat health. The OMS output plugin will update the timestamp\n            of the heartbeat file every 5 minutes. We will check if it is updated.\n            If not, we will look into omsagent logs and look for specific error logs\n            which indicate we are in a non-recoverable state.\n        \"\"\"\n        take_action = False\n\n        if (not self.received_heartbeat_recently()):\n            \"\"\"\n                We haven't seen a heartbeat in more than 300 seconds\n            \"\"\"\n            self_mon_info.increment_heartbeat_missing_count()\n            take_action = False\n            if (self_mon_info.crossed_error_threshold()):\n                # If we do not see heartbeat for last 3 iterations, take corrective action.\n                take_action = True\n\n            elif (self.check_for_fatal_oms_logs(log_file_marker)):\n\n                # If we see heartbeat missing and error message, no need to wait for more than one\n                # iteration. It is not a false positive. 
Take corrective action immediately.\n                take_action = True\n\n            if (take_action):\n                if (self._consecutive_restarts_due_to_error < 5):\n                    self.take_corrective_action(self_mon_info)\n                    self._consecutive_restarts_due_to_error += 1\n                else:\n                    self._hutil_error(\"Last 5 restarts did not help. So we will not restart the agent immediately\")\n\n                    # Reset historical information.\n                    self._consecutive_restarts_due_to_error = 0\n                    self_mon_info.reset_error_info()\n        else:\n            \"\"\"\n                If we are able to get the heartbeats, check omsagent logs\n                to identify if there are any error logs.\n            \"\"\"\n            self_mon_info.reset_error_info()\n            self._consecutive_restarts_due_to_error = 0\n\n    def received_heartbeat_recently(self):\n        heartbeat_file = '/var/opt/microsoft/omsagent/log/ODSIngestion.status'\n        curr_time = int(time.time())\n        return_val = True\n        file_update_time = curr_time\n\n        if (os.path.isfile(heartbeat_file)):\n            file_update_time = os.path.getmtime(heartbeat_file)\n            self._hutil_log(\"File update time={0}, current time={1}\".format(file_update_time, curr_time))\n        else:\n            self._hutil_log(\"Heartbeat file is not present on the disk.\")\n            file_update_time = curr_time - 1000\n\n        if (file_update_time + 360 < curr_time):\n            return_val = False\n        else:\n            try:\n                with open(heartbeat_file) as json_file:\n                    status_data = json.load(json_file)\n                    operation_success = status_data[\"success\"]\n                    if (operation_success.lower() == \"true\"):\n                        self._hutil_log(\"Found success message from ODS Ingestion.\")\n                        return_val = True\n                    else:\n                        self._hutil_log(\"Did not find success message in heartbeat file. {0}\".format(operation_success))\n                        return_val = False\n            except Exception as e:\n                self._hutil_log(\"Error parsing ODS Ingestion status file: \" + str(e))\n\n                # Return True in case we failed to parse the file. We do not want to go into recycle loop in this scenario.\n                return_val = True\n\n        return return_val\n\n    def monitor_resource(self, self_mon_info):\n        \"\"\"\n            Monitor resource utilization of omsagent.\n            Check for memory and CPU periodically. 
If they cross the threshold for 3 consecutive iterations\n            we will restart the agent.\n        \"\"\"\n\n        resource_usage = self.get_oms_agent_resource_usage()\n        message = \"Memory : {0}, CPU : {1}\".format(resource_usage[0], resource_usage[1])\n        event = self.create_telemetry_event(\"agenttelemetry\",\"True\",message,\"300000\")\n        self.write_waagent_event(event)\n\n        self_mon_info._memory_used_in_percent = resource_usage[0]\n\n        if (self_mon_info._memory_used_in_percent > 0):\n            if (self_mon_info._memory_used_in_percent > MemoryThresholdToWatchFor):\n                # Check consecutive memory usage; the count must accumulate\n                # across iterations, so do not reset it here.\n                self_mon_info.increment_high_memory_count()\n                if (self_mon_info.crossed_memory_threshold()):\n                    # if we have crossed the memory threshold take corrective action.\n                    self.take_corrective_action(self_mon_info)\n            else:\n                self_mon_info.reset_high_memory_count()\n\n    def monitor_health(self):\n        \"\"\"\n            The role of this function is to monitor the health of the OMS agent.\n            To begin with it will monitor heartbeats flowing through the OMS agent.\n            We will also read OMS agent logs to determine some error conditions.\n            We don't want to interfere with the log watcher function,\n            so we will start this on a new thread.\n        \"\"\"\n\n        self_mon_info = SelfMonitorInfo()\n        log_file_marker = LogFileMarker()\n\n        # Check every 6 minutes; we want to be a bit pessimistic while looking for health, especially heartbeats, which are emitted every 5 minutes.\n        sleepTime = 6 * 60\n\n        # sleep before starting the monitoring.\n        time.sleep(sleepTime)\n\n        while True:\n            try:\n                # Monitor heartbeat and logs.\n                self.monitor_heartbeat(self_mon_info, log_file_marker)\n\n                # Monitor memory usage\n                self.monitor_resource(self_mon_info)\n\n            except IOError as e:\n                self._hutil_error('I/O error in monitoring health of the omsagent. Exception={0}'.format(e))\n\n            except Exception as e:\n                self._hutil_error('Error in monitoring health of the omsagent. 
Exception={0}'.format(e))\n\n            finally:\n                time.sleep(sleepTime)\n\n    def take_corrective_action(self, self_mon_info):\n        \"\"\"\n            Take a corrective action.\n        \"\"\"\n        run_command_and_log(RestartOMSAgentServiceCommand)\n        self._hutil_log(\"Successfully restarted OMS linux agent, resetting self mon information.\")\n\n        # Reset self mon information.\n        self_mon_info.reset()\n\n    def emit_telemetry_after_corrective_action(self):\n        \"\"\"\n            TODO : Emit telemetry after taking corrective action.\n        \"\"\"\n    def get_total_seconds_from_epoch_for_fluent_logs(self, datetime_string):\n        # fluentd logs timestamp format : 2018-08-02 19:27:34 +0000\n        # for python 2.7 or earlier there is no good way to convert it into seconds.\n        # so we parse up to seconds, and parse the UTC offset separately.\n        try:\n            date_time_format = '%Y-%m-%d %H:%M:%S'\n            epoch = datetime(1970, 1, 1)\n\n            # get hours and minute delta for utc offset.\n            hours_delta_utc = int(datetime_string[21:23])\n            minutes_delta_utc = int(datetime_string[23:])\n\n            log_time = datetime.strptime(datetime_string[:19], date_time_format) + ((timedelta(hours=hours_delta_utc, minutes=minutes_delta_utc)) * (-1 if datetime_string[20] == \"+\" else 1))\n            return (log_time - epoch).total_seconds()\n        except Exception as e:\n            self._hutil_error('Error converting timestamp string to seconds. Exception={0}'.format(e))\n\n        return 0\n\n    def check_for_fatal_oms_logs(self, log_file_marker):\n        \"\"\"\n            This function will go through the oms log file and check for\n            logs indicating a non-recoverable state. That set is hardcoded right now\n            and we can add to it as we learn more.\n            If we find at least one occurrence of such a log line since the last occurrence,\n            we will return True, else we will return False.\n        \"\"\"\n\n        read_start_time = int(time.time())\n\n        if os.path.isfile(OmsAgentLogFile):\n            last_crc = log_file_marker._last_crc\n            last_pos = log_file_marker._last_pos\n\n            # We do not want to propagate any exception to the caller.\n\n            f = None\n            try:\n                f = open(OmsAgentLogFile, \"r\")\n\n                text = f.readline()\n\n                #  Handle log rotate. Check for CRC of first line of the log file.\n                #  Some of the agents, like Splunk, use this technique.\n                #  If it matches with previous CRC, then file has not changed.\n                #  If it is not matching then file has changed and do not seek from\n                #  the last_pos rather continue from the beginning.\n\n                if (text != ''):\n                    # Encode first so this also works on Python 3, where text is str.\n                    crc = hashlib.sha256(text.encode('utf-8')).hexdigest()\n                    self._hutil_log(\"Last crc = {0}, current crc= {1} position = {2}\".format(last_crc, crc, last_pos))\n                    if (last_crc == crc):\n\n                        if (last_pos > 0):\n                            f.seek(last_pos)\n                    else:\n                        self._hutil_log(\"File has changed; do not seek from the offset. 
current crc = {0}\".format(crc))\n\n                    log_file_marker._last_crc = crc\n                    total_lines_read = 1\n\n                while True:\n                    text = f.readline()\n\n                    if (text == ''):\n                        log_file_marker._last_pos = f.tell()\n                        break\n\n                    total_lines_read += 1\n                    res = reg_ex.match(text)\n\n                    if res:\n                        log_entry_time = self.get_total_seconds_from_epoch_for_fluent_logs(res.group(1))\n                        if (log_entry_time + (10 * 60) < read_start_time):\n                            # ignore log line if we are reading logs older than 10 minutes.\n                            pass\n                        elif (res.group(2) == \"warn\" or res.group(2) == \"error\"):\n                            for error_statement in ErrorStatements:\n                                if (res.group(3) in error_statement):\n                                    self._hutil_error(\"Found non recoverable error log in agent log file\")\n\n                                    # File should be closed in the finally block.\n                                    return True\n\n                self._hutil_log(\"Did not find any non recoverable logs in omsagent log file\")\n\n            except Exception as e:\n                self._hutil_error (\"Caught an exception {0}\".format(traceback.format_exc()))\n\n            finally:\n                f.close()\n        else:\n            self._hutil_error (\"Omsagent log file not found : {0}\".format(OmsAgentLogFile))\n\n        return False\n\n    def get_oms_agent_resource_usage(self):\n        \"\"\"\n            If we hit any exception in getting resoource usage of the omsagent return 0,0\n            We need not crash/fail in this case.\n            return tuple : memory, cpu.\n            Long run for north star we should use cgroups. cgroups tools are not available\n            by default on all the distros and we would need to package with the agent those and use.\n            Also at this point it is not very clear if customers would want us to create cgroups on their vms.\n        \"\"\"\n\n        try:\n            mem_usage = 0.0\n            cpu_usage = 0.0\n            with open(OmsAgentPidFile, 'r') as infile:\n                pid = infile.readline()\t # Get pid of omsagent process.\n\n                # top output:\n                # $1 - PID,\n                # $2 - account,\n                # $9 - CPU,\n                # $10 - Memory,\n                # $12 - Process name\n                out = subprocess.Popen('top -bn1 | grep -i omsagent | awk \\'{print $1 \" \" $2 \" \" $9 \" \" $10  \" \" $12}\\'', shell=True, stdout=subprocess.PIPE)\n                for line in out.stdout:\n                    s = line.split()\n\n                    if (len(s) >= 4 and s[0] == pid and s[1] == 'omsagent' and s[4] == 'omsagent'):\n                        return float(s[3]) , float(s[2])\n\n        except Exception as e:\n            self._hutil_error('Error getting memory usage for omsagent process. Exception={0}'.format(e))\n\n        # Control will reach here only in case of error condition. In that case it is ok to return 0 as it is harmless to be cautious.\n        return mem_usage, cpu_usage\n"
  },
  {
    "path": "RDMAUpdate/MANIFEST.in",
    "content": "include HandlerManifest.json handler.py\nprune test\n"
  },
  {
    "path": "RDMAUpdate/RDMAUpdate.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>7883b2c9-5431-4fac-bfca-c92b8a17644a</ProjectGuid>\n    <ProjectHome>.</ProjectHome>\n    <StartupFile>RDMAUpdate.py</StartupFile>\n    <SearchPath>\n    </SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>RDMAUpdate</Name>\n    <RootNamespace>RDMAUpdate</RootNamespace>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <ItemGroup>\n    <Compile Include=\"RDMAUpdate.py\" />\n  </ItemGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <PtvsTargetsFile>$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <Import Condition=\"Exists($(PtvsTargetsFile))\" Project=\"$(PtvsTargetsFile)\" />\n  <Import Condition=\"!Exists($(PtvsTargetsFile))\" Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" />\n  <!-- Uncomment the CoreCompile target to enable the Build command in\n       Visual Studio and specify your pre- and post-build commands in\n       the BeforeBuild and AfterBuild targets below. -->\n  <!--<Target Name=\"CoreCompile\" />-->\n  <Target Name=\"BeforeBuild\">\n  </Target>\n  <Target Name=\"AfterBuild\">\n  </Target>\n</Project>"
  },
  {
    "path": "RDMAUpdate/README.txt",
    "content": ""
  },
  {
    "path": "RDMAUpdate/enableit.js",
    "content": "﻿"
  },
  {
    "path": "RDMAUpdate/main/CommandExecuter.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport os\nimport os.path\nimport shlex\nimport sys\nfrom subprocess import *\n\nclass CommandExecuter(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger):\n        self.logger = logger\n\n    def Execute(self, command_to_execute):\n        self.logger.log(\"Executing:\" + command_to_execute)\n        args = shlex.split(command_to_execute)\n        proc = Popen(args)\n        returnCode = proc.wait()\n        return returnCode\n\n    def RunGetOutput(self, command_to_execute):\n        try:\n            output=subprocess.check_output(command_to_execute,stderr=subprocess.STDOUT,shell=True)\n            return 0,output.decode('latin-1')\n        except subprocess.CalledProcessError as e :\n            self.logger.log('CalledProcessError.  Error Code is ' + str(e.returncode)  )\n            self.logger.log('CalledProcessError.  Command string was ' + e.cmd  )\n            self.logger.log('CalledProcessError.  Command result was ' + (e.output[:-1]).decode('latin-1'))\n            return e.returncode,e.output.decode('latin-1')\n"
  },
  {
    "path": "RDMAUpdate/main/Common.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass CommonVariables:\n    azure_path = 'main/azure'\n    utils_path_name = 'Utils'\n    extension_name = 'RDMAUpdateForLinux'\n    extension_version = \"0.1.0.8\"\n    extension_type = extension_name\n    extension_media_link = 'https://andliu.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'\n    extension_label = 'Windows Azure RDMA Update Extension for Linux IaaS'\n    extension_description = extension_label\n\n    \"\"\"\n    configurations\n    \"\"\"\n    wrapper_package_name = 'msft-rdma-drivers'\n\n    \"\"\"\n    error code definitions\n    \"\"\"\n    process_success = 0\n    common_failed = 1\n    install_hv_utils_failed = 2\n    nd_driver_detect_error = 3\n    driver_version_not_found = 4\n    unknown_error = 5\n    package_not_found = 6\n    package_install_failed = 7\n\n    \"\"\"\n    logs related\n    \"\"\"\n    InfoLevel = 'Info'\n    WarningLevel = 'Warning'\n    ErrorLevel = 'Error'\n\n    \"\"\"\n    check_rdma_result\n    \"\"\"\n    UpToDate = 0\n    OutOfDate = 1\n    DriverVersionNotFound = 3\n    Unknown = -1\n\n"
  },
  {
    "path": "RDMAUpdate/main/CronUtil.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport sys\nfrom Utils import HandlerUtil\nfrom CommandExecuter import CommandExecuter\nfrom Common import CommonVariables\n\n\nclass CronUtil(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self,logger):\n        self.logger = logger\n        self.crontab = '/etc/crontab'\n        self.cron_restart_cmd = 'service cron restart'\n\n    def check_update_cron_config(self):\n        script_file_path = os.path.realpath(sys.argv[0])\n        script_dir = os.path.dirname(script_file_path)\n        script_file = os.path.basename(script_file_path)\n        old_line_end = ' '.join([script_file, '-chkrdma'])\n\n        new_line = ' '.join(['\\n0 0 * * *', 'root cd', script_dir + \"/..\", '&& python main/handle.py -chkrdma >/dev/null 2>&1\\n'])\n\n        HandlerUtil.waagent.ReplaceFileContentsAtomic(self.crontab, \\\n            '\\n'.join(filter(lambda a: a and (old_line_end not in a), HandlerUtil.waagent.GetFileContents(self.crontab).split('\\n')))+ new_line)\n    \n    def restart_cron(self):\n        commandExecuter = CommandExecuter(self.logger)\n        returnCode = commandExecuter.Execute(self.cron_restart_cmd)\n        if(returnCode != CommonVariables.process_success):\n            self.logger.log(msg=\"\",level=CommonVariables.ErrorLevel)\n"
  },
  {
    "path": "RDMAUpdate/main/RDMALogger.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\n\nclass RDMALogger(object):\n    def __init__(self, hutil):\n        self.msg = ''\n        self.hutil = hutil\n\n    \"\"\"description of class\"\"\"\n    def log(self, msg, level='Info'):\n        log_msg = (str(datetime.datetime.now()) + '   ' + level + '   ' + msg + '\\n')\n        self.hutil.log(log_msg)\n"
  },
  {
    "path": "RDMAUpdate/main/RdmaException.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass RdmaException(Exception):\n    def __init__(self, value):\n        self.value = value\n    def __str__(self):\n        return repr(self.value)\n\n\n"
  },
  {
    "path": "RDMAUpdate/main/SecondStageMarkConfig.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\n\nclass SecondStageMarkConfig(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self):\n        self.mark_file_path = './second_stage_mark_FD76C85E-406F-4CFA-8EB0-CF18B123365C'\n\n    def MarkIt(self):\n        with open(self.mark_file_path,'w') as file:\n            file.write('marked')\n\n    def IsMarked(self):\n        return os.path.exists(self.mark_file_path)\n\n    def ClearIt(self):\n        if(self.IsMarked()):\n            os.remove(self.mark_file_path)\n        else:\n            pass"
  },
  {
    "path": "RDMAUpdate/main/Utils/HandlerUtil.py",
    "content": "﻿#\n# Handler library for Linux IaaS\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n    \n  }\n}]\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. 
\n\nimport base64\nimport os\nimport os.path\nimport sys\nimport json\nimport time\nimport tempfile\nfrom os.path import join\nfrom Utils.WAAgentUtil import waagent\nfrom waagent import LoggerInit\nimport logging\nimport logging.handlers\n\nDateTimeFormat = \"%Y-%m-%dT%H:%M:%SZ\"\n\nclass HandlerContext:\n    def __init__(self,name):\n        self._name = name\n        self._version = '0.0'\n        return\n\nclass HandlerUtility:\n    def __init__(self, log, error, short_name):\n        self._log = log\n        self._error = error\n        self._short_name = short_name\n        self.syslogger = logging.getLogger(self._short_name)\n        self.syslogger.setLevel(logging.INFO)\n        handler = logging.handlers.SysLogHandler(address='/dev/log')\n        formatter = logging.Formatter('%(name)s: %(levelname)s %(message)s')\n        handler.setFormatter(formatter)\n        self.syslogger.addHandler(handler)\n\n    def _get_log_prefix(self):\n        return '[%s-%s]' % (self._context._name, self._context._version)\n\n    def _get_current_seq_no(self, config_folder):\n        seq_no = -1\n        cur_seq_no = -1\n        freshest_time = None\n        for subdir, dirs, files in os.walk(config_folder):\n            for file in files:\n                try:\n                    cur_seq_no = int(os.path.basename(file).split('.')[0])\n                    if(freshest_time == None):\n                        freshest_time = os.path.getmtime(join(config_folder,file))\n                        seq_no = cur_seq_no\n                    else:\n                        current_file_m_time = os.path.getmtime(join(config_folder,file))\n                        if(current_file_m_time > freshest_time):\n                            freshest_time = current_file_m_time\n                            seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return seq_no\n\n    def log(self, message):\n        self._log(self._get_log_prefix() + message)\n\n    def error(self, message):\n        self._error(self._get_log_prefix() + message)\n\n    def syslog(self, level, message):\n        if level == logging.INFO:\n            self.syslogger.info(message)\n        elif level == logging.WARNING:\n            self.syslogger.warning(message)\n        elif level == logging.ERROR:\n            self.syslogger.error(message)\n\n    def log_and_syslog(self, level, message):\n        self.syslog(level, message)\n        if level == logging.INFO:\n            self.log(message)\n        elif level == logging.WARNING:\n            self.log(\" \".join([\"Warning:\", message]))\n        elif level == logging.ERROR:\n            self.error(message)\n\n    def _parse_config(self, ctxt):\n        config = None\n        try:\n            config = json.loads(ctxt)\n        except:\n            self.error('JSON exception decoding ' + ctxt)\n\n        if config is None:\n            self.error(\"JSON error processing settings file:\" + ctxt)\n        else:\n            handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n            if handlerSettings.has_key('protectedSettings') and \\\n                    handlerSettings.has_key(\"protectedSettingsCertThumbprint\") and \\\n                    handlerSettings['protectedSettings'] is not None and \\\n                    handlerSettings[\"protectedSettingsCertThumbprint\"] is not None:\n                protectedSettings = handlerSettings['protectedSettings']\n                thumb = handlerSettings['protectedSettingsCertThumbprint']\n                cert = waagent.LibDir + '/' + thumb + '.crt'\n                pkey = waagent.LibDir + '/' + thumb + '.prv'\n                unencodedSettings = base64.standard_b64decode(protectedSettings)\n                openSSLcmd = \"openssl smime -inform DER -decrypt -recip {0} -inkey {1}\"\n                cleartxt = waagent.RunSendStdin(openSSLcmd.format(cert, pkey), unencodedSettings)[1]\n                if cleartxt is None:\n                    self.error(\"OpenSSL decode error using thumbprint \" + thumb)\n                    self.do_exit(1, \"Enable\", 'error', '1', 'Failed to decrypt protectedSettings')\n                jctxt = ''\n                try:\n                    jctxt = json.loads(cleartxt)\n                except:\n                    self.error('JSON exception decoding ' + cleartxt)\n                handlerSettings['protectedSettings'] = jctxt\n                self.log('Config decoded correctly.')\n        return config
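\n\n    # For reference, the decryption above is equivalent to this shell pipeline\n    # (a sketch; waagent.LibDir is commonly /var/lib/waagent, and <thumbprint>\n    # stands for protectedSettingsCertThumbprint from the settings file):\n    #   base64 -d protected.b64 | openssl smime -inform DER -decrypt \\\n    #     -recip /var/lib/waagent/<thumbprint>.crt -inkey /var/lib/waagent/<thumbprint>.prv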
\n\n    def do_parse_context(self,operation):\n        _context = self.try_parse_context()\n        if not _context:\n            self.do_exit(1,operation,'error','1', operation + ' Failed')\n        return _context\n\n    def try_parse_context(self):\n        self._context = HandlerContext(self._short_name)\n        handler_env = None\n        config = None\n        ctxt = None\n        code = 0\n        # get the HandlerEnvironment.json.  According to the extension handler\n        # spec, it is always in the ./ directory\n        self.log('cwd is ' + os.path.realpath(os.path.curdir))\n        handler_env_file = './HandlerEnvironment.json'\n        if not os.path.isfile(handler_env_file):\n            self.error(\"Unable to locate \" + handler_env_file)\n            return None\n        ctxt = waagent.GetFileContents(handler_env_file)\n        if ctxt == None :\n            self.error(\"Unable to read \" + handler_env_file)\n        try:\n            handler_env = json.loads(ctxt)\n        except:\n            pass\n        if handler_env == None :\n            self.log(\"JSON error processing \" + handler_env_file)\n            return None\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n\n        self._context._name = handler_env['name']\n        self._context._version = str(handler_env['version'])\n        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']\n        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')\n        self._change_log_file()\n        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']\n        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']\n        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)\n        if self._context._seq_no < 0:\n            self.error(\"Unable to locate a .settings file!\")\n            return None\n        self._context._seq_no = str(self._context._seq_no)\n        self.log('sequence number is ' + self._context._seq_no)\n        self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')\n        self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')\n        self.log(\"settings file path is \" + self._context._settings_file)\n        ctxt = None\n        ctxt = waagent.GetFileContents(self._context._settings_file)\n        if ctxt == None :\n            error_msg = 'Unable to read ' + self._context._settings_file + '. '\n            self.error(error_msg)\n            return None\n\n        self.log(\"JSON config: \" + ctxt)\n        self._context._config = self._parse_config(ctxt)\n        return self._context\n\n\n    def _change_log_file(self):\n        self.log(\"Change log file to \" + self._context._log_file)\n        LoggerInit(self._context._log_file,'/dev/stdout')\n        self._log = waagent.Log\n        self._error = waagent.Error\n\n    def set_verbose_log(self, verbose):\n        if(verbose == \"1\" or verbose == 1):\n            self.log(\"Enable verbose log\")\n            LoggerInit(self._context._log_file, '/dev/stdout', verbose=True)\n        else:\n            self.log(\"Disable verbose log\")\n            LoggerInit(self._context._log_file, '/dev/stdout', verbose=False)\n\n    def is_seq_smaller(self):\n        return int(self._context._seq_no) <= self._get_most_recent_seq()\n\n    def save_seq(self):\n        self._set_most_recent_seq(self._context._seq_no)\n        self.log(\"set most recent sequence number to \" + self._context._seq_no)\n\n    def exit_if_enabled(self):\n        self.exit_if_seq_smaller()\n\n    def exit_if_seq_smaller(self):\n        if(self.is_seq_smaller()):\n            self.log(\"Current sequence number, \" + self._context._seq_no + \", is not greater than the sequence number of the most recently executed configuration. Exiting...\")\n            sys.exit(0)\n        self.save_seq()\n\n    def _get_most_recent_seq(self):\n        if(os.path.isfile('mrseq')):\n            seq = waagent.GetFileContents('mrseq')\n            if(seq):\n                return int(seq)\n\n        return -1\n\n    def is_current_config_seq_greater_inused(self):\n        return int(self._context._seq_no) > self._get_most_recent_seq()\n\n    def get_inused_config_seq(self):\n        return self._get_most_recent_seq()\n\n    def set_inused_config_seq(self,seq):\n        self._set_most_recent_seq(seq)\n\n    def _set_most_recent_seq(self,seq):\n        waagent.SetFileContents('mrseq', str(seq))\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.log(\"{0},{1},{2},{3}\".format(operation, status, status_code, message))\n        tstamp = time.strftime(DateTimeFormat, time.gmtime())\n        stat = [{\n            \"version\" : self._context._version,\n            \"timestampUTC\" : tstamp,\n            \"status\" : {\n                \"name\" : self._context._name,\n                \"operation\" : operation,\n                \"status\" : status,\n                \"code\" : status_code,\n                \"formattedMessage\" : {\n                    \"lang\" : \"en-US\",\n                    \"message\" : message\n                }\n            }\n        }]\n        stat_rept = json.dumps(stat)\n        if self._context._status_file:\n            with open(self._context._status_file,'w+') as f:\n                f.write(stat_rept)\n\n    def do_heartbeat_report(self, heartbeat_file,status,code,message):\n        # heartbeat\n        health_report = '[{\"version\":\"1.0\",\"heartbeat\":{\"status\":\"' + status + '\",\"code\":\"' + code + '\",\"Message\":\"' + message + '\"}}]'\n        if waagent.SetFileContents(heartbeat_file,health_report) == None :\n            self.error('Unable to write heartbeat info to ' + heartbeat_file)\n\n    def do_exit(self,exit_code,operation,status,code,message):\n        try:\n            self.do_status_report(operation, status,code,message)\n        except Exception as e:\n            self.log(\"Can't update status: \" + str(e))\n        sys.exit(exit_code)\n\n    def get_name(self):\n        return self._context._name\n\n    def get_seq_no(self):\n        return self._context._seq_no\n\n    def get_log_dir(self):\n        return self._context._log_dir\n\n    def get_handler_settings(self):\n        return self._context._config['runtimeSettings'][0]['handlerSettings']\n\n    def get_protected_settings(self):\n        return self.get_handler_settings().get('protectedSettings')\n\n    def get_public_settings(self):\n        return self.get_handler_settings().get('publicSettings')\n\n"
  },
  {
    "path": "RDMAUpdate/main/Utils/WAAgentUtil.py",
    "content": "﻿# Wrapper module for waagent\n#\n# waagent is not written as a module. This wrapper module is created \n# to use the waagent code as a module.\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport imp\nimport os\nimport os.path\n\n#\n# The following code will search and load waagent code and expose\n# it as a submodule of current module\n#\ndef searchWAAgent():\n    agentPath = '/usr/sbin/waagent'\n    if(os.path.isfile(agentPath)):\n        return agentPath\n    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n    for user_path in user_paths:\n        agentPath = os.path.join(user_path, 'waagent')\n        if(os.path.isfile(agentPath)):\n            return agentPath\n    return None\n\nagentPath = searchWAAgent()\nif(agentPath):\n    waagent = imp.load_source('waagent', agentPath)\nelse:\n    raise Exception(\"Can't load waagent.\")\n\nif not hasattr(waagent, \"AddExtensionEvent\"):\n    \"\"\"\n    If AddExtensionEvent is not defined, provide a dummy impl.\n    \"\"\"\n    def _AddExtensionEvent(*args, **kwargs):\n        pass\n    waagent.AddExtensionEvent = _AddExtensionEvent\n\nif not hasattr(waagent, \"WALAEventOperation\"):\n    class _WALAEventOperation:\n        HeartBeat = \"HeartBeat\"\n        Provision = \"Provision\"\n        Install = \"Install\"\n        UnIsntall = \"UnInstall\"\n        Disable = \"Disable\"\n        Enable = \"Enable\"\n        Download = \"Download\"\n        Upgrade = \"Upgrade\"\n        Update = \"Update\"           \n    waagent.WALAEventOperation = _WALAEventOperation\n\n__ExtensionName__ = None\ndef InitExtensionEventLog(name):\n    __ExtensionName__ = name\n\ndef AddExtensionEvent(name=__ExtensionName__,\n                      op=waagent.WALAEventOperation.Enable, \n                      isSuccess=False, \n                      message=None):\n    if name is not None:\n        waagent.AddExtensionEvent(name=name,\n                                  op=op,\n                                  isSuccess=isSuccess,\n                                  message=message)\n"
  },
  {
    "path": "RDMAUpdate/main/Utils/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "RDMAUpdate/main/__init__.py",
    "content": "﻿#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "RDMAUpdate/main/handle.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport array\nimport base64\nimport os\nimport os.path\nimport re\nimport json\nimport string\nimport subprocess\nimport sys\nimport imp\nimport time\nimport shlex\nimport traceback\nimport httplib\nimport xml.parsers.expat\nimport datetime\nfrom patch import *\nfrom os.path import join\nfrom Common import CommonVariables\nfrom Utils import HandlerUtil\nfrom urlparse import urlparse\nfrom RDMALogger import RDMALogger\nfrom CronUtil import *\nfrom SecondStageMarkConfig import SecondStageMarkConfig\n\n\ndef main():\n    global logger\n    global hutil\n    global MyPatching\n    HandlerUtil.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    HandlerUtil.waagent.Log(\"%s started to handle.\" % (CommonVariables.extension_name)) \n    hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)\n    logger = RDMALogger(hutil)\n    MyPatching = GetMyPatching(logger)\n    hutil.patching = MyPatching\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(rdmaupdate)\", a):\n            rdmaupdate()\n        elif re.match(\"^([-/]*)(chkrdma)\", a):\n            chkrdma()\n\ndef chkrdma():\n    hutil.do_parse_context('Executing')\n    check_result = MyPatching.check_rdma()\n    if(check_result == CommonVariables.UpToDate):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver up to date.')\n    if(check_result == CommonVariables.OutOfDate):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver out of date.')\n    if(check_result == CommonVariables.DriverVersionNotFound):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver not found.')\n    if(check_result == CommonVariables.Unknown):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA version not found.')\n\ndef rdmaupdate():\n    hutil.do_parse_context('Executing')\n    try:\n        MyPatching.rdmaupdate()\n        hutil.do_status_report('Enable','success','0', 'Enable Succeeded')\n        MyPatching.reboot_machine()\n    except Exception as e:\n        logger.log(\"Failed to update with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n        hutil.do_exit(0, 'Enable','success','0','enable failed, please take a look at the extension log.')\n\ndef start_daemon():\n    args = [os.path.join(os.getcwd(), __file__), \"-rdmaupdate\"]\n    logger.log(\"start_daemon with args:\" + str(args))\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n\ndef enable():\n    # do it 
\n\ndef chkrdma():\n    hutil.do_parse_context('Executing')\n    check_result = MyPatching.check_rdma()\n    if(check_result == CommonVariables.UpToDate):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver up to date.')\n    if(check_result == CommonVariables.OutOfDate):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver out of date.')\n    if(check_result == CommonVariables.DriverVersionNotFound):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA Driver not found.')\n    if(check_result == CommonVariables.Unknown):\n        hutil.do_exit(0, 'Enable','success','0', 'RDMA version not found.')\n\ndef rdmaupdate():\n    hutil.do_parse_context('Executing')\n    try:\n        MyPatching.rdmaupdate()\n        hutil.do_status_report('Enable','success','0', 'Enable Succeeded')\n        MyPatching.reboot_machine()\n    except Exception as e:\n        logger.log(\"Failed to update with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n        hutil.do_exit(0, 'Enable','success','0','enable failed, please take a look at the extension log.')\n\ndef start_daemon():\n    args = [os.path.join(os.getcwd(), __file__), \"-rdmaupdate\"]\n    logger.log(\"start_daemon with args:\" + str(args))\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n\ndef enable():\n    # do the one-time work for enabling: configure the cron job\n    hutil.do_parse_context('Enable')\n    secondStageMarkConfig = SecondStageMarkConfig()\n    if(secondStageMarkConfig.IsMarked()):\n        secondStageMarkConfig.ClearIt()\n        start_daemon()\n    else:\n        hutil.exit_if_enabled()\n        cronUtil = CronUtil(logger)\n        cronUtil.check_update_cron_config()\n        cronUtil.restart_cron()\n        start_daemon()\n\ndef install():\n    hutil.do_parse_context('Install')\n    hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')\n\ndef update():\n    hutil.do_parse_context('Update')\n    hutil.do_exit(0,'Update','success','0', 'Update Succeeded')\n\nif __name__ == '__main__' :\n    main()"
  },
  {
    "path": "RDMAUpdate/main/patch/AbstractPatching.py",
    "content": "﻿#!/usr/bin/python\n#\n# AbstractPatching is the base patching class of all the linux distros\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\n\nclass AbstractPatching(object):\n    \"\"\"\n    AbstractPatching defines a skeleton neccesary for a concrete Patching class.\n    \"\"\"\n    def __init__(self,distro_info):\n        self.distro_info = distro_info\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/usr/bin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/usr/sbin/cryptsetup'\n        self.dd_path = '/usr/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/usr/bin/echo'\n        self.lsblk_path = '/usr/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/usr/bin/mkdir'\n        self.mount_path = '/usr/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/usr/bin/umount'\n\n    def CreateCronJob(self):\n        pass"
  },
  {
    "path": "RDMAUpdate/main/patch/OraclePatching.py",
    "content": "﻿#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom redhatPatching import redhatPatching\nfrom Common import *\n\nclass OraclePatching(redhatPatching):\n    def __init__(self,logger,distro_info):\n        super(OraclePatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/usr/bin/bash'\n        self.blkid_path = '/usr/bin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/usr/sbin/cryptsetup'\n        self.dd_path = '/usr/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/usr/bin/echo'\n        self.lsblk_path = '/usr/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/usr/bin/mkdir'\n        self.mount_path = '/usr/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/usr/bin/umount'\n\n    #def install_extras(self):\n    #    common_extras = ['cryptsetup','lsscsi']\n    #    for extra in common_extras:\n    #        self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n\n        #if(paras.filesystem == \"btrfs\"):\n        #    extras = ['btrfs-tools']\n        #    for extra in extras:\n        #        print(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n        #pass"
  },
  {
    "path": "RDMAUpdate/main/patch/SuSEPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\nfrom CommandExecuter import CommandExecuter\nfrom RdmaException import RdmaException\nfrom SecondStageMarkConfig import SecondStageMarkConfig\n\nclass SuSEPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(SuSEPatching,self).__init__(distro_info)\n        self.logger = logger\n        if(distro_info[1] == \"11\"):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.cat_path = '/bin/cat'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.modprobe_path = '/usr/bin/modprobe'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.ps_path = '/bin/ps'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.reboot_path = '/sbin/reboot'\n            self.rmmod_path = '/sbin/rmmod'\n            self.service_path='/usr/sbin/service'\n            self.umount_path = '/bin/umount'\n            self.zypper_path = '/usr/bin/zypper'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.modprobe_path = '/usr/sbin/modprobe'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.ps_path = '/usr/bin/ps'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.reboot_path = '/sbin/reboot'\n            self.rmmod_path = '/usr/sbin/rmmod'\n            self.service_path = '/usr/sbin/service'\n            self.umount_path = '/usr/bin/umount'\n            self.zypper_path = '/usr/bin/zypper'\n\n    def rdmaupdate(self):\n        check_install_result = self.check_install_hv_utils()\n        if(check_install_result == CommonVariables.process_success):\n            time.sleep(40)\n            check_result = self.check_rdma()\n\n            if(check_result == 
CommonVariables.UpToDate):\n                return\n            elif(check_result == CommonVariables.OutOfDate):\n                nd_driver_version = self.get_nd_driver_version()\n                rdma_package_installed_version = self.get_rdma_package_version()\n                update_rdma_driver_result = self.update_rdma_driver(nd_driver_version, rdma_package_installed_version)\n            elif(check_result == CommonVariables.DriverVersionNotFound):\n                raise RdmaException(CommonVariables.driver_version_not_found)\n            elif(check_result == CommonVariables.Unknown):\n                raise RdmaException(CommonVariables.unknown_error)\n        else:\n            raise RdmaException(CommonVariables.install_hv_utils_failed)\n\n    def check_rdma(self):\n        nd_driver_version = self.get_nd_driver_version()\n        if(nd_driver_version is None or nd_driver_version == \"\"):\n            return CommonVariables.DriverVersionNotFound\n        package_version = self.get_rdma_package_version()\n        if(package_version is None or package_version == \"\"):\n            return CommonVariables.OutOfDate\n        else:\n            # package_version would be like this :20150707_k3.12.28_4-3.1\n            # nd_driver_version 140.0\n            self.logger.log(\"nd_driver_version is \" + str(nd_driver_version) + \" package_version is \" + str(package_version))\n            if(nd_driver_version is not None):\n                r = re.match(\".+(%s)$\" % nd_driver_version, package_version)# NdDriverVersion should be at the end of package version\n                if not r :\t#host ND version is the same as the package version, do an update\n                    return CommonVariables.OutOfDate\n                else:\n                    return CommonVariables.UpToDate\n            return CommonVariables.Unknown\n\n    def reload_hv_utils(self):\n        commandExecuter = CommandExecuter(self.logger)\n        #clear /run/hv_kvp_daemon folder for the service could not be restart walkaround\n\n        error,output = commandExecuter.RunGetOutput(self.rmmod_path + \" hv_utils\")\t#find a way to force install non-prompt\n        self.logger.log(\"rmmod hv_utils return code: \" + str(error) + \" output:\" + str(output))\n        if(error != CommonVariables.process_success):\n            return CommonVariables.common_failed\n        error,output = commandExecuter.RunGetOutput(self.modprobe_path + \" hv_utils\")\t#find a way to force install non-prompt\n        self.logger.log(\"modprobe hv_utils return code: \" + str(error) + \" output:\" + str(output))\n        if(error != CommonVariables.process_success):\n            return CommonVariables.common_failed\n        return CommonVariables.process_success\n\n    def restart_hv_kvp_daemon(self):\n        commandExecuter = CommandExecuter(self.logger)\n        reload_result = self.reload_hv_utils()\n        if(reload_result == CommonVariables.process_success):\n            if(os.path.exists('/run/hv_kvp_daemon')):\n                os.rmdir('/run/hv_kvp_daemon')\n            error,output = commandExecuter.RunGetOutput(self.service_path + \" hv_kvp_daemon start\")\t#find a way to force install non-prompt\n            self.logger.log(\"service hv_kvp_daemon start return code: \" + str(error) + \" output:\" + str(output))\n            if(error != CommonVariables.process_success):\n                return CommonVariables.common_failed\n            return CommonVariables.process_success\n        else:\n            return 
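\n\n    # Matching example (a sketch using the formats noted above): with\n    # nd_driver_version \"140.0\" the pattern becomes \".+(140.0)$\", so a\n    # package_version of \"20150707_k3.12.28_4-3.1.140.0\" counts as up to date,\n    # while \"20150707_k3.12.28_4-3.1\" does not and triggers an update.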
\n\n    def reload_hv_utils(self):\n        commandExecuter = CommandExecuter(self.logger)\n        # reloading hv_utils (and clearing /run/hv_kvp_daemon in\n        # restart_hv_kvp_daemon) is a workaround for the service\n        # otherwise failing to restart\n        error,output = commandExecuter.RunGetOutput(self.rmmod_path + \" hv_utils\")\n        self.logger.log(\"rmmod hv_utils return code: \" + str(error) + \" output:\" + str(output))\n        if(error != CommonVariables.process_success):\n            return CommonVariables.common_failed\n        error,output = commandExecuter.RunGetOutput(self.modprobe_path + \" hv_utils\")\n        self.logger.log(\"modprobe hv_utils return code: \" + str(error) + \" output:\" + str(output))\n        if(error != CommonVariables.process_success):\n            return CommonVariables.common_failed\n        return CommonVariables.process_success\n\n    def restart_hv_kvp_daemon(self):\n        commandExecuter = CommandExecuter(self.logger)\n        reload_result = self.reload_hv_utils()\n        if(reload_result == CommonVariables.process_success):\n            if(os.path.exists('/run/hv_kvp_daemon')):\n                os.rmdir('/run/hv_kvp_daemon')\n            error,output = commandExecuter.RunGetOutput(self.service_path + \" hv_kvp_daemon start\")\n            self.logger.log(\"service hv_kvp_daemon start return code: \" + str(error) + \" output:\" + str(output))\n            if(error != CommonVariables.process_success):\n                return CommonVariables.common_failed\n            return CommonVariables.process_success\n        else:\n            return CommonVariables.common_failed\n\n    def check_install_hv_utils(self):\n        commandExecuter = CommandExecuter(self.logger)\n        error, output = commandExecuter.RunGetOutput(self.ps_path + \" -ef\")\n        if(error != CommonVariables.process_success):\n            return CommonVariables.common_failed\n        else:\n            r = re.search(\"hv_kvp_daemon\", output)\n            if r is None :\n                self.logger.log(\"KVP daemon is not running, install it\")\n                error,output = commandExecuter.RunGetOutput(self.zypper_path + \" -n install --force hyper-v\")\n                self.logger.log(\"install hyper-v return code: \" + str(error) + \" output:\" + str(output))\n                if(error != CommonVariables.process_success):\n                    return CommonVariables.common_failed\n                secondStageMarkConfig = SecondStageMarkConfig()\n                secondStageMarkConfig.MarkIt()\n                self.reboot_machine()\n                return CommonVariables.process_success\n            else :\n                self.logger.log(\"KVP daemon is running\")\n                return CommonVariables.process_success\n\n    def get_nd_driver_version(self):\n        \"\"\"\n        if an error happens, raise a RdmaException\n        \"\"\"\n        try:\n            with open(\"/var/lib/hyperv/.kvp_pool_0\", \"r\") as f:\n                lines = f.read()\n            r = re.search(\"NdDriverVersion\\\\0+(\\\\d\\\\d\\\\d\\\\.\\\\d)\", lines)\n            if r is not None:\n                NdDriverVersion = r.groups()[0]\n                return NdDriverVersion # e.g. NdDriverVersion = 142.0\n            else :\n                self.logger.log(\"Error: NdDriverVersion not found.\")\n                return None\n        except Exception as e:\n            errMsg = 'Failed to detect the ND driver version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg)\n            raise RdmaException(CommonVariables.nd_driver_detect_error)\n\n    def get_rdma_package_version(self):\n        \"\"\"\n        return the installed msft-lis-rdma-kmp-default package version, or None\n        \"\"\"\n        commandExecuter = CommandExecuter(self.logger)\n        error, output = commandExecuter.RunGetOutput(self.zypper_path + \" info msft-lis-rdma-kmp-default\")\n        if(error == CommonVariables.process_success):\n            r = re.search(\"Version: (\\\\S+)\", output)\n            if r is not None:\n                package_version = r.groups()[0] # e.g. package_version is \"20150707_k3.12.28_4-3.1.140.0\"\n                return package_version\n            else:\n                return None\n        else:\n            return None\n\n    def update_rdma_driver(self, host_version, rdma_package_installed_version):\n        \"\"\"\n        install the driver RPM matching host_version from /opt/microsoft/rdma\n        \"\"\"\n        commandExecuter = CommandExecuter(self.logger)\n        error, output = commandExecuter.RunGetOutput(self.zypper_path + \" lr -u\")\n        rdma_pack_result = re.search(\"msft-rdma-pack\", output)\n        if rdma_pack_result is None :\n            self.logger.log(\"rdma_pack_result is None\")\n            error, output = commandExecuter.RunGetOutput(self.zypper_path + \" ar https://drivers.suse.com/microsoft/Microsoft-LIS-RDMA/sle-12/updates msft-rdma-pack\")\n            # wait for the cache to build\n            time.sleep(20)\n            self.logger.log(\"error result is \" + str(error) + \" output is : \" + str(output))\n        else:\n            self.logger.log(\"output is: \"+str(output))\n            self.logger.log(\"msft-rdma-pack found\")\n        returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + \" --no-gpg-checks refresh\")\n        self.logger.log(\"refresh repo return code is \" + str(returnCode) + \" output is: \" + str(message))\n        # install the wrapper package, which will put the driver RPM packages under /opt/microsoft/rdma\n        returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + \" -n remove \" + CommonVariables.wrapper_package_name)\n        self.logger.log(\"remove wrapper package return code is \" + str(returnCode) + \" output is: \" + str(message))\n        returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + \" --non-interactive install --force \" + CommonVariables.wrapper_package_name)\n        self.logger.log(\"install wrapper package return code is \" + str(returnCode) + \" output is: \" + str(message))\n        r = os.listdir(\"/opt/microsoft/rdma\")\n        if r is not None :\n            for filename in r :\n                if re.match(\"msft-lis-rdma-kmp-default-\\\\d{8}\\\\.(%s).+\" % host_version, filename) :\n                    error,output = commandExecuter.RunGetOutput(self.zypper_path + \" --non-interactive remove msft-lis-rdma-kmp-default\")\n                    self.logger.log(\"remove msft-lis-rdma-kmp-default result is \" + str(error) + \" output is: \" + str(output))\n                    self.logger.log(\"Installing RPM /opt/microsoft/rdma/\" + filename)\n                    error,output = commandExecuter.RunGetOutput(self.zypper_path + \" --non-interactive install --force /opt/microsoft/rdma/%s\" % filename)\n                    self.logger.log(\"Install msft-lis-rdma-kmp-default result is \" + str(error) + \" output is: \" + str(output))\n                    if(error == CommonVariables.process_success):\n                        self.reboot_machine()\n                    else:\n                        raise RdmaException(CommonVariables.package_install_failed)\n        else:\n            self.logger.log(\"RDMA drivers not found in /opt/microsoft/rdma\")\n            raise RdmaException(CommonVariables.package_not_found)\n\n    def reboot_machine(self):\n        self.logger.log(\"rebooting machine\")\n        commandExecuter = CommandExecuter(self.logger)\n        commandExecuter.RunGetOutput(self.reboot_path)\n"
  },
  {
    "path": "RDMAUpdate/main/patch/UbuntuPatching.py",
    "content": "﻿#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\n\n\nclass UbuntuPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(UbuntuPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    #def install_extras(self):\n    #    \"\"\"\n    #    install the sg_dd because the default dd do not support the sparse write\n    #    \"\"\"\n    #    if(self.distro_info[0].lower() == \"ubuntu\" and self.distro_info[1] == \"12.04\"):\n    #        common_extras = ['cryptsetup-bin','lsscsi']\n    #    else:\n    #        common_extras = ['cryptsetup-bin','lsscsi']\n    #    for extra in common_extras:\n    #        self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['apt-get', 'install','-y', extra])))"
  },
  {
    "path": "RDMAUpdate/main/patch/__init__.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport os\nimport re\nimport platform\n\nfrom UbuntuPatching import UbuntuPatching\nfrom redhatPatching import redhatPatching\nfrom centosPatching import centosPatching\nfrom OraclePatching import OraclePatching\nfrom SuSEPatching import SuSEPatching\n\n# Define the function in case waagent(<2.0.4) doesn't have DistInfo()\ndef DistInfo():\n    if 'FreeBSD' in platform.system():\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['FreeBSD', release]\n        return distinfo\n    if os.path.isfile('/etc/oracle-release'):\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['Oracle', release]\n        return distinfo\n    if 'linux_distribution' in dir(platform):\n        distinfo = list(platform.linux_distribution(full_distribution_name=0))\n        # remove trailing whitespace in distro name\n        distinfo[0] = distinfo[0].strip()\n        return distinfo\n    else:\n        return platform.dist()\n\ndef GetMyPatching(logger):\n    \"\"\"\n    Return MyPatching object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    dist_info = DistInfo()\n    if 'Linux' in platform.system():\n        Distro = dist_info[0]\n    else: # I know this is not Linux!\n        if 'FreeBSD' in platform.system():\n            Distro = platform.system()\n    Distro = Distro.strip('\"')\n    Distro = Distro.strip(' ')\n    patching_class_name = Distro + 'Patching'\n\n    if not globals().has_key(patching_class_name):\n        print Distro + ' is not a supported distribution.'\n        return None\n    patchingInstance = globals()[patching_class_name](logger,dist_info)\n    return patchingInstance"
  },
  {
    "path": "RDMAUpdate/main/patch/centosPatching.py",
    "content": "﻿#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom redhatPatching import redhatPatching\nfrom Common import *\n\nclass centosPatching(redhatPatching):\n    def __init__(self,logger,distro_info):\n        super(centosPatching,self).__init__(logger,distro_info)\n        self.logger = logger\n        if(distro_info[1] == \"6.8\" or distro_info[1] == \"6.7\" or distro_info[1] == \"6.6\" or distro_info[1] == \"6.5\"):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    #def install_extras(self):\n    #    common_extras = ['cryptsetup','lsscsi']\n    #    for extra in common_extras:\n    #        self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))"
  },
  {
    "path": "RDMAUpdate/main/patch/redhatPatching.py",
    "content": "﻿#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\n\n\nclass redhatPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(redhatPatching,self).__init__(distro_info)\n        self.logger = logger\n        if(distro_info[1] == \"6.7\"):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    #def install_extras(self):\n    #    common_extras = ['cryptsetup','lsscsi']\n    #    for extra in common_extras:\n    #        self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))"
  },
  {
    "path": "RDMAUpdate/references",
    "content": "Utils/\n"
  },
  {
    "path": "RDMAUpdate/setup.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# To build:\n# python setup.py sdist\n#\n# To install:\n# python setup.py install\n#\n# To register (only needed once):\n# python setup.py register\n#\n# To upload:\n# python setup.py sdist upload\n\nfrom distutils.core import setup\nimport os\nimport json\nimport subprocess\nfrom zipfile import ZipFile\nfrom main.Common import CommonVariables\n\npackages_array = []\nmain_folder = 'main'\nmain_entry = main_folder + '/handle.py'\npackages_array.append(main_folder)\n\npatch_folder = main_folder + '/patch'\npackages_array.append(patch_folder)\n\n\"\"\"\ncopy the dependency to the local\n\"\"\"\n\n\"\"\"\ncopy the utils lib to local\n\"\"\"\ntarget_utils_path = main_folder + '/' + CommonVariables.utils_path_name\npackages_array.append(target_utils_path)\n\n\n\"\"\"\ngenerate the HandlerManifest.json file.\n\"\"\"\nmanifest_obj = [{\n  \"name\": CommonVariables.extension_name,\n  \"version\": CommonVariables.extension_version,\n  \"handlerManifest\": {\n    \"installCommand\": main_entry + \" -install\",\n    \"uninstallCommand\": main_entry + \" -uninstall\",\n    \"updateCommand\": main_entry + \" -update\",\n    \"enableCommand\": main_entry + \" -enable\",\n    \"disableCommand\": main_entry + \" -disable\",\n    \"rebootAfterInstall\": False,\n    \"reportHeartbeat\": False\n  }\n}]\n\nmanifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)\nmanifest_file = open(\"HandlerManifest.json\", \"w\") \nmanifest_file.write(manifest_str)\nmanifest_file.close()\n\n\n\"\"\"\ngenerate the extension xml file\n\"\"\"\nextension_xml_file_content = \"\"\"<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n<Type>%s</Type>\n<Version>%s</Version>\n<Label>%s</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink>%s</MediaLink>\n<Description>%s</Description>\n<IsInternalExtension>true</IsInternalExtension>\n<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>\n<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<CompanyName>Microsoft Open Source Technology Center</CompanyName>\n</ExtensionImage>\"\"\" % (CommonVariables.extension_type,CommonVariables.extension_version,CommonVariables.extension_label,CommonVariables.extension_media_link,CommonVariables.extension_description)\n\nextension_xml_file = open(CommonVariables.extension_name + '-' + str(CommonVariables.extension_version) + '.xml', 'w')\nextension_xml_file.write(extension_xml_file_content)\nextension_xml_file.close()\n\n\"\"\"\nsetup script, to package the files up\n\"\"\"\nsetup(name = CommonVariables.extension_name,\n      version = CommonVariables.extension_version,\n      
\n\n\"\"\"\nunzip the package files and re-package it.\n\"\"\"\ntarget_zip_file_location = './dist/'\ntarget_folder_name = CommonVariables.extension_name + '-' + str(CommonVariables.extension_version)\ntarget_zip_file_path = target_zip_file_location + target_folder_name + '.zip'\n\ntarget_zip_file = ZipFile(target_zip_file_path)\ntarget_zip_file.extractall(target_zip_file_location)\n\ndef dos2unix(src):\n    args = [\"dos2unix\",src]\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n    print('dos2unix %s ' % (src))\n    child.wait()\n\ndef zip(src, dst):\n    zf = ZipFile(\"%s\" % (dst), \"w\")\n    abs_src = os.path.abspath(src)\n    for dirname, subdirs, files in os.walk(src):\n        for filename in files:\n            absname = os.path.abspath(os.path.join(dirname, filename))\n            dos2unix(absname)\n            arcname = absname[len(abs_src) + 1:]\n            print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))\n            zf.write(absname, arcname)\n    zf.close()\n\nfinal_folder_path = target_zip_file_location + target_folder_name\nzip(final_folder_path, target_zip_file_path)\n\n"
  },
  {
    "path": "RDMAUpdate/test/update_rdma_driver.py",
    "content": "import subprocess, re, os\n\ndef RunGetOutput(cmd,chk_err=True):\n\ttry:\n\t\toutput=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)\n\texcept subprocess.CalledProcessError,e :\n\t\tif chk_err :\n\t\t\tprint('CalledProcessError.  Error Code is ' + str(e.returncode)  )\n\t\t\tprint('CalledProcessError.  Command string was ' + e.cmd  )\n\t\t\tprint('CalledProcessError.  Command result was ' + (e.output[:-1]).decode('latin-1'))\n\t\treturn e.returncode,e.output.decode('latin-1')\n\treturn 0,output.decode('latin-1')\n#def\n\ndef InstallRDMADriver(host_version) :\n\n\t#make sure we have the correct repo configured\n\terror, output = RunGetOutput(\"zypper lr -u\")\n\tif not re.search(\"msft-rdma-pack\", output) :\n\t\tRunGetOutput(\"zypper ar https://drivers.suse.com/microsoft/Microsoft-LIS-RDMA/sle-12/updates msft-rdma-pack\")\n\n\t#install the wrapper package, that will put the driver RPM packages under /opt/microsoft/rdma\n\tRunGetOutput(\"zypper --non-interactive install --force msft-rdma-drivers\")\n\n\t#install the driver RPM package\n\tr = os.listdir(\"/opt/microsoft/rdma\")\n\tif r :\n\t\tfor filename in r :\n\t\t\tif re.match(\"msft-lis-rdma-kmp-default-\\d{8}\\.(%s).+\" % host_version, filename) :\n\t\t\t\tprint \"Installing RPM /opt/microsoft/rdma/\" + filename\n\t\t\t\tRunGetOutput(\"zypper --non-interactive install --force /opt/microsoft/rdma/%s\" % filename)\n\t\t\t\treturn\n\n\tprint \"RDMA drivers not found in /opt/microsoft/rdma\"\n#def\n\n#1. check if kvp daemon is running, if not install it and reboot\nerror, output = RunGetOutput(\"ps -ef\")\t# how about error != 0\nr = re.search(\"hv_kvp_daemon\", output)\nif not r :\n\tprint \"KVP deamon is not running, install it\"\n\tRunGetOutput(\"zypper --non-interactive install --force hyper-v\")\n\tRunGetOutput(\"reboot\")\nelse :\n\tprint \"KVP deamon is running\"\n\n\n#2. get the host ND version\nf = open(\"/var/lib/hyperv/.kvp_pool_0\", \"r\")\nlines = f.read();\nf.close()\n\nr = re.match(\"NdDriverVersion\\0+(\\d\\d\\d\\.\\d)\", lines)\nif r :\n\tNdDriverVersion = r.groups()[0]\n\tprint \"ND version = \" + NdDriverVersion\t\t#e.g. NdDriverVersion = 142.0\nelse :\n\tprint \"Error: NdDriverVersion not found. Abort\"\n\texit()\n\n\n#3. if the ND version doesn't match the RDMA driver package version, do an update\nerror, output = RunGetOutput(\"zypper --non-interactive info msft-lis-rdma-kmp-default\")\n\nr = re.search(\"Version:\\s+(\\S+)\", output)\nif r :\n\tpackage_version = r.groups()[0]\t\t# e.g. package_version is \"20151119.142.0_k3.12.28_4-1.1\"\n\tprint \"msft-lis-rdma-kmp-default package version = \" + package_version\n\n\tr = re.match(\"\\d{8}\\.(%s).+\" % NdDriverVersion, package_version)\t# NdDriverVersion should be at the end of package version\n\tif not r :\t#host ND version is the same as the package version, do an update\n\t\tprint \"ND and package version don't match, doing an update\"\n\t\tRunGetOutput(\"zypper --non-interactive remove msft-lis-rdma-kmp-default\")\n\t\tInstallRDMADriver(NdDriverVersion)\n\t\tRunGetOutput(\"reboot\")\n\telse :\n\t\tprint \"ND and package version match, not doing an update\"\n\nelse :\n\tprint \"msft-lis-rdma-kmp-default not found, installing new version\"\n\n\tInstallRDMADriver(NdDriverVersion)\n\tRunGetOutput(\"reboot\");\n"
  },
  {
    "path": "RDMAUpdate/test.ps1",
    "content": "Add-AzureRmAccount \nSet-AzureRmContext -SubscriptionName \"OSTC Shanghai Dev\"\n$RGName = 'andliu-northus'\n$VmName = 'andliu-sles12'\n$Location = 'North Central US'\n\n$ExtensionName = 'RDMAUpdateForLinux'\n$Publisher = 'Microsoft.OSTCExtensions'\n$Version = \"0.1\"\n\n$PublicConf = '{}'\n$PrivateConf = '{}'\n\nSet-AzureRmVMExtension -ResourceGroupName $RGName -VMName $VmName -Location $Location `\n  -Name $ExtensionName -Publisher $Publisher -ExtensionType $ExtensionName `\n  -TypeHandlerVersion $Version -Settingstring $PublicConf -ProtectedSettingString $PrivateConf\n\n"
  },
  {
    "path": "README.md",
    "content": "# Linux extensions for Microsoft Azure IaaS\n\nThis project provides the source code of Linux extensions for Microsoft Azure IaaS.\n\nVM Extensions are injected components authored by Microsoft and Partners into Linux VM (IaaS) to enable software and configuration automation.\n\nYou can read the document [about virtual machine extensions and features](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-extensions-features/).\n\n# Extension List\n\n| Name | Lastest Version | Description |\n|:---|:---|:---|\n| [Custom Script](./CustomScript) | 1.5 | Allow the owner of the Azure Virtual Machines to run customized scripts in the VM |\n| [DSC](./DSC) | 2.71 | Allow the owner of the Azure Virtual Machines to configure the VM using Windows PowerShell Desired State Configuration (DSC) for Linux |\n| [OS Patching](./OSPatching) | 2.0 | Allow the owner of the Azure VM to configure the Linux VM patching schedule cycle |\n| [VM Access](./VMAccess) | [1.5](https://github.com/Azure/azure-linux-extensions/releases/tag/VMAccess-1.5.1) | Provide several ways to allow owner of the VM to get the SSH access back |\n| [OMS Agent](./OmsAgent) | 1.0 | Allow the owner of the Azure VM to install the omsagent and attach it to an OMS workspace |\n| [Diagnostic](./Diagnostic) | 3.0.129 | Allow the owner of the Azure Virtual Machines to obtain diagnostic data for a Linux virtual machine |\n| [Backup](./VMBackup) | 1.0.9124.0 | Provide application consistent backup of the virtual machine(Needs to be used in conjunction with [Azure Backup](https://azure.microsoft.com/services/backup/)) |\n\n# Contributing\n\nPlease refer to the [Contribution Guide](./docs/contribution-guide.md).\n\n# Known Issues\n1. When you run the PowerShell command \"Set-AzureVMExtension\" on Linux VM, you may hit following error: \"Provision Guest Agent must be enabled on the VM object before setting IaaS VM Access Extension\". \n\n  * Root Cause: When you create the Linux VM via portal, the value of provision guest agent on the VM is not always set to \"True\". If your VM is created using PowerShell or using the Azure new portal, you will not see this issue.\n\n  * Resolution: Add the following PowerShell command to set the ProvisionGuestAgent to \"True\".\n  ```powershell\n  $vm = Get-AzureVM -ServiceName 'MyServiceName' -Name 'MyVMName'\n  $vm.GetInstance().ProvisionGuestAgent = $true\n  ```\n\n# Support\n\nThe extensions in this repository are tested against Python 2.7 and higher.\nThe extensions in this repository use OpenSSL 1.0 and higher.\n\n-----\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.\n"
  },
  {
    "path": "SECURITY.md",
    "content": "<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->\n\n## Security\n\nMicrosoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).\n\nIf you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.\n\n## Reporting Security Issues\n\n**Please do not report security vulnerabilities through public GitHub issues.**\n\nInstead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).\n\nIf you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com).  If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).\n\nYou should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). \n\nPlease include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:\n\n  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)\n  * Full paths of source file(s) related to the manifestation of the issue\n  * The location of the affected source code (tag/branch/commit or direct URL)\n  * Any special configuration required to reproduce the issue\n  * Step-by-step instructions to reproduce the issue\n  * Proof-of-concept or exploit code (if possible)\n  * Impact of the issue, including how an attacker might exploit the issue\n\nThis information will help us triage your report more quickly.\n\nIf you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.\n\n## Preferred Languages\n\nWe prefer all communications to be in English.\n\n## Policy\n\nMicrosoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).\n\n<!-- END MICROSOFT SECURITY.MD BLOCK -->\n"
  },
  {
    "path": "SampleExtension/HandlerManifest.json",
    "content": "[{\n  \"name\": \"SampleExtension\",\n  \"version\": 1.0,\n  \"handlerManifest\": {\n    \"installCommand\": \"./install.py\",\n    \"uninstallCommand\": \"./uninstall.py\",\n    \"updateCommand\": \"./update.py\",\n    \"enableCommand\": \"./enable.py\",\n    \"disableCommand\": \"./disable.py\",\n    \"rebootAfterInstall\": false,\n    \"reportHeartbeat\": false\n  }\n}]\n"
  },
  {
    "path": "SampleExtension/disable.py",
    "content": "#!/usr/bin/env python\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\nExtensionShortName = \"SampleExtension\"\n\ndef main():\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" % ExtensionShortName)\n\n    operation = \"disable\"\n    status = \"success\"\n    msg = \"Disabled successfully.\"\n\n    hutil = parse_context(operation)\n    hutil.log(\"Start to disable.\")\n    hutil.log(msg)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "SampleExtension/enable.py",
    "content": "#!/usr/bin/env python\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\nExtensionShortName = \"SampleExtension\"\n\ndef main():\n    #Global Variables definition\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    operation = \"enable\"\n    status = \"success\"\n    msg = \"Enabled successfully.\"\n\n    hutil = parse_context(operation)\n    hutil.log(\"Start to enable.\")\n    public_settings = hutil.get_public_settings()\n    name = public_settings.get(\"name\")\n    if name:\n        hutil.log(\"Hello {0}\".format(name))\n    else:\n        hutil.error(\"The name in public settings is not provided.\")\n    hutil.log(msg)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "SampleExtension/install.py",
    "content": "#!/usr/bin/env python\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\nExtensionShortName = \"SampleExtension\"\n\ndef main():\n    #Global Variables definition\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    operation = \"install\"\n    status = \"success\"\n    msg = \"Installed successfully.\"\n\n    hutil = parse_context(operation)\n    hutil.log(\"Start to install.\")\n    hutil.log(msg)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "SampleExtension/references",
    "content": "Utils/\n"
  },
  {
    "path": "SampleExtension/uninstall.py",
    "content": "#!/usr/bin/env python\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\nExtensionShortName = \"SampleExtension\"\n\ndef main():\n    #Global Variables definition\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    operation = \"uninstall\"\n    status = \"success\"\n    msg = \"Uninstalled successfully.\"\n\n    hutil = parse_context(operation)\n    hutil.log(\"Start to uninstall.\")\n    hutil.log(msg)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "SampleExtension/update.py",
    "content": "#!/usr/bin/env python\n\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil as Util\n\nExtensionShortName = \"SampleExtension\"\n\ndef main():\n    #Global Variables definition\n    waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')\n    waagent.Log(\"%s started to handle.\" %(ExtensionShortName))\n\n    operation = \"update\"\n    status = \"success\"\n    msg = \"Updated successfully.\"\n\n    hutil = parse_context(operation)\n    hutil.log(\"Start to update.\")\n    hutil.log(msg)\n    hutil.do_exit(0, operation, status, '0', msg)\n\n\ndef parse_context(operation):\n    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)\n    hutil.do_parse_context(operation)\n    return hutil\n\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "TestHandlerLinux/HandlerManifest.json",
    "content": "[{\n  \"name\": \"TestHandlerLinux\",\n  \"version\": 1.1,\n  \"handlerManifest\": {\n    \"installCommand\": \"installer/install.py\",\n    \"uninstallCommand\": \"installer/uninstall.py\",\n    \"updateCommand\": \"bin/update.py\",\n    \"enableCommand\": \"bin/enable.py\",\n    \"disableCommand\": \"bin/disable.py\",\n    \"rebootAfterInstall\": false,\n    \"reportHeartbeat\": true\n  }\n}]"
  },
  {
    "path": "TestHandlerLinux/bin/disable.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nDiable example\n\"\"\"\nimport os\nimport imp\nimport time\nimport json\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\n\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\n\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"disable.py starting.\") \n\nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Disable')\n\nLoggerInit('/var/log/'+name+'_Disable.log','/dev/stdout')\n\nwaagent.Log(name+\" - disable.py starting.\") \n\nlogfile=waagent.Log\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n                     time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n                     'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)\nhutil.doHealthReport(heartbeat_file,'NotReady','0','Proccessing Settings')\n\nerror_string=''\npid=None\npidfile='./service_pid.txt'\nif not os.path.isfile(pidfile):\n    error_string += pidfile +\" is missing.\"\n    error_string = \"Error: \" + error_string\n    waagent.Error(error_string)\n    hutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n                     time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n                     'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)\nelse:\n    pid = waagent.GetFileContents(pidfile)\n    \n    #stop service.py\n    try:\n        os.kill(int(pid),7)\n    except Exception as e:\n        pass\n    \n    # remove pifdile\n    try:\n        os.unlink(pidfile)\n    except Exception as e:\n        pass\n    \n#Kill heartbeat.py if required.\nmanifest = waagent.GetFileContents('./HandlerManifest.json')\ntry:\n    s=json.loads(manifest)\nexcept:\n    waagent.Error('Error parsing HandlerManifest.json.  Heath report will not be available.')\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','NotReady','0',name+' enabled.')\nif s[0]['handlerManifest']['reportHeartbeat'] != True :\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')\ntry:\n    pid = waagent.GetFileContents('./heartbeat.pid')\nexcept:\n    waagent.Error('Error reading ./heartbeat.pid.')\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successfull', 'success', '0', 'Enable Completed.','NotReady','0',name+' enabled.')\n\nif waagent.Run('kill '+pid)==0:\n    waagent.Log(name+\" disabled.\")\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service Succeed. Health reporting stoppped.', 'Exit Successfull', 'success', '0', 'Disable Completed.','NotReady','0',name+' disabled.')\n\n"
  },
  {
    "path": "TestHandlerLinux/bin/enable.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nEnable example\n\"\"\"\nimport os\nimport imp\nimport subprocess\nimport time\nimport json\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\n\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\n\n\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"enable.py starting.\") \n\nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Enable')\nLoggerInit('/var/log/'+name+'_Enable.log','/dev/stdout')\n\nwaagent.Log(name+\" - enable.py starting.\") \n\nlogfile=waagent.Log\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n                     time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,'Enable', 'NotReady', '0', 'Enabling',\n                     'Process Config', 'NotReady', '0', 'Parsing ' + settings_file)\npub=\"\"\npriv = \"\"\n# process the config info from public and private config\ntry:\n    pub = config['runtimeSettings'][0]['handlerSettings']['publicSettings']\nexcept:\n    waagent.Error(\"json threw an exception processing config PublicSettings.\")    \ntry:\n    priv = config['runtimeSettings'][0]['handlerSettings']['protectedSettings']\nexcept:\n    waagent.Error(\"json threw an exception processing config protectedSettings.\")    \n\nwaagent.Log(\"PublicConfig =\" + repr(pub) )\nport=None\nif len(pub):\n    try:\n        port = pub['port']\n    except:\n        waagent.Error(\"json threw an exception processing public setting: port\")\n\nwaagent.Log(\"ProtectedConfig =\" + repr(priv) )\nif len(priv):\n    try:\n        port = priv['port']\n    except:\n        waagent.Error(\"json threw an exception processing protected setting: port\")\n\nif port == None:\n    port = \"3000\"\n\nerror_string=None\nif port == None:\n    error_string += \"ServicePort is empty. \"\n    error_string = \"Error: \" + error_string\n    waagent.Error(error_string)\n    hutil.doExit(name,seqNo,version,1,status_file,heartbeat_file,'Install/Enable','errior','1', 'Install Failed', 'Parse Config', 'error', '1',error_string,'NotReady','1','Exiting')\n\nerror_string=None\nwaagent.SetFileContents('./resources/service_port.txt',port)\n\nerror_string=''\n\nif port == None:\n    error_string += \"ServicePort is empty. 
\"\n    error_string = \"Error: \" + error_string\n    waagent.Error(error_string)\n    hutil.doExit(name,seqNo,version,1,status_file,heartbeat_file,'Enable','NotReady','1', 'Enable Failed', 'Read service_port.txt', 'NotReady', '1',error_string,'NotReady','1','Exiting')\n\n\n#if already running, kill and spawn new service.py to get current port\npid=None\npathdir='/usr/sbin'\nfilepath=pathdir+'/service.py'\npidfile='./service_pid.txt'\nif os.path.exists(pidfile):\n    pid=waagent.GetFileContents('./service_pid.txt')\n    try :\n        os.kill(int(pid),7)\n    except Exception as e:\n        pass\n    try:\n        os.unlink(pidfile)\n    except Exception as e:\n        pass\n    time.sleep(3) # wait for the socket to close\ntry:\n    pid = subprocess.Popen(filepath+' -p ' + port,shell=True,cwd=pathdir).pid\nexcept Exception as e:\n    waagent.Error('Exception launching ' + filepath + str(e))\n\nif pid == None or pid < 1 :\n    waagent.Error('Error launching ' + filepath + '.')\nelse :\n    waagent.Log(\"Spawned \"+ filepath + \" PID = \" + str(pid))\n        \nwaagent.SetFileContents('./service_pid.txt',str(pid))\n\n# report ready \nwaagent.Log(name+\" enabled.\")\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),\n                     time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,'Enable','Ready','0',\n                     'Enable service Succeed.', 'Exit Successfull', 'Ready', '0', 'Enable Completed.')\n\n#Spawn heartbeat.py if required.\nmanifest = waagent.GetFileContents('./HandlerManifest.json')\ns=None\ntry:\n    s=json.loads(manifest)\nexcept:\n    waagent.Error('Error parsing HandlerManifest.json.  Health reports will not be available.')\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Enable','Ready','0', 'Enable service Succeed.  Health  reports will not be available.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')\nif s and s[0]['handlerManifest']['reportHeartbeat'] != True :\n        waagent.Log('No heartbeat required.  Health reports will not be available.')\n        hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Enable','Ready','0', 'Enable service Succeed.  Health  reports will not be available.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')\n\ndirpath=os.path.realpath('./')\ntry:\n    pid = subprocess.Popen(dirpath+'/bin/heartbeat.py',shell=True,cwd=dirpath).pid\nexcept:\n    waagent.Error('Error launching'+dirpath+'/bin/heartbeat.py!  Health reports will not be available.')\n    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Enable','Ready','0', 'Enable service Succeed.  Health reports will not be available.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')\n    \nwaagent.Log(name+\" heartbeat.py started Health reports are available.\")\nhutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Enable','Ready','0', 'Enable service Succeed.  Health reports are available.', 'Exit Successfull', 'success', '0', 'Enable Completed.','Ready','0',name+' enabled.')\n\n        \n        \n"
  },
  {
    "path": "TestHandlerLinux/bin/heartbeat.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nHeartbeat example\n\"\"\"\nimport os\nimport imp\nimport time\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\n\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"hearbeat.py starting.\") \n\nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Hearbeat')\nLoggerInit('/var/log/'+name+'_Hearbeat.log','/dev/stdout')\n\nwaagent.Log(name+\" - hearbeat.py starting.\") \n\nlogfile=waagent.Log\npid=None\npidfile='./service_pid.txt'\nretries=5\n\nwaagent.SetFileContents('./heartbeat.pid',str(os.getpid()))\n\nwhile(True):\n    if os.path.exists(pidfile):\n        pid=waagent.GetFileContents('./service_pid.txt')\n        if waagent.Run(\"ps --no-headers \" + str(pid),chk_err=False) == 0:\n            # running\n            retries=5\n            waagent.Log(name+\" service.py is running with PID=\"+pid)\n            hutil.doHealthReport(heartbeat_file,'Ready','0','service.py is running.')\n            time.sleep(30)\n            continue\n        else:\n            # died -- retries and wait for 2 min\n            retries-=1\n            waagent.Error(name+\" service.py is Not running.\")\n            if retries==4:\n                hutil.doHealthReport(heartbeat_file,'NotRunning','1','ERROR -  service.py Unknown or NOT running')\n            if retries!=0:\n                time.sleep(120)\n            else:\n                break\n    else:\n        # dead.  report not ready \n        waagent.Error(name+\" service.py is Not running.\")\n        hutil.doHealthReport(heartbeat_file,'NotReady','1','ERROR -  service.py is NOT running')\n        break\n\nwaagent.Log(name+\" heartbeat.py exiting.  service.py is NOT running\")\n\n"
  },
  {
    "path": "TestHandlerLinux/bin/service.py",
    "content": "#!/usr/bin/env python\n\nimport imp\n\n\"\"\"\nservice example\n\"\"\"\nresources_dir = 'RESOURCES_PATH'\nmypydoc=imp.load_source('mypydoc',resources_dir+'/mypydoc.py')\nmypydoc.cli()\n"
  },
  {
    "path": "TestHandlerLinux/bin/update.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nUpdate example\nReads port from Public Config if present.\nCreates service_port.txt in resources dir.\nCopies the service to /usr/bin and updates it\nwith the resource path.\n\"\"\"\nimport os\nimport sys\nimport imp\nimport time\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\n\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\n\n\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"update.py starting.\") \nwaagent.MyDistro=waagent.GetMyDistro()\nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Update')\nLoggerInit('/var/log/'+name+'_Update.log','/dev/stdout')\n\nwaagent.Log(name+\" - update.py starting.\") \n\nlogfile=waagent.Log\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n               'Update', 'transitioning', '0', 'Updating', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)\nhutil.doHealthReport(heartbeat_file,'NotReady','0','Proccessing Settings')\n\n# capture the config info from previous installation\n# argv[1] is the path to the previous version.\n\nwaagent.SetFileContents('./resources/service_port.txt',waagent.GetFileContents(sys.argv[1]+'/resources/service_port.txt'))\n\n# move the service to sbin\nwaagent.SetFileContents('/usr/sbin/service.py',waagent.GetFileContents('./bin/service.py'))\nwaagent.ReplaceStringInFile('/usr/sbin/service.py','RESOURCES_PATH',os.path.realpath('./resources'))\nos.chmod('/usr/sbin/service.py',0700)\n\n\n# report ready \nwaagent.Log(name+\"updating completed.\")\nhutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Update','success','0', 'Update Succeeded.', 'Exit Successfull', 'success', '0', 'Updating Completed.','Ready','0',name+' update completed.')\n\n"
  },
  {
    "path": "TestHandlerLinux/installer/install.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nInstall example\nReads port from Public Config if present.\nCreates service_port.txt in resources dir.\n\"\"\"\nimport os\nimport imp\nimport time\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\n\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\n\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"install.py starting.\") \nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Install')\nLoggerInit('/var/log/'+name+'_Install.log','/dev/stdout')\n\nwaagent.Log(name+\" - install.py starting.\") \n\nlogfile=waagent.Log\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n               'Install', 'transitioning', '0', 'Installing', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)\nhutil.doHealthReport(heartbeat_file,'NotReady','0','Proccessing Settings')\npub=\"\"\npriv = \"\"\n# process the config info from public and private config\ntry:\n    pub = config['runtimeSettings'][0]['handlerSettings']['publicSettings']\nexcept:\n    waagent.Error(\"json threw an exception processing config PublicSettings.\")    \ntry:\n    priv = config['runtimeSettings'][0]['handlerSettings']['protectedSettings']\nexcept:\n    waagent.Error(\"json threw an exception processing config protectedSettings.\")    \n\nwaagent.Log(\"PublicConfig =\" + repr(pub) )\nport=None\nif len(pub):\n    try:\n        port = pub['port']\n    except:\n        waagent.Error(\"json threw an exception processing public setting: port\")\n\nwaagent.Log(\"ProtectedConfig =\" + repr(priv) )\nif len(priv):\n    try:\n        port = priv['port']\n    except:\n        waagent.Error(\"json threw an exception processing protected setting: port\")\n\nif port == None:\n    port = \"3000\"\n\nerror_string=None\nif port == None:\n    error_string += \"ServicePort is empty. \"\n    error_string = \"Error: \" + error_string\n    waagent.Error(error_string)\n    hutil.doExit(name,seqNo,version,1,status_file,heartbeat_file,'Install/Enable','errior','1', 'Install Failed', 'Parse Config', 'error', '1',error_string,'NotReady','1','Exiting')\n\nerror_string=None\nwaagent.SetFileContents('./resources/service_port.txt',port)\n\n# move the service to sbin\nwaagent.SetFileContents('/usr/sbin/service.py',waagent.GetFileContents('./bin/service.py'))\nwaagent.ReplaceStringInFile('/usr/sbin/service.py','RESOURCES_PATH',os.path.realpath('./resources'))\nos.chmod('/usr/sbin/service.py',0700)\n\n\n# report ready \nwaagent.Log(\"HandlerTestLinux installation completed.\")\nhutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Install','success','0', 'Install Succeeded.', 'Exit Successfull', 'success', '0', 'Installation Completed.','Ready','0',name+' installation completed.')\n\n"
  },
  {
    "path": "TestHandlerLinux/installer/uninstall.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nExample Azure Handler script for Linux IaaS\nDiable example\n\"\"\"\nimport os\nimport imp\nimport time\n\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\nfrom waagent import LoggerInit\nhutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')\n\nLoggerInit('/var/log/waagent.log','/dev/stdout')\n\nwaagent.Log(\"uninstall.py starting.\") \nlogfile=waagent.Log\n\nname,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Uninstall')\n\nwaagent.Log(name+\" - uninstall.py starting.\") \n\nlogfile=waagent.Log\n\nhutil.doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n               'Uninstall', 'transitioning', '0', 'Uninstalling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)\nhutil.doHealthReport(heartbeat_file,'NotReady','0','Proccessing Settings')\n\nerror_string=None\nservicefile='/usr/sbin/service.py'\nif not os.path.isfile(servicefile):\n    error_string += servicefile +\" is missing.\"\n    error_string = \"Error: \" + error_string\n    waagent.Error(error_string)\n    hutil.doExit(name,seqNo,version,1,status_file,heartbeat_file,'Uninstall','error','1', 'Uninstall Failed', 'Remove service.py failed.', 'error', '1',error_string,'NotReady','1','Exiting')\n# remove \nos.unlink(servicefile)\n# report ready \nwaagent.Log(name+\" uninstalled.\")\nhutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Uninstall','success','0', 'Uninstall service.py Succeeded', 'Exit Successfull', 'success', '0', 'Uninstall Completed.','Ready','0',name+' uninstalled.')\n\n"
  },
  {
    "path": "TestHandlerLinux/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n  <Type>OSTCTestHandlerLinux</Type>\n  <Version>1.1</Version>\n  <Label>Windows Azure Example Extension Handler for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Windows Azure Example Extension Handler for Linux Virtual Machines</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension> \n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "TestHandlerLinux/references",
    "content": "Utils/\n"
  },
  {
    "path": "TestHandlerLinux/resources/HandlerUtil.py",
    "content": "#!/usr/bin/env python\n\n\"\"\"\nHandler library for Linux IaaS\n\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n    \n  }\n}]\n\n{\n   \"handlerSettings\": \n  {\n    \"protectedSettings\": \n    {\n      \"Password\": \"UserPassword\"\n        },\n       \"publicSettings\": \n    {\t\n      \"UserName\": \"UserName\",\n      \"Expiration\": \"Password expiration date in yyy-mm-dd\"\n\t}\n  }\n }\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. Waiting for a new configuration from user.\"\n    }\n}\nStatus uses either non-localized 'message' or localized 'formattedMessage' but not both.\n{\n    \"version\": 1.0,\n    \"timestampUTC\": \"<current utc time>\",\n    \"status\" : {\n        \"name\": \"<Handler workload name>\",\n        \"operation\": \"<name of the operation being performed>\",\n        \"configurationAppliedTime\": \"<UTC time indicating when the configuration was last successfully applied>\",\n        \"status\": \"<transitioning | error | success | warning>\",\n        \"code\": <Valid integer status code>,\n        \"message\": {\n            \"id\": \"id of the localized resource\",\n            \"params\": [\n                \"MyParam0\",\n                \"MyParam1\"\n            ]\n        },\n        \"formattedMessage\": {\n            \"lang\": \"Lang[-locale]\",\n            \"message\": \"formatted user message\"\n        }\n    }\n}\n\"\"\"\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport json\nimport time\n\n# waagent has no '.py' therefore create waagent module import manually.\nwaagent=imp.load_source('waagent','/usr/sbin/waagent')\ndef doParse(Log,operation):\n    handler_env=None\n    config=None\n    ctxt=None\n    code=0\n    \n    # get the HandlerEnvironment.json. 
it should always be in ./\n    waagent.Log('cwd is ' + os.path.realpath(os.path.curdir))\n    handler_env_file='./HandlerEnvironment.json'\n    if not os.path.isfile(handler_env_file):\n        waagent.Error(\"Unable to locate \" + handler_env_file)\n        sys.exit(1)\n    ctxt=waagent.GetFileContents(handler_env_file)\n    if ctxt == None :\n        waagent.Error(\"Unable to read \" + handler_env_file)    \n    try:\n        handler_env=json.loads(ctxt)\n    except:\n        pass\n    if handler_env == None :\n        waagent.Error(\"JSON error processing \" + handler_env_file)    \n        sys.exit(1)\n    if type(handler_env) == list:\n        handler_env = handler_env[0]\n    \n    # parse the dirs\n    name='NULL'\n    seqNo='0'\n    version='0.0'\n    config_dir='./'\n    log_dir='./'\n    status_dir='./'\n    heartbeat_file='NULL.log'\n    \n    name=handler_env['name']\n    seqNo=handler_env['seqNo']\n    version=str(handler_env['version'])\n    config_dir=handler_env['handlerEnvironment']['configFolder']\n    log_dir=handler_env['handlerEnvironment']['logFolder']\n    status_dir=handler_env['handlerEnvironment']['statusFolder']\n    heartbeat_file=handler_env['handlerEnvironment']['heartbeatFile']\n    \n    # always get the newest settings file\n    code,settings_file=waagent.RunGetOutput('ls -rt ' + config_dir + '/*.settings | tail -1')\n    if code != 0:\n        waagent.Error(\"Unable to locate a .settings file!\")\n        sys.exit(1)\n    settings_file=settings_file[:-1]\n    # get our incarnation # from the number of the .settings file\n    incarnation=os.path.splitext(os.path.basename(settings_file))[0]\n    waagent.Log('Incarnation is ' + incarnation)\n    status_file=status_dir+'/'+incarnation+'.status'\n    waagent.Log(\"setting file path is\" + settings_file)\n    ctxt=None\n    ctxt=waagent.GetFileContents(settings_file)\n    if ctxt == None :\n        waagent.Error('Unable to read ' + settings_file + '. ')    \n        doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'error','1', operation+' Failed', 'Read .settings', 'error', '1','Unable to read ' + settings_file + '. 
','NotReady','1','Exiting')\n    waagent.Log(\"Read: \" + ctxt)\n    # parse json\n    config = None\n    try:\n        config=json.loads(ctxt)\n    except:\n        waagent.Error('JSON exception decoding ' + ctxt)\n        \n    if config == None:\n        waagent.Error(\"JSON error processing \" + settings_file)\n        return (name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config)\n    \n#        doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'errior','1', operation + ' Failed', 'Parse Config', 'error', '1', 'JSON error processing ' + settings_file,'NotReady','1','Exiting')\n #       sys.exit(1)\n    print repr(config)\n    if config['runtimeSettings'][0]['handlerSettings'].has_key('protectedSettings') == True:\n        thumb=config['runtimeSettings'][0]['handlerSettings']['protectedSettingsCertThumbprint']\n        cert=waagent.LibDir+'/'+thumb+'.crt'\n        pkey=waagent.LibDir+'/'+thumb+'.prv'\n        waagent.SetFileContents('/tmp/kk',config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])\n        cleartxt=None\n        cleartxt=waagent.RunGetOutput(\"base64 -d /tmp/kk | openssl smime  -inform DER -decrypt -recip \" +  cert + \"  -inkey \" + pkey )[1]\n        if cleartxt == None:\n            waagent.Error(\"OpenSSh decode error using  thumbprint \" + thumb )    \n            doExit(name,seqNo,version,1,status_file,heartbeat_file,operation,'errior','1', operation + ' Failed', 'Parse Config', 'error', '1', 'OpenSsh decode error  using  thumbprint ' + thumb,'NotReady','1','Exiting')\n            sys.exit(1)\n        jctxt=''\n        try:\n            jctxt=json.loads(cleartxt)\n        except:\n            waagent.Error('JSON exception decoding ' + cleartxt)\n        config['runtimeSettings'][0]['handlerSettings']['protectedSettings']=jctxt\n        waagent.Log('Config decoded correctly.')\n\n    return (name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config)\n\n\ndef doStatusReport(name,seqNo,version,stat_file,current_utc, started_at_utc, workload_name, operation_name, status, status_code, status_message, sub_workload_name, sub_status, sub_status_code, sub_status_message):\n    #'{\"handlerName\":\"Chef.Bootstrap.WindowsAzure.ChefClient\",\"handlerVersion\":\"11.12.0.0\",\"status\":\"NotReady\",\"code\":1,\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"Enable command of plugin (name: Chef.Bootstrap.WindowsAzure.ChefClient, version 11.12.0.0) failed with exception Command C:/Packages/Plugins/Chef.Bootstrap.WindowsAzure.ChefClient/11.12.0.0/enable.cmd of Chef.Bootstrap.WindowsAzure.ChefClient has exited with Exit code: 1\"}},{\"handlerName\":\"Microsoft.Compute.BGInfo\",\"handlerVersion\":\"1.1\",\"status\":\"Ready\",\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"plugin (name: Microsoft.Compute.BGInfo, version: 1.1) enabled successfully.\"}}'\n\n    stat_rept='{\"handlerName\":\"' + name + '\",\"handlerVersion\":\"'+version+ '\",\"status\":\"' +status + '\",\"code\":' + status_code + ',\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"' + status_message + '\"}}'\n    cur_file=stat_file+'_current'\n    with open(cur_file,'w+') as f:\n        f.write(stat_rept)\n    # if inc.status exists, rename the inc.status to inc.status_sent\n    if os.path.exists(stat_file) == True:\n        os.rename(stat_file,stat_file+'_sent')\n    # rename inc.status_current to inc.status\n    os.rename(cur_file,stat_file)\n    # remove  inc.status_sent\n    if 
os.path.exists(stat_file+'_sent') == True:\n        os.unlink(stat_file+'_sent')\n        \n\ndef doHealthReport(heartbeat_file,status,code,message):\n    # heartbeat\n    health_report='[{\"version\":\"1.0\",\"heartbeat\":{\"status\":\"' + status+ '\",\"code\":\"'+ code + '\",\"Message\":\"' + message + '\"}}]'\n    if waagent.SetFileContents(heartbeat_file,health_report) == None :\n        waagent.Error('Unable to wite heartbeat info to ' + heartbeat_file)    \n\ndef doExit(name,seqNo,version,exit_code,status_file,heartbeat_file,operation,status,code,message,sub_operation,sub_status,sub_code,sub_message,health_state,health_code,health_message):\n    doStatusReport(name,seqNo,version,status_file,time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime()),name,\n                   operation,status,code,message,sub_operation,sub_status,sub_code,sub_message)\n    doHealthReport(heartbeat_file,'NotReady','1','Exiting')\n    sys.exit(exit_code)\n\n"
  },
  {
    "path": "TestHandlerLinux/resources/mypydoc.py",
    "content": "#! /usr/bin/python2.7\n# -*- coding: latin-1 -*-\n\"\"\"Generate Python documentation in HTML or text for interactive use.\n\nIn the Python interpreter, do \"from pydoc import help\" to provide online\nhelp.  Calling help(thing) on a Python object documents the object.\n\nOr, at the shell command line outside of Python:\n\nRun \"pydoc <name>\" to show documentation on something.  <name> may be\nthe name of a function, module, package, or a dotted reference to a\nclass or function within a module or module in a package.  If the\nargument contains a path segment delimiter (e.g. slash on Unix,\nbackslash on Windows) it is treated as the path to a Python source file.\n\nRun \"pydoc -k <keyword>\" to search for a keyword in the synopsis lines\nof all available modules.\n\nRun \"pydoc -p <port>\" to start an HTTP server on a given port on the\nlocal machine to generate documentation web pages.\n\nFor platforms without a command line, \"pydoc -g\" starts the HTTP server\nand also pops up a little window for controlling it.\n\nRun \"pydoc -w <name>\" to write out the HTML documentation for a module\nto a file named \"<name>.html\".\n\nModule docs for core modules are assumed to be in\n\n    http://docs.python.org/library/\n\nThis can be overridden by setting the PYTHONDOCS environment variable\nto a different URL or to a local directory containing the Library\nReference Manual pages.\n\"\"\"\n\n__author__ = \"Ka-Ping Yee <ping@lfw.org>\"\n__date__ = \"26 February 2001\"\n\n__version__ = \"$Revision: 88564 $\"\n__credits__ = \"\"\"Guido van Rossum, for an excellent programming language.\nTommy Burnette, the original creator of manpy.\nPaul Prescod, for all his work on onlinehelp.\nRichard Chamberlain, for the first implementation of textdoc.\n\"\"\"\n\n# Known bugs that can't be fixed here:\n#   - imp.load_module() cannot be prevented from clobbering existing\n#     loaded modules, so calling synopsis() on a binary module file\n#     changes the contents of any existing module with the same name.\n#   - If the __file__ attribute on a module is a relative path and\n#     the current directory is changed with os.chdir(), an incorrect\n#     path will be displayed.\n\nimport sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings\nfrom repr import Repr\nfrom string import expandtabs, find, join, lower, split, strip, rfind, rstrip\nfrom traceback import extract_tb\ntry:\n    from collections import deque\nexcept ImportError:\n    # Python 2.3 compatibility\n    class deque(list):\n        def popleft(self):\n            return self.pop(0)\n\n# --------------------------------------------------------- common routines\n\ndef pathdirs():\n    \"\"\"Convert sys.path into a list of absolute, existing, unique paths.\"\"\"\n    dirs = []\n    normdirs = []\n    for dir in sys.path:\n        dir = os.path.abspath(dir or '.')\n        normdir = os.path.normcase(dir)\n        if normdir not in normdirs and os.path.isdir(dir):\n            dirs.append(dir)\n            normdirs.append(normdir)\n    return dirs\n\ndef getdoc(object):\n    \"\"\"Get the doc string or comments for an object.\"\"\"\n    result = inspect.getdoc(object) or inspect.getcomments(object)\n    return result and re.sub('^ *\\n', '', rstrip(result)) or ''\n\ndef splitdoc(doc):\n    \"\"\"Split a doc string into a synopsis line (if any) and the rest.\"\"\"\n    lines = split(strip(doc), '\\n')\n    if len(lines) == 1:\n        return lines[0], ''\n    elif len(lines) >= 2 and not rstrip(lines[1]):\n        return 
lines[0], join(lines[2:], '\\n')\n    return '', join(lines, '\\n')\n\ndef classname(object, modname):\n    \"\"\"Get a class name and qualify it with a module name if necessary.\"\"\"\n    name = object.__name__\n    if object.__module__ != modname:\n        name = object.__module__ + '.' + name\n    return name\n\ndef isdata(object):\n    \"\"\"Check if an object is of a type that probably means it's data.\"\"\"\n    return not (inspect.ismodule(object) or inspect.isclass(object) or\n                inspect.isroutine(object) or inspect.isframe(object) or\n                inspect.istraceback(object) or inspect.iscode(object))\n\ndef replace(text, *pairs):\n    \"\"\"Do a series of global replacements on a string.\"\"\"\n    while pairs:\n        text = join(split(text, pairs[0]), pairs[1])\n        pairs = pairs[2:]\n    return text\n\ndef cram(text, maxlen):\n    \"\"\"Omit part of a string if needed to make it fit in a maximum length.\"\"\"\n    if len(text) > maxlen:\n        pre = max(0, (maxlen-3)//2)\n        post = max(0, maxlen-3-pre)\n        return text[:pre] + '...' + text[len(text)-post:]\n    return text\n\n_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)\ndef stripid(text):\n    \"\"\"Remove the hexadecimal id from a Python object representation.\"\"\"\n    # The behaviour of %p is implementation-dependent in terms of case.\n    return _re_stripid.sub(r'\\1', text)\n\ndef _is_some_method(obj):\n    return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)\n\ndef allmethods(cl):\n    methods = {}\n    for key, value in inspect.getmembers(cl, _is_some_method):\n        methods[key] = 1\n    for base in cl.__bases__:\n        methods.update(allmethods(base)) # all your base are belong to us\n    for key in methods.keys():\n        methods[key] = getattr(cl, key)\n    return methods\n\ndef _split_list(s, predicate):\n    \"\"\"Split sequence s via predicate, and return pair ([true], [false]).\n\n    The return value is a 2-tuple of lists,\n        ([x for x in s if predicate(x)],\n         [x for x in s if not predicate(x)])\n    \"\"\"\n\n    yes = []\n    no = []\n    for x in s:\n        if predicate(x):\n            yes.append(x)\n        else:\n            no.append(x)\n    return yes, no\n\ndef visiblename(name, all=None, obj=None):\n    \"\"\"Decide whether to show documentation on a variable.\"\"\"\n    # Certain special names are redundant.\n    _hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',\n                     '__module__', '__name__', '__slots__', '__package__')\n    if name in _hidden_names: return 0\n    # Private names are hidden, but special names are displayed.\n    if name.startswith('__') and name.endswith('__'): return 1\n    # Namedtuples have public fields and methods with a single leading underscore\n    if name.startswith('_') and hasattr(obj, '_fields'):\n        return 1\n    if all is not None:\n        # only document that which the programmer exported in __all__\n        return name in all\n    else:\n        return not name.startswith('_')\n\ndef classify_class_attrs(object):\n    \"\"\"Wrap inspect.classify_class_attrs, with fixup for data descriptors.\"\"\"\n    def fixup(data):\n        name, kind, cls, value = data\n        if inspect.isdatadescriptor(value):\n            kind = 'data descriptor'\n        return name, kind, cls, value\n    return map(fixup, inspect.classify_class_attrs(object))\n\n# ----------------------------------------------------- module manipulation\n\ndef ispackage(path):\n 
   \"\"\"Guess whether a path refers to a package directory.\"\"\"\n    if os.path.isdir(path):\n        for ext in ('.py', '.pyc', '.pyo'):\n            if os.path.isfile(os.path.join(path, '__init__' + ext)):\n                return True\n    return False\n\ndef source_synopsis(file):\n    line = file.readline()\n    while line[:1] == '#' or not strip(line):\n        line = file.readline()\n        if not line: break\n    line = strip(line)\n    if line[:4] == 'r\"\"\"': line = line[1:]\n    if line[:3] == '\"\"\"':\n        line = line[3:]\n        if line[-1:] == '\\\\': line = line[:-1]\n        while not strip(line):\n            line = file.readline()\n            if not line: break\n        result = strip(split(line, '\"\"\"')[0])\n    else: result = None\n    return result\n\ndef synopsis(filename, cache={}):\n    \"\"\"Get the one-line summary out of a module file.\"\"\"\n    mtime = os.stat(filename).st_mtime\n    lastupdate, result = cache.get(filename, (None, None))\n    if lastupdate is None or lastupdate < mtime:\n        info = inspect.getmoduleinfo(filename)\n        try:\n            file = open(filename)\n        except IOError:\n            # module can't be opened, so skip it\n            return None\n        if info and 'b' in info[2]: # binary modules have to be imported\n            try: module = imp.load_module('__temp__', file, filename, info[1:])\n            except: return None\n            result = (module.__doc__ or '').splitlines()[0]\n            del sys.modules['__temp__']\n        else: # text modules can be directly examined\n            result = source_synopsis(file)\n            file.close()\n        cache[filename] = (mtime, result)\n    return result\n\nclass ErrorDuringImport(Exception):\n    \"\"\"Errors that occurred while trying to import something to document it.\"\"\"\n    def __init__(self, filename, exc_info):\n        exc, value, tb = exc_info\n        self.filename = filename\n        self.exc = exc\n        self.value = value\n        self.tb = tb\n\n    def __str__(self):\n        exc = self.exc\n        if type(exc) is types.ClassType:\n            exc = exc.__name__\n        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)\n\ndef importfile(path):\n    \"\"\"Import a Python source file or compiled file given its path.\"\"\"\n    magic = imp.get_magic()\n    file = open(path, 'r')\n    if file.read(len(magic)) == magic:\n        kind = imp.PY_COMPILED\n    else:\n        kind = imp.PY_SOURCE\n    file.close()\n    filename = os.path.basename(path)\n    name, ext = os.path.splitext(filename)\n    file = open(path, 'r')\n    try:\n        module = imp.load_module(name, file, path, (ext, 'r', kind))\n    except:\n        raise ErrorDuringImport(path, sys.exc_info())\n    file.close()\n    return module\n\ndef safeimport(path, forceload=0, cache={}):\n    \"\"\"Import a module; handle errors; return None if the module isn't found.\n\n    If the module *is* found but an exception occurs, it's wrapped in an\n    ErrorDuringImport exception and reraised.  Unlike __import__, if a\n    package path is specified, the module at the end of the path is returned,\n    not the package at the beginning.  If the optional 'forceload' argument\n    is 1, we reload the module from disk (unless it's a dynamic extension).\"\"\"\n    try:\n        # If forceload is 1 and the module has been previously loaded from\n        # disk, we always have to reload the module.  Checking the file's\n        # mtime isn't good enough (e.g. 
the module could contain a class\n        # that inherits from another module that has changed).\n        if forceload and path in sys.modules:\n            if path not in sys.builtin_module_names:\n                # Avoid simply calling reload() because it leaves names in\n                # the currently loaded module lying around if they're not\n                # defined in the new source file.  Instead, remove the\n                # module from sys.modules and re-import.  Also remove any\n                # submodules because they won't appear in the newly loaded\n                # module's namespace if they're already in sys.modules.\n                subs = [m for m in sys.modules if m.startswith(path + '.')]\n                for key in [path] + subs:\n                    # Prevent garbage collection.\n                    cache[key] = sys.modules[key]\n                    del sys.modules[key]\n        module = __import__(path)\n    except:\n        # Did the error occur before or after the module was found?\n        (exc, value, tb) = info = sys.exc_info()\n        if path in sys.modules:\n            # An error occurred while executing the imported module.\n            raise ErrorDuringImport(sys.modules[path].__file__, info)\n        elif exc is SyntaxError:\n            # A SyntaxError occurred before we could execute the module.\n            raise ErrorDuringImport(value.filename, info)\n        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':\n            # The import error occurred directly in this function,\n            # which means there is no such module in the path.\n            return None\n        else:\n            # Some other error occurred during the importing process.\n            raise ErrorDuringImport(path, sys.exc_info())\n    for part in split(path, '.')[1:]:\n        try: module = getattr(module, part)\n        except AttributeError: return None\n    return module\n\n# ---------------------------------------------------- formatter base class\n\nclass Doc:\n    def document(self, object, name=None, *args):\n        \"\"\"Generate documentation for an object.\"\"\"\n        args = (object, name) + args\n        # 'try' clause is to attempt to handle the possibility that inspect\n        # identifies something in a way that pydoc itself has issues handling;\n        # think 'super' and how it is a descriptor (which raises the exception\n        # by lacking a __name__ attribute) and an instance.\n        if inspect.isgetsetdescriptor(object): return self.docdata(*args)\n        if inspect.ismemberdescriptor(object): return self.docdata(*args)\n        try:\n            if inspect.ismodule(object): return self.docmodule(*args)\n            if inspect.isclass(object): return self.docclass(*args)\n            if inspect.isroutine(object): return self.docroutine(*args)\n        except AttributeError:\n            pass\n        if isinstance(object, property): return self.docproperty(*args)\n        return self.docother(*args)\n\n    def fail(self, object, name=None, *args):\n        \"\"\"Raise an exception for unimplemented types.\"\"\"\n        message = \"don't know how to document object%s of type %s\" % (\n            name and ' ' + repr(name), type(object).__name__)\n        raise TypeError, message\n\n    docmodule = docclass = docroutine = docother = docproperty = docdata = fail\n\n    def getdocloc(self, object):\n        \"\"\"Return the location of module docs or None\"\"\"\n\n        try:\n            file = inspect.getabsfile(object)\n       
 except TypeError:\n            file = '(built-in)'\n\n        docloc = os.environ.get(\"PYTHONDOCS\",\n                                \"http://docs.python.org/library\")\n        basedir = os.path.join(sys.exec_prefix, \"lib\",\n                               \"python\"+sys.version[0:3])\n        if (isinstance(object, type(os)) and\n            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',\n                                 'marshal', 'posix', 'signal', 'sys',\n                                 'thread', 'zipimport') or\n             (file.startswith(basedir) and\n              not file.startswith(os.path.join(basedir, 'dist-packages')) and\n              not file.startswith(os.path.join(basedir, 'site-packages')))) and\n            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):\n            if docloc.startswith(\"http://\"):\n                docloc = \"%s/%s\" % (docloc.rstrip(\"/\"), object.__name__)\n            else:\n                docloc = os.path.join(docloc, object.__name__ + \".html\")\n        else:\n            docloc = None\n        return docloc\n\n# -------------------------------------------- HTML documentation generator\n\nclass HTMLRepr(Repr):\n    \"\"\"Class for safely making an HTML representation of a Python object.\"\"\"\n    def __init__(self):\n        Repr.__init__(self)\n        self.maxlist = self.maxtuple = 20\n        self.maxdict = 10\n        self.maxstring = self.maxother = 100\n\n    def escape(self, text):\n        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')\n\n    def repr(self, object):\n        return Repr.repr(self, object)\n\n    def repr1(self, x, level):\n        if hasattr(type(x), '__name__'):\n            methodname = 'repr_' + join(split(type(x).__name__), '_')\n            if hasattr(self, methodname):\n                return getattr(self, methodname)(x, level)\n        return self.escape(cram(stripid(repr(x)), self.maxother))\n\n    def repr_string(self, x, level):\n        test = cram(x, self.maxstring)\n        testrepr = repr(test)\n        if '\\\\' in test and '\\\\' not in replace(testrepr, r'\\\\', ''):\n            # Backslashes are only literal in the string and are never\n            # needed to make any special characters, so show a raw string.\n            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]\n        return re.sub(r'((\\\\[\\\\abfnrtv\\'\"]|\\\\[0-9]..|\\\\x..|\\\\u....)+)',\n                      r'<font color=\"#c040c0\">\\1</font>',\n                      self.escape(testrepr))\n\n    repr_str = repr_string\n\n    def repr_instance(self, x, level):\n        try:\n            return self.escape(cram(stripid(repr(x)), self.maxstring))\n        except:\n            return self.escape('<%s instance>' % x.__class__.__name__)\n\n    repr_unicode = repr_string\n\nclass HTMLDoc(Doc):\n    \"\"\"Formatter class for HTML documentation.\"\"\"\n\n    # ------------------------------------------- HTML formatting utilities\n\n    _repr_instance = HTMLRepr()\n    repr = _repr_instance.repr\n    escape = _repr_instance.escape\n\n    def page(self, title, contents):\n        \"\"\"Format an HTML page.\"\"\"\n        return '''\n<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\n<html><head><title>Python: %s</title>\n</head><body bgcolor=\"#f0f0f8\">\n%s\n</body></html>''' % (title, contents)\n\n    def heading(self, title, fgcol, bgcol, extras=''):\n        \"\"\"Format a page heading.\"\"\"\n        return '''\n<table width=\"100%%\" cellspacing=0 cellpadding=2 
border=0 summary=\"heading\">\n<tr bgcolor=\"%s\">\n<td valign=bottom>&nbsp;<br>\n<font color=\"%s\" face=\"helvetica, arial\">&nbsp;<br>%s</font></td\n><td align=right valign=bottom\n><font color=\"%s\" face=\"helvetica, arial\">%s</font></td></tr></table>\n    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')\n\n    def section(self, title, fgcol, bgcol, contents, width=6,\n                prelude='', marginalia=None, gap='&nbsp;'):\n        \"\"\"Format a section with a heading.\"\"\"\n        if marginalia is None:\n            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'\n        result = '''<p>\n<table width=\"100%%\" cellspacing=0 cellpadding=2 border=0 summary=\"section\">\n<tr bgcolor=\"%s\">\n<td colspan=3 valign=bottom>&nbsp;<br>\n<font color=\"%s\" face=\"helvetica, arial\">%s</font></td></tr>\n    ''' % (bgcol, fgcol, title)\n        if prelude:\n            result = result + '''\n<tr bgcolor=\"%s\"><td rowspan=2>%s</td>\n<td colspan=2>%s</td></tr>\n<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)\n        else:\n            result = result + '''\n<tr><td bgcolor=\"%s\">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)\n\n        return result + '\\n<td width=\"100%%\">%s</td></tr></table>' % contents\n\n    def bigsection(self, title, *args):\n        \"\"\"Format a section with a big heading.\"\"\"\n        title = '<big><strong>%s</strong></big>' % title\n        return self.section(title, *args)\n\n    def preformat(self, text):\n        \"\"\"Format literal preformatted text.\"\"\"\n        text = self.escape(expandtabs(text))\n        return replace(text, '\\n\\n', '\\n \\n', '\\n\\n', '\\n \\n',\n                             ' ', '&nbsp;', '\\n', '<br>\\n')\n\n    def multicolumn(self, list, format, cols=4):\n        \"\"\"Format a list of items into a multi-column list.\"\"\"\n        result = ''\n        rows = (len(list)+cols-1)//cols\n        for col in range(cols):\n            result = result + '<td width=\"%d%%\" valign=top>' % (100//cols)\n            for i in range(rows*col, rows*col+rows):\n                if i < len(list):\n                    result = result + format(list[i]) + '<br>\\n'\n            result = result + '</td>'\n        return '<table width=\"100%%\" summary=\"list\"><tr>%s</tr></table>' % result\n\n    def grey(self, text): return '<font color=\"#909090\">%s</font>' % text\n\n    def namelink(self, name, *dicts):\n        \"\"\"Make a link for an identifier, given name-to-URL mappings.\"\"\"\n        for dict in dicts:\n            if name in dict:\n                return '<a href=\"%s\">%s</a>' % (dict[name], name)\n        return name\n\n    def classlink(self, object, modname):\n        \"\"\"Make a link for a class.\"\"\"\n        name, module = object.__name__, sys.modules.get(object.__module__)\n        if hasattr(module, name) and getattr(module, name) is object:\n            return '<a href=\"%s.html#%s\">%s</a>' % (\n                module.__name__, name, classname(object, modname))\n        return classname(object, modname)\n\n    def modulelink(self, object):\n        \"\"\"Make a link for a module.\"\"\"\n        return '<a href=\"%s.html\">%s</a>' % (object.__name__, object.__name__)\n\n    def modpkglink(self, data):\n        \"\"\"Make a link for a module or package to display in an index.\"\"\"\n        name, path, ispackage, shadowed = data\n        if shadowed:\n            return self.grey(name)\n        if path:\n            url = '%s.%s.html' % (path, name)\n        else:\n            url = '%s.html' % 
name\n        if ispackage:\n            text = '<strong>%s</strong>&nbsp;(package)' % name\n        else:\n            text = name\n        return '<a href=\"%s\">%s</a>' % (url, text)\n\n    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):\n        \"\"\"Mark up some plain text, given a context of symbols to look for.\n        Each context dictionary maps object names to anchor names.\"\"\"\n        escape = escape or self.escape\n        results = []\n        here = 0\n        pattern = re.compile(r'\\b((http|ftp)://\\S+[\\w/]|'\n                                r'RFC[- ]?(\\d+)|'\n                                r'PEP[- ]?(\\d+)|'\n                                r'(self\\.)?(\\w+))')\n        while True:\n            match = pattern.search(text, here)\n            if not match: break\n            start, end = match.span()\n            results.append(escape(text[here:start]))\n\n            all, scheme, rfc, pep, selfdot, name = match.groups()\n            if scheme:\n                url = escape(all).replace('\"', '&quot;')\n                results.append('<a href=\"%s\">%s</a>' % (url, url))\n            elif rfc:\n                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)\n                results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n            elif pep:\n                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)\n                results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n            elif text[end:end+1] == '(':\n                results.append(self.namelink(name, methods, funcs, classes))\n            elif selfdot:\n                results.append('self.<strong>%s</strong>' % name)\n            else:\n                results.append(self.namelink(name, classes))\n            here = end\n        results.append(escape(text[here:]))\n        return join(results, '')\n\n    # ---------------------------------------------- type-specific routines\n\n    def formattree(self, tree, modname, parent=None):\n        \"\"\"Produce HTML for a class tree as given by inspect.getclasstree().\"\"\"\n        result = ''\n        for entry in tree:\n            if type(entry) is type(()):\n                c, bases = entry\n                result = result + '<dt><font face=\"helvetica, arial\">'\n                result = result + self.classlink(c, modname)\n                if bases and bases != (parent,):\n                    parents = []\n                    for base in bases:\n                        parents.append(self.classlink(base, modname))\n                    result = result + '(' + join(parents, ', ') + ')'\n                result = result + '\\n</font></dt>'\n            elif type(entry) is type([]):\n                result = result + '<dd>\\n%s</dd>\\n' % self.formattree(\n                    entry, modname, c)\n        return '<dl>\\n%s</dl>\\n' % result\n\n    def docmodule(self, object, name=None, mod=None, *ignored):\n        \"\"\"Produce HTML documentation for a module object.\"\"\"\n        name = object.__name__ # ignore the passed-in name\n        try:\n            all = object.__all__\n        except AttributeError:\n            all = None\n        parts = split(name, '.')\n        links = []\n        for i in range(len(parts)-1):\n            links.append(\n                '<a href=\"%s.html\"><font color=\"#ffffff\">%s</font></a>' %\n                (join(parts[:i+1], '.'), parts[i]))\n        linkedname = join(links + parts[-1:], '.')\n        head = 
'<big><big><strong>%s</strong></big></big>' % linkedname\n        try:\n            path = inspect.getabsfile(object)\n            url = path\n            if sys.platform == 'win32':\n                import nturl2path\n                url = nturl2path.pathname2url(path)\n            filelink = '<a href=\"file:%s\">%s</a>' % (url, path)\n        except TypeError:\n            filelink = '(built-in)'\n        info = []\n        if hasattr(object, '__version__'):\n            version = str(object.__version__)\n            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':\n                version = strip(version[11:-1])\n            info.append('version %s' % self.escape(version))\n        if hasattr(object, '__date__'):\n            info.append(self.escape(str(object.__date__)))\n        if info:\n            head = head + ' (%s)' % join(info, ', ')\n        docloc = self.getdocloc(object)\n        if docloc is not None:\n            docloc = '<br><a href=\"%(docloc)s\">Module Docs</a>' % locals()\n        else:\n            docloc = ''\n        result = self.heading(\n            head, '#ffffff', '#7799ee',\n            '<a href=\".\">index</a><br>' + filelink + docloc)\n\n        modules = inspect.getmembers(object, inspect.ismodule)\n\n        classes, cdict = [], {}\n        for key, value in inspect.getmembers(object, inspect.isclass):\n            # if __all__ exists, believe it.  Otherwise use old heuristic.\n            if (all is not None or\n                (inspect.getmodule(value) or object) is object):\n                if visiblename(key, all, object):\n                    classes.append((key, value))\n                    cdict[key] = cdict[value] = '#' + key\n        for key, value in classes:\n            for base in value.__bases__:\n                key, modname = base.__name__, base.__module__\n                module = sys.modules.get(modname)\n                if modname != name and module and hasattr(module, key):\n                    if getattr(module, key) is base:\n                        if not key in cdict:\n                            cdict[key] = cdict[base] = modname + '.html#' + key\n        funcs, fdict = [], {}\n        for key, value in inspect.getmembers(object, inspect.isroutine):\n            # if __all__ exists, believe it.  
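(__all__ is the module author's explicit public API.) 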
Otherwise use old heuristic.\n            if (all is not None or\n                inspect.isbuiltin(value) or inspect.getmodule(value) is object):\n                if visiblename(key, all, object):\n                    funcs.append((key, value))\n                    fdict[key] = '#-' + key\n                    if inspect.isfunction(value): fdict[value] = fdict[key]\n        data = []\n        for key, value in inspect.getmembers(object, isdata):\n            if visiblename(key, all, object):\n                data.append((key, value))\n\n        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)\n        doc = doc and '<tt>%s</tt>' % doc\n        result = result + '<p>%s</p>\\n' % doc\n\n        if hasattr(object, '__path__'):\n            modpkgs = []\n            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):\n                modpkgs.append((modname, name, ispkg, 0))\n            modpkgs.sort()\n            contents = self.multicolumn(modpkgs, self.modpkglink)\n            result = result + self.bigsection(\n                'Package Contents', '#ffffff', '#aa55cc', contents)\n        elif modules:\n            contents = self.multicolumn(\n                modules, lambda key_value, s=self: s.modulelink(key_value[1]))\n            result = result + self.bigsection(\n                'Modules', '#ffffff', '#aa55cc', contents)\n\n        if classes:\n            classlist = map(lambda key_value: key_value[1], classes)\n            contents = [\n                self.formattree(inspect.getclasstree(classlist, 1), name)]\n            for key, value in classes:\n                contents.append(self.document(value, key, name, fdict, cdict))\n            result = result + self.bigsection(\n                'Classes', '#ffffff', '#ee77aa', join(contents))\n        if funcs:\n            contents = []\n            for key, value in funcs:\n                contents.append(self.document(value, key, name, fdict, cdict))\n            result = result + self.bigsection(\n                'Functions', '#ffffff', '#eeaa77', join(contents))\n        if data:\n            contents = []\n            for key, value in data:\n                contents.append(self.document(value, key))\n            result = result + self.bigsection(\n                'Data', '#ffffff', '#55aa55', join(contents, '<br>\\n'))\n        if hasattr(object, '__author__'):\n            contents = self.markup(str(object.__author__), self.preformat)\n            result = result + self.bigsection(\n                'Author', '#ffffff', '#7799ee', contents)\n        if hasattr(object, '__credits__'):\n            contents = self.markup(str(object.__credits__), self.preformat)\n            result = result + self.bigsection(\n                'Credits', '#ffffff', '#7799ee', contents)\n\n        return result\n\n    def docclass(self, object, name=None, mod=None, funcs={}, classes={},\n                 *ignored):\n        \"\"\"Produce HTML documentation for a class object.\"\"\"\n        realname = object.__name__\n        name = name or realname\n        bases = object.__bases__\n\n        contents = []\n        push = contents.append\n\n        # Cute little class to pump out a horizontal rule between sections.\n        class HorizontalRule:\n            def __init__(self):\n                self.needone = 0\n            def maybe(self):\n                if self.needone:\n                    push('<hr>\\n')\n                self.needone = 1\n        hr = HorizontalRule()\n\n        # List the mro, if 
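it is longer than just the class and object, i.e. 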
non-trivial.\n        mro = deque(inspect.getmro(object))\n        if len(mro) > 2:\n            hr.maybe()\n            push('<dl><dt>Method resolution order:</dt>\\n')\n            for base in mro:\n                push('<dd>%s</dd>\\n' % self.classlink(base,\n                                                      object.__module__))\n            push('</dl>\\n')\n\n        def spill(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    try:\n                        value = getattr(object, name)\n                    except Exception:\n                        # Some descriptors may meet a failure in their __get__.\n                        # (bug #1785)\n                        push(self._docdescriptor(name, value, mod))\n                    else:\n                        push(self.document(value, name, mod,\n                                        funcs, classes, mdict, object))\n                    push('\\n')\n            return attrs\n\n        def spilldescriptors(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    push(self._docdescriptor(name, value, mod))\n            return attrs\n\n        def spilldata(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    base = self.docother(getattr(object, name), name, mod)\n                    if (hasattr(value, '__call__') or\n                            inspect.isdatadescriptor(value)):\n                        doc = getattr(value, \"__doc__\", None)\n                    else:\n                        doc = None\n                    if doc is None:\n                        push('<dl><dt>%s</dl>\\n' % base)\n                    else:\n                        doc = self.markup(getdoc(value), self.preformat,\n                                          funcs, classes, mdict)\n                        doc = '<dd><tt>%s</tt>' % doc\n                        push('<dl><dt>%s%s</dl>\\n' % (base, doc))\n                    push('\\n')\n            return attrs\n\n        attrs = filter(lambda data: visiblename(data[0], obj=object),\n                       classify_class_attrs(object))\n        mdict = {}\n        for key, kind, homecls, value in attrs:\n            mdict[key] = anchor = '#' + name + '-' + key\n            try:\n                value = getattr(object, name)\n            except Exception:\n                # Some descriptors may meet a failure in their __get__.\n                # (bug #1785)\n                pass\n            try:\n                # The value may not be hashable (e.g., a data attr with\n                # a dict or list value).\n                mdict[value] = anchor\n            except TypeError:\n                pass\n\n        while attrs:\n            if mro:\n                thisclass = mro.popleft()\n            else:\n                thisclass = attrs[0][2]\n            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\n\n            if thisclass is __builtin__.object:\n                attrs = inherited\n                continue\n            elif thisclass is object:\n              
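  # i.e. the documented class itself defines these attributes\n              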
  tag = 'defined here'\n            else:\n                tag = 'inherited from %s' % self.classlink(thisclass,\n                                                           object.__module__)\n            tag += ':<br>\\n'\n\n            # Sort attrs by name.\n            try:\n                attrs.sort(key=lambda t: t[0])\n            except TypeError:\n                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0]))    # 2.3 compat\n\n            # Pump out the attrs, segregated by kind.\n            attrs = spill('Methods %s' % tag, attrs,\n                          lambda t: t[1] == 'method')\n            attrs = spill('Class methods %s' % tag, attrs,\n                          lambda t: t[1] == 'class method')\n            attrs = spill('Static methods %s' % tag, attrs,\n                          lambda t: t[1] == 'static method')\n            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,\n                                     lambda t: t[1] == 'data descriptor')\n            attrs = spilldata('Data and other attributes %s' % tag, attrs,\n                              lambda t: t[1] == 'data')\n            assert attrs == []\n            attrs = inherited\n\n        contents = ''.join(contents)\n\n        if name == realname:\n            title = '<a name=\"%s\">class <strong>%s</strong></a>' % (\n                name, realname)\n        else:\n            title = '<strong>%s</strong> = <a name=\"%s\">class %s</a>' % (\n                name, name, realname)\n        if bases:\n            parents = []\n            for base in bases:\n                parents.append(self.classlink(base, object.__module__))\n            title = title + '(%s)' % join(parents, ', ')\n        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)\n        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc\n\n        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)\n\n    def formatvalue(self, object):\n        \"\"\"Format an argument default value as text.\"\"\"\n        return self.grey('=' + self.repr(object))\n\n    def docroutine(self, object, name=None, mod=None,\n                   funcs={}, classes={}, methods={}, cl=None):\n        \"\"\"Produce HTML documentation for a function or method object.\"\"\"\n        realname = object.__name__\n        name = name or realname\n        anchor = (cl and cl.__name__ or '') + '-' + name\n        note = ''\n        skipdocs = 0\n        if inspect.ismethod(object):\n            imclass = object.im_class\n            if cl:\n                if imclass is not cl:\n                    note = ' from ' + self.classlink(imclass, mod)\n            else:\n                if object.im_self is not None:\n                    note = ' method of %s instance' % self.classlink(\n                        object.im_self.__class__, mod)\n                else:\n                    note = ' unbound %s method' % self.classlink(imclass,mod)\n            object = object.im_func\n\n        if name == realname:\n            title = '<a name=\"%s\"><strong>%s</strong></a>' % (anchor, realname)\n        else:\n            if (cl and realname in cl.__dict__ and\n                cl.__dict__[realname] is object):\n                reallink = '<a href=\"#%s\">%s</a>' % (\n                    cl.__name__ + '-' + realname, realname)\n                skipdocs = 1\n            else:\n                reallink = realname\n            title = '<a name=\"%s\"><strong>%s</strong></a> = %s' % (\n                anchor, name, reallink)\n        if 
inspect.isfunction(object):\n            args, varargs, varkw, defaults = inspect.getargspec(object)\n            argspec = inspect.formatargspec(\n                args, varargs, varkw, defaults, formatvalue=self.formatvalue)\n            if realname == '<lambda>':\n                title = '<strong>%s</strong> <em>lambda</em> ' % name\n                argspec = argspec[1:-1] # remove parentheses\n        else:\n            argspec = '(...)'\n\n        decl = title + argspec + (note and self.grey(\n               '<font face=\"helvetica, arial\">%s</font>' % note))\n\n        if skipdocs:\n            return '<dl><dt>%s</dt></dl>\\n' % decl\n        else:\n            doc = self.markup(\n                getdoc(object), self.preformat, funcs, classes, methods)\n            doc = doc and '<dd><tt>%s</tt></dd>' % doc\n            return '<dl><dt>%s</dt>%s</dl>\\n' % (decl, doc)\n\n    def _docdescriptor(self, name, value, mod):\n        results = []\n        push = results.append\n\n        if name:\n            push('<dl><dt><strong>%s</strong></dt>\\n' % name)\n        if value.__doc__ is not None:\n            doc = self.markup(getdoc(value), self.preformat)\n            push('<dd><tt>%s</tt></dd>\\n' % doc)\n        push('</dl>\\n')\n\n        return ''.join(results)\n\n    def docproperty(self, object, name=None, mod=None, cl=None):\n        \"\"\"Produce html documentation for a property.\"\"\"\n        return self._docdescriptor(name, object, mod)\n\n    def docother(self, object, name=None, mod=None, *ignored):\n        \"\"\"Produce HTML documentation for a data object.\"\"\"\n        lhs = name and '<strong>%s</strong> = ' % name or ''\n        return lhs + self.repr(object)\n\n    def docdata(self, object, name=None, mod=None, cl=None):\n        \"\"\"Produce html documentation for a data descriptor.\"\"\"\n        return self._docdescriptor(name, object, mod)\n\n    def index(self, dir, shadowed=None):\n        \"\"\"Generate an HTML index for a directory of modules.\"\"\"\n        modpkgs = []\n        if shadowed is None: shadowed = {}\n        for importer, name, ispkg in pkgutil.iter_modules([dir]):\n            modpkgs.append((name, '', ispkg, name in shadowed))\n            shadowed[name] = 1\n\n        modpkgs.sort()\n        contents = self.multicolumn(modpkgs, self.modpkglink)\n        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)\n\n# -------------------------------------------- text documentation generator\n\nclass TextRepr(Repr):\n    \"\"\"Class for safely making a text representation of a Python object.\"\"\"\n    def __init__(self):\n        Repr.__init__(self)\n        self.maxlist = self.maxtuple = 20\n        self.maxdict = 10\n        self.maxstring = self.maxother = 100\n\n    def repr1(self, x, level):\n        if hasattr(type(x), '__name__'):\n            methodname = 'repr_' + join(split(type(x).__name__), '_')\n            if hasattr(self, methodname):\n                return getattr(self, methodname)(x, level)\n        return cram(stripid(repr(x)), self.maxother)\n\n    def repr_string(self, x, level):\n        test = cram(x, self.maxstring)\n        testrepr = repr(test)\n        if '\\\\' in test and '\\\\' not in replace(testrepr, r'\\\\', ''):\n            # Backslashes are only literal in the string and are never\n            # needed to make any special characters, so show a raw string.\n            return 'r' + testrepr[0] + test + testrepr[0]\n        return testrepr\n\n    repr_str = repr_string\n\n    def repr_instance(self, x, 
level):\n        try:\n            return cram(stripid(repr(x)), self.maxstring)\n        except:\n            return '<%s instance>' % x.__class__.__name__\n\nclass TextDoc(Doc):\n    \"\"\"Formatter class for text documentation.\"\"\"\n\n    # ------------------------------------------- text formatting utilities\n\n    _repr_instance = TextRepr()\n    repr = _repr_instance.repr\n\n    def bold(self, text):\n        \"\"\"Format a string in bold by overstriking.\"\"\"\n        return join(map(lambda ch: ch + '\\b' + ch, text), '')\n\n    def indent(self, text, prefix='    '):\n        \"\"\"Indent text by prepending a given prefix to each line.\"\"\"\n        if not text: return ''\n        lines = split(text, '\\n')\n        lines = map(lambda line, prefix=prefix: prefix + line, lines)\n        if lines: lines[-1] = rstrip(lines[-1])\n        return join(lines, '\\n')\n\n    def section(self, title, contents):\n        \"\"\"Format a section with a given heading.\"\"\"\n        return self.bold(title) + '\\n' + rstrip(self.indent(contents)) + '\\n\\n'\n\n    # ---------------------------------------------- type-specific routines\n\n    def formattree(self, tree, modname, parent=None, prefix=''):\n        \"\"\"Render in text a class tree as returned by inspect.getclasstree().\"\"\"\n        result = ''\n        for entry in tree:\n            if type(entry) is type(()):\n                c, bases = entry\n                result = result + prefix + classname(c, modname)\n                if bases and bases != (parent,):\n                    parents = map(lambda c, m=modname: classname(c, m), bases)\n                    result = result + '(%s)' % join(parents, ', ')\n                result = result + '\\n'\n            elif type(entry) is type([]):\n                result = result + self.formattree(\n                    entry, modname, c, prefix + '    ')\n        return result\n\n    def docmodule(self, object, name=None, mod=None):\n        \"\"\"Produce text documentation for a given module object.\"\"\"\n        name = object.__name__ # ignore the passed-in name\n        synop, desc = splitdoc(getdoc(object))\n        result = self.section('NAME', name + (synop and ' - ' + synop))\n\n        try:\n            all = object.__all__\n        except AttributeError:\n            all = None\n\n        try:\n            file = inspect.getabsfile(object)\n        except TypeError:\n            file = '(built-in)'\n        result = result + self.section('FILE', file)\n\n        docloc = self.getdocloc(object)\n        if docloc is not None:\n            result = result + self.section('MODULE DOCS', docloc)\n\n        if desc:\n            result = result + self.section('DESCRIPTION', desc)\n\n        classes = []\n        for key, value in inspect.getmembers(object, inspect.isclass):\n            # if __all__ exists, believe it.  Otherwise use old heuristic.\n            if (all is not None\n                or (inspect.getmodule(value) or object) is object):\n                if visiblename(key, all, object):\n                    classes.append((key, value))\n        funcs = []\n        for key, value in inspect.getmembers(object, inspect.isroutine):\n            # if __all__ exists, believe it.  
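(Same policy as the HTML formatter.) 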
Otherwise use old heuristic.\n            if (all is not None or\n                inspect.isbuiltin(value) or inspect.getmodule(value) is object):\n                if visiblename(key, all, object):\n                    funcs.append((key, value))\n        data = []\n        for key, value in inspect.getmembers(object, isdata):\n            if visiblename(key, all, object):\n                data.append((key, value))\n\n        modpkgs = []\n        modpkgs_names = set()\n        if hasattr(object, '__path__'):\n            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):\n                modpkgs_names.add(modname)\n                if ispkg:\n                    modpkgs.append(modname + ' (package)')\n                else:\n                    modpkgs.append(modname)\n\n            modpkgs.sort()\n            result = result + self.section(\n                'PACKAGE CONTENTS', join(modpkgs, '\\n'))\n\n        # Detect submodules as sometimes created by C extensions\n        submodules = []\n        for key, value in inspect.getmembers(object, inspect.ismodule):\n            if value.__name__.startswith(name + '.') and key not in modpkgs_names:\n                submodules.append(key)\n        if submodules:\n            submodules.sort()\n            result = result + self.section(\n                'SUBMODULES', join(submodules, '\\n'))\n\n        if classes:\n            classlist = map(lambda key_value: key_value[1], classes)\n            contents = [self.formattree(\n                inspect.getclasstree(classlist, 1), name)]\n            for key, value in classes:\n                contents.append(self.document(value, key, name))\n            result = result + self.section('CLASSES', join(contents, '\\n'))\n\n        if funcs:\n            contents = []\n            for key, value in funcs:\n                contents.append(self.document(value, key, name))\n            result = result + self.section('FUNCTIONS', join(contents, '\\n'))\n\n        if data:\n            contents = []\n            for key, value in data:\n                contents.append(self.docother(value, key, name, maxlen=70))\n            result = result + self.section('DATA', join(contents, '\\n'))\n\n        if hasattr(object, '__version__'):\n            version = str(object.__version__)\n            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':\n                version = strip(version[11:-1])\n            result = result + self.section('VERSION', version)\n        if hasattr(object, '__date__'):\n            result = result + self.section('DATE', str(object.__date__))\n        if hasattr(object, '__author__'):\n            result = result + self.section('AUTHOR', str(object.__author__))\n        if hasattr(object, '__credits__'):\n            result = result + self.section('CREDITS', str(object.__credits__))\n        return result\n\n    def docclass(self, object, name=None, mod=None, *ignored):\n        \"\"\"Produce text documentation for a given class object.\"\"\"\n        realname = object.__name__\n        name = name or realname\n        bases = object.__bases__\n\n        def makename(c, m=object.__module__):\n            return classname(c, m)\n\n        if name == realname:\n            title = 'class ' + self.bold(realname)\n        else:\n            title = self.bold(name) + ' = class ' + realname\n        if bases:\n            parents = map(makename, bases)\n            title = title + '(%s)' % join(parents, ', ')\n\n        doc = getdoc(object)\n        contents = doc 
and [doc + '\\n'] or []\n        push = contents.append\n\n        # List the mro, if non-trivial.\n        mro = deque(inspect.getmro(object))\n        if len(mro) > 2:\n            push(\"Method resolution order:\")\n            for base in mro:\n                push('    ' + makename(base))\n            push('')\n\n        # Cute little class to pump out a horizontal rule between sections.\n        class HorizontalRule:\n            def __init__(self):\n                self.needone = 0\n            def maybe(self):\n                if self.needone:\n                    push('-' * 70)\n                self.needone = 1\n        hr = HorizontalRule()\n\n        def spill(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    try:\n                        value = getattr(object, name)\n                    except Exception:\n                        # Some descriptors may meet a failure in their __get__.\n                        # (bug #1785)\n                        push(self._docdescriptor(name, value, mod))\n                    else:\n                        push(self.document(value,\n                                        name, mod, object))\n            return attrs\n\n        def spilldescriptors(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    push(self._docdescriptor(name, value, mod))\n            return attrs\n\n        def spilldata(msg, attrs, predicate):\n            ok, attrs = _split_list(attrs, predicate)\n            if ok:\n                hr.maybe()\n                push(msg)\n                for name, kind, homecls, value in ok:\n                    if (hasattr(value, '__call__') or\n                            inspect.isdatadescriptor(value)):\n                        doc = getdoc(value)\n                    else:\n                        doc = None\n                    push(self.docother(getattr(object, name),\n                                       name, mod, maxlen=70, doc=doc) + '\\n')\n            return attrs\n\n        attrs = filter(lambda data: visiblename(data[0], obj=object),\n                       classify_class_attrs(object))\n        while attrs:\n            if mro:\n                thisclass = mro.popleft()\n            else:\n                thisclass = attrs[0][2]\n            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\n\n            if thisclass is __builtin__.object:\n                attrs = inherited\n                continue\n            elif thisclass is object:\n                tag = \"defined here\"\n            else:\n                tag = \"inherited from %s\" % classname(thisclass,\n                                                      object.__module__)\n\n            # Sort attrs by name.\n            attrs.sort()\n\n            # Pump out the attrs, segregated by kind.\n            attrs = spill(\"Methods %s:\\n\" % tag, attrs,\n                          lambda t: t[1] == 'method')\n            attrs = spill(\"Class methods %s:\\n\" % tag, attrs,\n                          lambda t: t[1] == 'class method')\n            attrs = spill(\"Static methods %s:\\n\" % tag, attrs,\n                          lambda t: t[1] == 'static method')\n            attrs = 
spilldescriptors(\"Data descriptors %s:\\n\" % tag, attrs,\n                                     lambda t: t[1] == 'data descriptor')\n            attrs = spilldata(\"Data and other attributes %s:\\n\" % tag, attrs,\n                              lambda t: t[1] == 'data')\n            assert attrs == []\n            attrs = inherited\n\n        contents = '\\n'.join(contents)\n        if not contents:\n            return title + '\\n'\n        return title + '\\n' + self.indent(rstrip(contents), ' |  ') + '\\n'\n\n    def formatvalue(self, object):\n        \"\"\"Format an argument default value as text.\"\"\"\n        return '=' + self.repr(object)\n\n    def docroutine(self, object, name=None, mod=None, cl=None):\n        \"\"\"Produce text documentation for a function or method object.\"\"\"\n        realname = object.__name__\n        name = name or realname\n        note = ''\n        skipdocs = 0\n        if inspect.ismethod(object):\n            imclass = object.im_class\n            if cl:\n                if imclass is not cl:\n                    note = ' from ' + classname(imclass, mod)\n            else:\n                if object.im_self is not None:\n                    note = ' method of %s instance' % classname(\n                        object.im_self.__class__, mod)\n                else:\n                    note = ' unbound %s method' % classname(imclass,mod)\n            object = object.im_func\n\n        if name == realname:\n            title = self.bold(realname)\n        else:\n            if (cl and realname in cl.__dict__ and\n                cl.__dict__[realname] is object):\n                skipdocs = 1\n            title = self.bold(name) + ' = ' + realname\n        if inspect.isfunction(object):\n            args, varargs, varkw, defaults = inspect.getargspec(object)\n            argspec = inspect.formatargspec(\n                args, varargs, varkw, defaults, formatvalue=self.formatvalue)\n            if realname == '<lambda>':\n                title = self.bold(name) + ' lambda '\n                argspec = argspec[1:-1] # remove parentheses\n        else:\n            argspec = '(...)'\n        decl = title + argspec + note\n\n        if skipdocs:\n            return decl + '\\n'\n        else:\n            doc = getdoc(object) or ''\n            return decl + '\\n' + (doc and rstrip(self.indent(doc)) + '\\n')\n\n    def _docdescriptor(self, name, value, mod):\n        results = []\n        push = results.append\n\n        if name:\n            push(self.bold(name))\n            push('\\n')\n        doc = getdoc(value) or ''\n        if doc:\n            push(self.indent(doc))\n            push('\\n')\n        return ''.join(results)\n\n    def docproperty(self, object, name=None, mod=None, cl=None):\n        \"\"\"Produce text documentation for a property.\"\"\"\n        return self._docdescriptor(name, object, mod)\n\n    def docdata(self, object, name=None, mod=None, cl=None):\n        \"\"\"Produce text documentation for a data descriptor.\"\"\"\n        return self._docdescriptor(name, object, mod)\n\n    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):\n        \"\"\"Produce text documentation for a data object.\"\"\"\n        repr = self.repr(object)\n        if maxlen:\n            line = (name and name + ' = ' or '') + repr\n            chop = maxlen - len(line)\n            if chop < 0: repr = repr[:chop] + '...'\n        line = (name and self.bold(name) + ' = ' or '') + repr\n        if doc is not None:\n        
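    # hang the doc string, indented, beneath the value line\n        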
    line += '\\n' + self.indent(str(doc))\n        return line\n\n# --------------------------------------------------------- user interfaces\n\ndef pager(text):\n    \"\"\"The first time this is called, determine what kind of pager to use.\"\"\"\n    global pager\n    pager = getpager()\n    pager(text)\n\ndef getpager():\n    \"\"\"Decide what method to use for paging through text.\"\"\"\n    if type(sys.stdout) is not types.FileType:\n        return plainpager\n    if not sys.stdin.isatty() or not sys.stdout.isatty():\n        return plainpager\n    if 'PAGER' in os.environ:\n        if sys.platform == 'win32': # pipes completely broken in Windows\n            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])\n        elif os.environ.get('TERM') in ('dumb', 'emacs'):\n            return lambda text: pipepager(plain(text), os.environ['PAGER'])\n        else:\n            return lambda text: pipepager(text, os.environ['PAGER'])\n    if os.environ.get('TERM') in ('dumb', 'emacs'):\n        return plainpager\n    if sys.platform == 'win32' or sys.platform.startswith('os2'):\n        return lambda text: tempfilepager(plain(text), 'more <')\n    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:\n        return lambda text: pipepager(text, 'less')\n\n    import tempfile\n    (fd, filename) = tempfile.mkstemp()\n    os.close(fd)\n    try:\n        if hasattr(os, 'system') and os.system('more \"%s\"' % filename) == 0:\n            return lambda text: pipepager(text, 'more')\n        else:\n            return ttypager\n    finally:\n        os.unlink(filename)\n\ndef plain(text):\n    \"\"\"Remove boldface formatting from text.\"\"\"\n    return re.sub('.\b', '', text)\n\ndef pipepager(text, cmd):\n    \"\"\"Page through text by feeding it to another program.\"\"\"\n    pipe = os.popen(cmd, 'w')\n    try:\n        pipe.write(text)\n        pipe.close()\n    except IOError:\n        pass # Ignore broken pipes caused by quitting the pager program.\n\ndef tempfilepager(text, cmd):\n    \"\"\"Page through text by invoking a program on a temporary file.\"\"\"\n    import tempfile\n    fd, filename = tempfile.mkstemp()\n    os.close(fd)\n    file = open(filename, 'w')\n    file.write(text)\n    file.close()\n    try:\n        os.system(cmd + ' \"' + filename + '\"')\n    finally:\n        os.unlink(filename)\n\ndef ttypager(text):\n    \"\"\"Page through text on a text terminal.\"\"\"\n    lines = split(plain(text), '\\n')\n    try:\n        import tty\n        fd = sys.stdin.fileno()\n        old = tty.tcgetattr(fd)\n        tty.setcbreak(fd)\n        getchar = lambda: sys.stdin.read(1)\n    except (ImportError, AttributeError):\n        tty = None\n        getchar = lambda: sys.stdin.readline()[:-1][:1]\n\n    try:\n        r = inc = int(os.environ.get('LINES', 25)) - 1\n        sys.stdout.write(join(lines[:inc], '\\n') + '\\n')\n        while lines[r:]:\n            sys.stdout.write('-- more --')\n            sys.stdout.flush()\n            c = getchar()\n\n            if c in ('q', 'Q'):\n                sys.stdout.write('\\r          \\r')\n                break\n            elif c in ('\\r', '\\n'):\n                sys.stdout.write('\\r          \\r' + lines[r] + '\\n')\n                r = r + 1\n                continue\n            if c in ('b', 'B', '\\x1b'):\n                r = r - inc - inc\n                if r < 0: r = 0\n            sys.stdout.write('\\n' + join(lines[r:r+inc], '\\n') + '\\n')\n            r = r + inc\n\n    finally:\n        if tty:\n            
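# restore the terminal attributes saved before setcbreak\n            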
tty.tcsetattr(fd, tty.TCSAFLUSH, old)\n\ndef plainpager(text):\n    \"\"\"Simply print unformatted text.  This is the ultimate fallback.\"\"\"\n    sys.stdout.write(plain(text))\n\ndef describe(thing):\n    \"\"\"Produce a short description of the given thing.\"\"\"\n    if inspect.ismodule(thing):\n        if thing.__name__ in sys.builtin_module_names:\n            return 'built-in module ' + thing.__name__\n        if hasattr(thing, '__path__'):\n            return 'package ' + thing.__name__\n        else:\n            return 'module ' + thing.__name__\n    if inspect.isbuiltin(thing):\n        return 'built-in function ' + thing.__name__\n    if inspect.isgetsetdescriptor(thing):\n        return 'getset descriptor %s.%s.%s' % (\n            thing.__objclass__.__module__, thing.__objclass__.__name__,\n            thing.__name__)\n    if inspect.ismemberdescriptor(thing):\n        return 'member descriptor %s.%s.%s' % (\n            thing.__objclass__.__module__, thing.__objclass__.__name__,\n            thing.__name__)\n    if inspect.isclass(thing):\n        return 'class ' + thing.__name__\n    if inspect.isfunction(thing):\n        return 'function ' + thing.__name__\n    if inspect.ismethod(thing):\n        return 'method ' + thing.__name__\n    if type(thing) is types.InstanceType:\n        return 'instance of ' + thing.__class__.__name__\n    return type(thing).__name__\n\ndef locate(path, forceload=0):\n    \"\"\"Locate an object by name or dotted path, importing as necessary.\"\"\"\n    parts = [part for part in split(path, '.') if part]\n    module, n = None, 0\n    while n < len(parts):\n        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)\n        if nextmodule: module, n = nextmodule, n + 1\n        else: break\n    if module:\n        object = module\n    else:\n        object = __builtin__\n    for part in parts[n:]:\n        try:\n            object = getattr(object, part)\n        except AttributeError:\n            return None\n    return object\n\n# --------------------------------------- interactive interpreter interface\n\ntext = TextDoc()\nhtml = HTMLDoc()\n\nclass _OldStyleClass: pass\n_OLD_INSTANCE_TYPE = type(_OldStyleClass())\n\ndef resolve(thing, forceload=0):\n    \"\"\"Given an object or a path to an object, get the object and its name.\"\"\"\n    if isinstance(thing, str):\n        object = locate(thing, forceload)\n        if not object:\n            raise ImportError, 'no Python documentation found for %r' % thing\n        return object, thing\n    else:\n        name = getattr(thing, '__name__', None)\n        return thing, name if isinstance(name, str) else None\n\ndef render_doc(thing, title='Python Library Documentation: %s', forceload=0):\n    \"\"\"Render text documentation, given an object or a path to an object.\"\"\"\n    object, name = resolve(thing, forceload)\n    desc = describe(object)\n    module = inspect.getmodule(object)\n    if name and '.' 
in name:\n        desc += ' in ' + name[:name.rfind('.')]\n    elif module and module is not object:\n        desc += ' in module ' + module.__name__\n    if type(object) is _OLD_INSTANCE_TYPE:\n        # If the passed object is an instance of an old-style class,\n        # document its available methods instead of its value.\n        object = object.__class__\n    elif not (inspect.ismodule(object) or\n              inspect.isclass(object) or\n              inspect.isroutine(object) or\n              inspect.isgetsetdescriptor(object) or\n              inspect.ismemberdescriptor(object) or\n              isinstance(object, property)):\n        # If the passed object is a piece of data or an instance,\n        # document its available methods instead of its value.\n        object = type(object)\n        desc += ' object'\n    return title % desc + '\\n\\n' + text.document(object, name)\n\ndef doc(thing, title='Python Library Documentation: %s', forceload=0):\n    \"\"\"Display text documentation, given an object or a path to an object.\"\"\"\n    try:\n        pager(render_doc(thing, title, forceload))\n    except (ImportError, ErrorDuringImport), value:\n        print value\n\ndef writedoc(thing, forceload=0):\n    \"\"\"Write HTML documentation to a file in the current directory.\"\"\"\n    try:\n        object, name = resolve(thing, forceload)\n        page = html.page(describe(object), html.document(object, name))\n        file = open(name + '.html', 'w')\n        file.write(page)\n        file.close()\n        print 'wrote', name + '.html'\n    except (ImportError, ErrorDuringImport), value:\n        print value\n\ndef writedocs(dir, pkgpath='', done=None):\n    \"\"\"Write out HTML documentation for all modules in a directory tree.\"\"\"\n    if done is None: done = {}\n    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):\n        writedoc(modname)\n    return\n\nclass Helper:\n\n    # These dictionaries map a topic name to either an alias, or a tuple\n    # (label, seealso-items).  
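An alias simply redirects the lookup to another entry. 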
The \"label\" is the label of the corresponding\n    # section in the .rst file under Doc/ and an index into the dictionary\n    # in pydoc_data/topics.py.\n    #\n    # CAUTION: if you change one of these dictionaries, be sure to adapt the\n    #          list of needed labels in Doc/tools/sphinxext/pyspecific.py and\n    #          regenerate the pydoc_data/topics.py file by running\n    #              make pydoc-topics\n    #          in Doc/ and copying the output file into the Lib/ directory.\n\n    keywords = {\n        'and': 'BOOLEAN',\n        'as': 'with',\n        'assert': ('assert', ''),\n        'break': ('break', 'while for'),\n        'class': ('class', 'CLASSES SPECIALMETHODS'),\n        'continue': ('continue', 'while for'),\n        'def': ('function', ''),\n        'del': ('del', 'BASICMETHODS'),\n        'elif': 'if',\n        'else': ('else', 'while for'),\n        'except': 'try',\n        'exec': ('exec', ''),\n        'finally': 'try',\n        'for': ('for', 'break continue while'),\n        'from': 'import',\n        'global': ('global', 'NAMESPACES'),\n        'if': ('if', 'TRUTHVALUE'),\n        'import': ('import', 'MODULES'),\n        'in': ('in', 'SEQUENCEMETHODS2'),\n        'is': 'COMPARISON',\n        'lambda': ('lambda', 'FUNCTIONS'),\n        'not': 'BOOLEAN',\n        'or': 'BOOLEAN',\n        'pass': ('pass', ''),\n        'print': ('print', ''),\n        'raise': ('raise', 'EXCEPTIONS'),\n        'return': ('return', 'FUNCTIONS'),\n        'try': ('try', 'EXCEPTIONS'),\n        'while': ('while', 'break continue if TRUTHVALUE'),\n        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),\n        'yield': ('yield', ''),\n    }\n    # Either add symbols to this dictionary or to the symbols dictionary\n    # directly: Whichever is easier. 
They are merged later.\n    _symbols_inverse = {\n        'STRINGS' : (\"'\", \"'''\", \"r'\", \"u'\", '\"\"\"', '\"', 'r\"', 'u\"'),\n        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',\n                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),\n        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),\n        'UNARY' : ('-', '~'),\n        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',\n                                '^=', '<<=', '>>=', '**=', '//='),\n        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),\n        'COMPLEX' : ('j', 'J')\n    }\n    symbols = {\n        '%': 'OPERATORS FORMATTING',\n        '**': 'POWER',\n        ',': 'TUPLES LISTS FUNCTIONS',\n        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',\n        '...': 'ELLIPSIS',\n        ':': 'SLICINGS DICTIONARYLITERALS',\n        '@': 'def class',\n        '\\\\': 'STRINGS',\n        '_': 'PRIVATENAMES',\n        '__': 'PRIVATENAMES SPECIALMETHODS',\n        '`': 'BACKQUOTES',\n        '(': 'TUPLES FUNCTIONS CALLS',\n        ')': 'TUPLES FUNCTIONS CALLS',\n        '[': 'LISTS SUBSCRIPTS SLICINGS',\n        ']': 'LISTS SUBSCRIPTS SLICINGS'\n    }\n    for topic, symbols_ in _symbols_inverse.iteritems():\n        for symbol in symbols_:\n            topics = symbols.get(symbol, topic)\n            if topic not in topics:\n                topics = topics + ' ' + topic\n            symbols[symbol] = topics\n\n    topics = {\n        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '\n                  'FUNCTIONS CLASSES MODULES FILES inspect'),\n        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '\n                    'TYPES'),\n        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),\n        'FORMATTING': ('formatstrings', 'OPERATORS'),\n        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '\n                    'FORMATTING TYPES'),\n        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),\n        'INTEGER': ('integers', 'int range'),\n        'FLOAT': ('floating', 'float math'),\n        'COMPLEX': ('imaginary', 'complex cmath'),\n        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),\n        'MAPPINGS': 'DICTIONARIES',\n        'FUNCTIONS': ('typesfunctions', 'def TYPES'),\n        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),\n        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),\n        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),\n        'FRAMEOBJECTS': 'TYPES',\n        'TRACEBACKS': 'TYPES',\n        'NONE': ('bltin-null-object', ''),\n        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),\n        'FILES': ('bltin-file-objects', ''),\n        'SPECIALATTRIBUTES': ('specialattrs', ''),\n        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),\n        'MODULES': ('typesmodules', 'import'),\n        'PACKAGES': 'import',\n        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '\n                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '\n                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '\n                        'LISTS DICTIONARIES BACKQUOTES'),\n        'OPERATORS': 'EXPRESSIONS',\n        'PRECEDENCE': 'EXPRESSIONS',\n        'OBJECTS': ('objects', 'TYPES'),\n        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '\n                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '\n                
           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),\n        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),\n        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),\n        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),\n        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '\n                             'SPECIALMETHODS'),\n        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '\n                             'SPECIALMETHODS'),\n        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),\n        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '\n                          'SPECIALMETHODS'),\n        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),\n        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),\n        'DYNAMICFEATURES': ('dynamic-features', ''),\n        'SCOPING': 'NAMESPACES',\n        'FRAMES': 'NAMESPACES',\n        'EXCEPTIONS': ('exceptions', 'try except finally raise'),\n        'COERCIONS': ('coercion-rules','CONVERSIONS'),\n        'CONVERSIONS': ('conversions', 'COERCIONS'),\n        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),\n        'SPECIALIDENTIFIERS': ('id-classes', ''),\n        'PRIVATENAMES': ('atom-identifiers', ''),\n        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '\n                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),\n        'TUPLES': 'SEQUENCES',\n        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),\n        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),\n        'LISTLITERALS': ('lists', 'LISTS LITERALS'),\n        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),\n        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),\n        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),\n        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '\n                       'ATTRIBUTEMETHODS'),\n        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),\n        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),\n        'CALLS': ('calls', 'EXPRESSIONS'),\n        'POWER': ('power', 'EXPRESSIONS'),\n        'UNARY': ('unary', 'EXPRESSIONS'),\n        'BINARY': ('binary', 'EXPRESSIONS'),\n        'SHIFTING': ('shifting', 'EXPRESSIONS'),\n        'BITWISE': ('bitwise', 'EXPRESSIONS'),\n        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),\n        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),\n        'ASSERTION': 'assert',\n        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),\n        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),\n        'DELETION': 'del',\n        'PRINTING': 'print',\n        'RETURNING': 'return',\n        'IMPORTING': 'import',\n        'CONDITIONAL': 'if',\n        'LOOPING': ('compound', 'for while break continue'),\n        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),\n        'DEBUGGING': ('debugger', 'pdb'),\n        'CONTEXTMANAGERS': ('context-managers', 'with'),\n    }\n\n    def __init__(self, input=None, output=None):\n        self._input = input\n        self._output = output\n\n    input  = property(lambda self: self._input or sys.stdin)\n    output = property(lambda self: self._output or sys.stdout)\n\n    def __repr__(self):\n        if inspect.stack()[1][3] == '?':\n            self()\n            return ''\n        return '<pydoc.Helper instance>'\n\n    _GoInteractive = object()\n    def 
__call__(self, request=_GoInteractive):\n        if request is not self._GoInteractive:\n            self.help(request)\n        else:\n            self.intro()\n            self.interact()\n            self.output.write('''\nYou are now leaving help and returning to the Python interpreter.\nIf you want to ask for help on a particular object directly from the\ninterpreter, you can type \"help(object)\".  Executing \"help('string')\"\nhas the same effect as typing a particular string at the help> prompt.\n''')\n\n    def interact(self):\n        self.output.write('\\n')\n        while True:\n            try:\n                request = self.getline('help> ')\n                if not request: break\n            except (KeyboardInterrupt, EOFError):\n                break\n            request = strip(replace(request, '\"', '', \"'\", ''))\n            if lower(request) in ('q', 'quit'): break\n            self.help(request)\n\n    def getline(self, prompt):\n        \"\"\"Read one line, using raw_input when available.\"\"\"\n        if self.input is sys.stdin:\n            return raw_input(prompt)\n        else:\n            self.output.write(prompt)\n            self.output.flush()\n            return self.input.readline()\n\n    def help(self, request):\n        if type(request) is type(''):\n            request = request.strip()\n            if request == 'help': self.intro()\n            elif request == 'keywords': self.listkeywords()\n            elif request == 'symbols': self.listsymbols()\n            elif request == 'topics': self.listtopics()\n            elif request == 'modules': self.listmodules()\n            elif request[:8] == 'modules ':\n                self.listmodules(split(request)[1])\n            elif request in self.symbols: self.showsymbol(request)\n            elif request in self.keywords: self.showtopic(request)\n            elif request in self.topics: self.showtopic(request)\n            elif request: doc(request, 'Help on %s:')\n        elif isinstance(request, Helper): self()\n        else: doc(request, 'Help on %s:')\n        self.output.write('\\n')\n\n    def intro(self):\n        self.output.write('''\nWelcome to Python %s!  This is the online help utility.\n\nIf this is your first time using Python, you should definitely check out\nthe tutorial on the Internet at http://docs.python.org/%s/tutorial/.\n\nEnter the name of any module, keyword, or topic to get help on writing\nPython programs and using Python modules.  To quit this help utility and\nreturn to the interpreter, just type \"quit\".\n\nTo get a list of available modules, keywords, or topics, type \"modules\",\n\"keywords\", or \"topics\".  Each module also comes with a one-line summary\nof what it does; to list the modules whose summaries contain a given word\nsuch as \"spam\", type \"modules spam\".\n''' % tuple([sys.version[:3]]*2))\n\n    def list(self, items, columns=4, width=80):\n        items = items[:]\n        items.sort()\n        colw = width / columns\n        rows = (len(items) + columns - 1) / columns\n        for row in range(rows):\n            for col in range(columns):\n                i = col * rows + row\n                if i < len(items):\n                    self.output.write(items[i])\n                    if col < columns - 1:\n                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))\n            self.output.write('\\n')\n\n    def listkeywords(self):\n        self.output.write('''\nHere is a list of the Python keywords.  
Enter any keyword to get more help.\n\n''')\n        self.list(self.keywords.keys())\n\n    def listsymbols(self):\n        self.output.write('''\nHere is a list of the punctuation symbols which Python assigns special meaning\nto. Enter any symbol to get more help.\n\n''')\n        self.list(self.symbols.keys())\n\n    def listtopics(self):\n        self.output.write('''\nHere is a list of available topics.  Enter any topic name to get more help.\n\n''')\n        self.list(self.topics.keys())\n\n    def showtopic(self, topic, more_xrefs=''):\n        try:\n            import pydoc_data.topics\n        except ImportError:\n            self.output.write('''\nSorry, topic and keyword documentation is not available because the\nmodule \"pydoc_data.topics\" could not be found.\n''')\n            return\n        target = self.topics.get(topic, self.keywords.get(topic))\n        if not target:\n            self.output.write('no documentation found for %s\\n' % repr(topic))\n            return\n        if type(target) is type(''):\n            return self.showtopic(target, more_xrefs)\n\n        label, xrefs = target\n        try:\n            doc = pydoc_data.topics.topics[label]\n        except KeyError:\n            self.output.write('no documentation found for %s\\n' % repr(topic))\n            return\n        pager(strip(doc) + '\\n')\n        if more_xrefs:\n            xrefs = (xrefs or '') + ' ' + more_xrefs\n        if xrefs:\n            import StringIO, formatter\n            buffer = StringIO.StringIO()\n            formatter.DumbWriter(buffer).send_flowing_data(\n                'Related help topics: ' + join(split(xrefs), ', ') + '\\n')\n            self.output.write('\\n%s\\n' % buffer.getvalue())\n\n    def showsymbol(self, symbol):\n        target = self.symbols[symbol]\n        topic, _, xrefs = target.partition(' ')\n        self.showtopic(topic, xrefs)\n\n    def listmodules(self, key=''):\n        if key:\n            self.output.write('''\nHere is a list of matching modules.  Enter any module name to get more help.\n\n''')\n            apropos(key)\n        else:\n            self.output.write('''\nPlease wait a moment while I gather a list of all available modules...\n\n''')\n            modules = {}\n            def callback(path, modname, desc, modules=modules):\n                if modname and modname[-9:] == '.__init__':\n                    modname = modname[:-9] + ' (package)'\n                if find(modname, '.') < 0:\n                    modules[modname] = 1\n            def onerror(modname):\n                callback(None, modname, None)\n            ModuleScanner().run(callback, onerror=onerror)\n            self.list(modules.keys())\n            self.output.write('''\nEnter any module name to get more help.  
Or, type \"modules spam\" to search\nfor modules whose descriptions contain the word \"spam\".\n''')\n\nhelp = Helper()\n\nclass Scanner:\n    \"\"\"A generic tree iterator.\"\"\"\n    def __init__(self, roots, children, descendp):\n        self.roots = roots[:]\n        self.state = []\n        self.children = children\n        self.descendp = descendp\n\n    def next(self):\n        if not self.state:\n            if not self.roots:\n                return None\n            root = self.roots.pop(0)\n            self.state = [(root, self.children(root))]\n        node, children = self.state[-1]\n        if not children:\n            self.state.pop()\n            return self.next()\n        child = children.pop(0)\n        if self.descendp(child):\n            self.state.append((child, self.children(child)))\n        return child\n\n\nclass ModuleScanner:\n    \"\"\"An interruptible scanner that searches module synopses.\"\"\"\n\n    def run(self, callback, key=None, completer=None, onerror=None):\n        if key: key = lower(key)\n        self.quit = False\n        seen = {}\n\n        for modname in sys.builtin_module_names:\n            if modname != '__main__':\n                seen[modname] = 1\n                if key is None:\n                    callback(None, modname, '')\n                else:\n                    desc = split(__import__(modname).__doc__ or '', '\\n')[0]\n                    if find(lower(modname + ' - ' + desc), key) >= 0:\n                        callback(None, modname, desc)\n\n        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):\n            if self.quit:\n                break\n            if key is None:\n                callback(None, modname, '')\n            else:\n                loader = importer.find_module(modname)\n                if hasattr(loader,'get_source'):\n                    import StringIO\n                    desc = source_synopsis(\n                        StringIO.StringIO(loader.get_source(modname))\n                    ) or ''\n                    if hasattr(loader,'get_filename'):\n                        path = loader.get_filename(modname)\n                    else:\n                        path = None\n                else:\n                    module = loader.load_module(modname)\n                    desc = (module.__doc__ or '').splitlines()[0]\n                    path = getattr(module,'__file__',None)\n                if find(lower(modname + ' - ' + desc), key) >= 0:\n                    callback(path, modname, desc)\n\n        if completer:\n            completer()\n\ndef apropos(key):\n    \"\"\"Print all the one-line module summaries that contain a substring.\"\"\"\n    def callback(path, modname, desc):\n        if modname[-9:] == '.__init__':\n            modname = modname[:-9] + ' (package)'\n        print modname, desc and '- ' + desc\n    def onerror(modname):\n        pass\n    with warnings.catch_warnings():\n        warnings.filterwarnings('ignore') # ignore problems during import\n        ModuleScanner().run(callback, key, onerror=onerror)\n\n# --------------------------------------------------- web browser interface\n\ndef serve(port, callback=None, completer=None):\n    import BaseHTTPServer, mimetools, select\n\n    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.\n    class Message(mimetools.Message):\n        def __init__(self, fp, seekable=1):\n            Message = self.__class__\n            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)\n      
      self.encodingheader = self.getheader('content-transfer-encoding')\n            self.typeheader = self.getheader('content-type')\n            self.parsetype()\n            self.parseplist()\n\n    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n        def send_document(self, title, contents):\n            try:\n                self.send_response(200)\n                self.send_header('Content-Type', 'text/html')\n                self.end_headers()\n                self.wfile.write(html.page(title, contents))\n            except IOError: pass\n\n        def do_GET(self):\n            path = self.path\n            if path[-5:] == '.html': path = path[:-5]\n            if path[:1] == '/': path = path[1:]\n            if path and path != '.':\n                try:\n                    obj = locate(path, forceload=1)\n                except ErrorDuringImport, value:\n                    self.send_document(path, html.escape(str(value)))\n                    return\n                if obj:\n                    self.send_document(describe(obj), html.document(obj, path))\n                else:\n                    self.send_document(path,\n'no Python documentation found for %s' % repr(path))\n            else:\n                heading = html.heading(\n'<big><big><strong>Python: Index of Modules</strong></big></big>',\n'#ffffff', '#7799ee')\n                def bltinlink(name):\n                    return '<a href=\"%s.html\">%s</a>' % (name, name)\n                names = filter(lambda x: x != '__main__',\n                               sys.builtin_module_names)\n                contents = html.multicolumn(names, bltinlink)\n                indices = ['<p>' + html.bigsection(\n                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]\n\n                seen = {}\n                for dir in sys.path:\n                    indices.append(html.index(dir, seen))\n                contents = heading + join(indices) + '''<p align=right>\n<font color=\"#909090\" face=\"helvetica, arial\"><strong>\npydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''\n                self.send_document('Index of Modules', contents)\n\n        def log_message(self, *args): pass\n\n    class DocServer(BaseHTTPServer.HTTPServer):\n        def __init__(self, port, callback):\n            host = ''\n            self.address = (host, port)\n            self.url = 'http://%s:%d/' % (host, port)\n            self.callback = callback\n            self.base.__init__(self, self.address, self.handler)\n\n        def serve_until_quit(self):\n            import select\n            self.quit = False\n            while not self.quit:\n                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)\n                if rd: self.handle_request()\n\n        def server_activate(self):\n            self.base.server_activate(self)\n            if self.callback: self.callback(self)\n\n    DocServer.base = BaseHTTPServer.HTTPServer\n    DocServer.handler = DocHandler\n    DocHandler.MessageClass = Message\n    try:\n        try:\n            DocServer(port, callback).serve_until_quit()\n        except (KeyboardInterrupt, select.error):\n            pass\n    finally:\n        if completer: completer()\n\n# ----------------------------------------------------- graphical interface\n\ndef gui():\n    \"\"\"Graphical interface (starts web server and pops up a control window).\"\"\"\n    class GUI:\n        def __init__(self, window, port=7464):\n            self.window = window\n            
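# Filled in lazily: 'server' is set once serve() reports ready, and 'scanner'\n            # holds a ModuleScanner while a search is in flight.\n            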
self.server = None\n            self.scanner = None\n\n            import Tkinter\n            self.server_frm = Tkinter.Frame(window)\n            self.title_lbl = Tkinter.Label(self.server_frm,\n                text='Starting server...\\n ')\n            self.open_btn = Tkinter.Button(self.server_frm,\n                text='open browser', command=self.open, state='disabled')\n            self.quit_btn = Tkinter.Button(self.server_frm,\n                text='quit serving', command=self.quit, state='disabled')\n\n            self.search_frm = Tkinter.Frame(window)\n            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')\n            self.search_ent = Tkinter.Entry(self.search_frm)\n            self.search_ent.bind('<Return>', self.search)\n            self.stop_btn = Tkinter.Button(self.search_frm,\n                text='stop', pady=0, command=self.stop, state='disabled')\n            if sys.platform == 'win32':\n                # Trying to hide and show this button crashes under Windows.\n                self.stop_btn.pack(side='right')\n\n            self.window.title('pydoc')\n            self.window.protocol('WM_DELETE_WINDOW', self.quit)\n            self.title_lbl.pack(side='top', fill='x')\n            self.open_btn.pack(side='left', fill='x', expand=1)\n            self.quit_btn.pack(side='right', fill='x', expand=1)\n            self.server_frm.pack(side='top', fill='x')\n\n            self.search_lbl.pack(side='left')\n            self.search_ent.pack(side='right', fill='x', expand=1)\n            self.search_frm.pack(side='top', fill='x')\n            self.search_ent.focus_set()\n\n            font = ('helvetica', sys.platform == 'win32' and 8 or 10)\n            self.result_lst = Tkinter.Listbox(window, font=font, height=6)\n            self.result_lst.bind('<Button-1>', self.select)\n            self.result_lst.bind('<Double-Button-1>', self.goto)\n            self.result_scr = Tkinter.Scrollbar(window,\n                orient='vertical', command=self.result_lst.yview)\n            self.result_lst.config(yscrollcommand=self.result_scr.set)\n\n            self.result_frm = Tkinter.Frame(window)\n            self.goto_btn = Tkinter.Button(self.result_frm,\n                text='go to selected', command=self.goto)\n            self.hide_btn = Tkinter.Button(self.result_frm,\n                text='hide results', command=self.hide)\n            self.goto_btn.pack(side='left', fill='x', expand=1)\n            self.hide_btn.pack(side='right', fill='x', expand=1)\n\n            self.window.update()\n            self.minwidth = self.window.winfo_width()\n            self.minheight = self.window.winfo_height()\n            self.bigminheight = (self.server_frm.winfo_reqheight() +\n                                 self.search_frm.winfo_reqheight() +\n                                 self.result_lst.winfo_reqheight() +\n                                 self.result_frm.winfo_reqheight())\n            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight\n            self.expanded = 0\n            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))\n            self.window.wm_minsize(self.minwidth, self.minheight)\n            self.window.tk.willdispatch()\n\n            import threading\n            threading.Thread(\n                target=serve, args=(port, self.ready, self.quit)).start()\n\n        def ready(self, server):\n            self.server = server\n            self.title_lbl.config(\n                text='Python documentation 
server at\\n' + server.url)\n            self.open_btn.config(state='normal')\n            self.quit_btn.config(state='normal')\n\n        def open(self, event=None, url=None):\n            url = url or self.server.url\n            try:\n                import webbrowser\n                webbrowser.open(url)\n            except ImportError: # pre-webbrowser.py compatibility\n                if sys.platform == 'win32':\n                    os.system('start \"%s\"' % url)\n                else:\n                    rc = os.system('netscape -remote \"openURL(%s)\" &' % url)\n                    if rc: os.system('netscape \"%s\" &' % url)\n\n        def quit(self, event=None):\n            if self.server:\n                self.server.quit = 1\n            self.window.quit()\n\n        def search(self, event=None):\n            key = self.search_ent.get()\n            self.stop_btn.pack(side='right')\n            self.stop_btn.config(state='normal')\n            self.search_lbl.config(text='Searching for \"%s\"...' % key)\n            self.search_ent.forget()\n            self.search_lbl.pack(side='left')\n            self.result_lst.delete(0, 'end')\n            self.goto_btn.config(state='disabled')\n            self.expand()\n\n            import threading\n            if self.scanner:\n                self.scanner.quit = 1\n            self.scanner = ModuleScanner()\n            threading.Thread(target=self.scanner.run,\n                             args=(self.update, key, self.done)).start()\n\n        def update(self, path, modname, desc):\n            if modname[-9:] == '.__init__':\n                modname = modname[:-9] + ' (package)'\n            self.result_lst.insert('end',\n                modname + ' - ' + (desc or '(no description)'))\n\n        def stop(self, event=None):\n            if self.scanner:\n                self.scanner.quit = 1\n                self.scanner = None\n\n        def done(self):\n            self.scanner = None\n            self.search_lbl.config(text='Search for')\n            self.search_lbl.pack(side='left')\n            self.search_ent.pack(side='right', fill='x', expand=1)\n            if sys.platform != 'win32': self.stop_btn.forget()\n            self.stop_btn.config(state='disabled')\n\n        def select(self, event=None):\n            self.goto_btn.config(state='normal')\n\n        def goto(self, event=None):\n            selection = self.result_lst.curselection()\n            if selection:\n                modname = split(self.result_lst.get(selection[0]))[0]\n                self.open(url=self.server.url + modname + '.html')\n\n        def collapse(self):\n            if not self.expanded: return\n            self.result_frm.forget()\n            self.result_scr.forget()\n            self.result_lst.forget()\n            self.bigwidth = self.window.winfo_width()\n            self.bigheight = self.window.winfo_height()\n            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))\n            self.window.wm_minsize(self.minwidth, self.minheight)\n            self.expanded = 0\n\n        def expand(self):\n            if self.expanded: return\n            self.result_frm.pack(side='bottom', fill='x')\n            self.result_scr.pack(side='right', fill='y')\n            self.result_lst.pack(side='top', fill='both', expand=1)\n            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))\n            self.window.wm_minsize(self.minwidth, self.bigminheight)\n            self.expanded = 1\n\n        def hide(self, 
event=None):\n            self.stop()\n            self.collapse()\n\n    import Tkinter\n    try:\n        root = Tkinter.Tk()\n        # Tk will crash if pythonw.exe has an XP .manifest\n        # file and the root is not destroyed explicitly.\n        # If the problem is ever fixed in Tk, the explicit\n        # destroy can go.\n        try:\n            gui = GUI(root)\n            root.mainloop()\n        finally:\n            root.destroy()\n    except KeyboardInterrupt:\n        pass\n\n# -------------------------------------------------- command-line interface\n\ndef ispath(x):\n    return isinstance(x, str) and find(x, os.sep) >= 0\n\ndef cli():\n    \"\"\"Command-line interface (looks at sys.argv to decide what to do).\"\"\"\n    import getopt\n    class BadUsage: pass\n\n    # Scripts don't get the current directory in their path by default\n    # unless they are run with the '-m' switch\n    if '' not in sys.path:\n        scriptdir = os.path.dirname(sys.argv[0])\n        if scriptdir in sys.path:\n            sys.path.remove(scriptdir)\n        sys.path.insert(0, '.')\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')\n        writing = 0\n\n        for opt, val in opts:\n            if opt == '-g':\n                gui()\n                return\n            if opt == '-k':\n                apropos(val)\n                return\n            if opt == '-p':\n                try:\n                    port = int(val)\n                except ValueError:\n                    raise BadUsage\n                def ready(server):\n                    print 'pydoc server ready at %s' % server.url\n                def stopped():\n                    print 'pydoc server stopped'\n                serve(port, ready, stopped)\n                return\n            if opt == '-w':\n                writing = 1\n\n        if not args: raise BadUsage\n        for arg in args:\n            if ispath(arg) and not os.path.exists(arg):\n                print 'file %r does not exist' % arg\n                break\n            try:\n                if ispath(arg) and os.path.isfile(arg):\n                    arg = importfile(arg)\n                if writing:\n                    if ispath(arg) and os.path.isdir(arg):\n                        writedocs(arg)\n                    else:\n                        writedoc(arg)\n                else:\n                    help.help(arg)\n            except ErrorDuringImport, value:\n                print value\n\n    except (getopt.error, BadUsage):\n        cmd = os.path.basename(sys.argv[0])\n        print \"\"\"pydoc - the Python documentation tool\n\n%s <name> ...\n    Show text documentation on something.  <name> may be the name of a\n    Python keyword, topic, function, module, or package, or a dotted\n    reference to a class or function within a module or module in a\n    package.  If <name> contains a '%s', it is used as the path to a\n    Python source file to document. If name is 'keywords', 'topics',\n    or 'modules', a listing of these things is displayed.\n\n%s -k <keyword>\n    Search for a keyword in the synopsis lines of all available modules.\n\n%s -p <port>\n    Start an HTTP server on the given port on the local machine.\n\n%s -g\n    Pop up a graphical interface for finding and serving documentation.\n\n%s -w <name> ...\n    Write out the HTML documentation for a module to a file in the current\n    directory.  
If <name> contains a '%s', it is treated as a filename; if\n    it names a directory, documentation is written for all the contents.\n\"\"\" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)\n\nif __name__ == '__main__': cli()\n"
  },
  {
    "path": "Utils/HandlerUtil.py",
    "content": "﻿#\n# Handler library for Linux IaaS\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n\n  }\n}]\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. 
Waiting for a new configuration from user.\"\n    }\n}\nExample Status Report:\n[{\"version\":\"1.0\",\"timestampUTC\":\"2014-05-29T04:20:13Z\",\"status\":{\"name\":\"Chef Extension Handler\",\"operation\":\"chef-client-run\",\"status\":\"success\",\"code\":0,\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"Chef-client run success\"}}}]\n\n\"\"\"\n\nimport os\nimport os.path\nimport sys\nimport base64\nimport json\nimport time\nimport re\nimport subprocess\n# imp was deprecated in python 3.12\nif sys.version_info >= (3, 12):\n    import importlib\nelse:\n    import imp\n\nfrom xml.etree import ElementTree\nfrom os.path import join\nfrom Utils.WAAgentUtil import waagent\nfrom waagent import LoggerInit\n\nDateTimeFormat = \"%Y-%m-%dT%H:%M:%SZ\"\n\nMANIFEST_XML = \"manifest.xml\"\n\n\nclass HandlerContext:\n    def __init__(self, name):\n        self._name = name\n        self._version = '0.0'\n        self._config_dir = None\n        self._log_dir = None\n        self._log_file = None\n        self._status_dir = None\n        self._heartbeat_file = None\n        self._seq_no = -1\n        self._status_file = None\n        self._settings_file = None\n        self._config = None\n        return\n\n\nclass HandlerUtility:\n    def __init__(self, log, error, s_name=None, l_name=None, extension_version=None, logFileName='extension.log',\n                 console_logger=None, file_logger=None):\n        self._log = log\n        self._log_to_con = console_logger\n        self._log_to_file = file_logger\n        self._error = error\n        self._logFileName = logFileName\n        if s_name is None or l_name is None or extension_version is None:\n            (l_name, s_name, extension_version) = self._get_extension_info()\n\n        self._short_name = s_name\n        self._extension_version = extension_version\n        self._log_prefix = '[%s-%s] ' % (l_name, extension_version)\n\n    def get_extension_version(self):\n        return self._extension_version\n\n    def _get_log_prefix(self):\n        return self._log_prefix\n\n    def _get_extension_info(self):\n        if os.path.isfile(MANIFEST_XML):\n            return self._get_extension_info_manifest()\n\n        ext_dir = os.path.basename(os.getcwd())\n        (long_name, version) = ext_dir.split('-')\n        short_name = long_name.split('.')[-1]\n\n        return long_name, short_name, version\n\n    def _get_extension_info_manifest(self):\n        with open(MANIFEST_XML) as fh:\n            doc = ElementTree.parse(fh)\n            namespace = doc.find('{http://schemas.microsoft.com/windowsazure}ProviderNameSpace').text\n            short_name = doc.find('{http://schemas.microsoft.com/windowsazure}Type').text\n            version = doc.find('{http://schemas.microsoft.com/windowsazure}Version').text\n\n            long_name = \"%s.%s\" % (namespace, short_name)\n            return (long_name, short_name, version)\n\n    def _get_current_seq_no(self, config_folder):\n        seq_no = -1\n        cur_seq_no = -1\n        freshest_time = None\n        for subdir, dirs, files in os.walk(config_folder):\n            for file in files:\n                try:\n                    cur_seq_no = int(os.path.basename(file).split('.')[0])\n                    if (freshest_time == None):\n                        freshest_time = os.path.getmtime(join(config_folder, file))\n                        seq_no = cur_seq_no\n                    else:\n                        current_file_m_time = os.path.getmtime(join(config_folder, file))\n                     
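   # A strictly newer mtime wins: the active sequence number is taken from the\n                        # most recently modified N.settings file in the config folder.\n                     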
   if (current_file_m_time > freshest_time):\n                            freshest_time = current_file_m_time\n                            seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return seq_no\n\n    def log(self, message):\n        self._log(self._get_log_prefix() + message)\n\n    def log_to_console(self, message):\n        if self._log_to_con is not None:\n            self._log_to_con(self._get_log_prefix() + message)\n        else:\n            self.error(\"Unable to log to console, console log method not set\")\n\n    def log_to_file(self, message):\n        if self._log_to_file is not None:\n            self._log_to_file(self._get_log_prefix() + message)\n        else:\n            self.error(\"Unable to log to file, file log method not set\")\n\n    def error(self, message):\n        self._error(self._get_log_prefix() + message)\n\n    @staticmethod\n    def redact_protected_settings(content):\n        redacted_tmp = re.sub(r'\"protectedSettings\":\\s*\"[^\"]+==\"', '\"protectedSettings\": \"*** REDACTED ***\"', content)\n        redacted = re.sub(r'\"protectedSettingsCertThumbprint\":\\s*\"[^\"]+\"', '\"protectedSettingsCertThumbprint\": \"*** REDACTED ***\"', redacted_tmp)\n        return redacted\n\n    def _parse_config(self, ctxt):\n        config = None\n        try:\n            config = json.loads(ctxt)\n        except:\n            self.error('JSON exception decoding ' + HandlerUtility.redact_protected_settings(ctxt))\n\n        if config is None:\n            self.error(\"JSON error processing settings file:\" + HandlerUtility.redact_protected_settings(ctxt))\n        else:\n            handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n            if 'protectedSettings' in handlerSettings and \\\n                    'protectedSettingsCertThumbprint' in handlerSettings and \\\n                    handlerSettings['protectedSettings'] is not None and \\\n                    handlerSettings[\"protectedSettingsCertThumbprint\"] is not None:\n                protectedSettings = handlerSettings['protectedSettings']\n                thumb = handlerSettings['protectedSettingsCertThumbprint']\n                cert = waagent.LibDir + '/' + thumb + '.crt'\n                pkey = waagent.LibDir + '/' + thumb + '.prv'\n                unencodedSettings = base64.standard_b64decode(protectedSettings)\n\n                # FIPS 140-3: use 'openssl cms' (supports AES256 & DES_EDE3_CBC) with fallback to legacy 'openssl smime'\n                cms_cmd = 'openssl cms -inform DER -decrypt -recip {0} -inkey {1}'.format(cert,pkey)\n                smime_cmd = 'openssl smime -inform DER -decrypt -recip {0} -inkey {1}'.format(cert,pkey)\n\n                protected_settings_str = ''\n                for decrypt_cmd in [cms_cmd, smime_cmd]:\n                    try:\n                        # waagent.RunSendStdin returns a tuple (return code, stdout)\n                        output = waagent.RunSendStdin(decrypt_cmd, unencodedSettings)\n                        if output and output[0] == 0 and output[1]:\n                            protected_settings_str = output[1]\n                            if decrypt_cmd == cms_cmd:\n                                self.log('Decrypted protectedSettings using openssl cms.')\n                            else:\n                                self.log('Decrypted protectedSettings using openssl smime fallback.')\n                            break\n                        else:\n                  
           rc = output[0] if output else 'N/A'\n                            self.log('Attempt to decrypt protectedSettings with \"{0}\" failed (rc={1}).'.format(decrypt_cmd, rc))\n                    except OSError:\n                        pass\n\n                jctxt = ''\n                try:\n                    jctxt = json.loads(protected_settings_str)\n                except:\n                    self.error('JSON exception decoding ' + HandlerUtility.redact_protected_settings(protected_settings_str))\n                handlerSettings['protectedSettings'] = jctxt\n                self.log('Config decoded correctly.')\n        return config\n\n    def do_parse_context(self, operation):\n        _context = self.try_parse_context()\n        if not _context:\n            self.do_exit(1, operation, 'error', '1', operation + ' Failed')\n        return _context\n\n    def try_parse_context(self):\n        self._context = HandlerContext(self._short_name)\n        handler_env = None\n        config = None\n        ctxt = None\n        code = 0\n        # get the HandlerEnvironment.json. According to the extension handler spec, it is always in the ./ directory\n        self.log('cwd is ' + os.path.realpath(os.path.curdir))\n        handler_env_file = './HandlerEnvironment.json'\n        if not os.path.isfile(handler_env_file):\n            self.error(\"Unable to locate \" + handler_env_file)\n            return None\n        ctxt = waagent.GetFileContents(handler_env_file)\n        if ctxt == None:\n            self.error(\"Unable to read \" + handler_env_file)\n        try:\n            handler_env = json.loads(ctxt)\n        except:\n            pass\n        if handler_env == None:\n            self.log(\"JSON error processing \" + handler_env_file)\n            return None\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n\n        self._context._name = handler_env['name']\n        self._context._version = str(handler_env['version'])\n        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']\n\n        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'], self._logFileName)\n        self._change_log_file()\n        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']\n        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']\n        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)\n        if self._context._seq_no < 0:\n            self.error(\"Unable to locate a .settings file!\")\n            return None\n        self._context._seq_no = str(self._context._seq_no)\n        self.log('sequence number is ' + self._context._seq_no)\n        self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')\n        self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')\n        self.log(\"settings file path is \" + self._context._settings_file)\n        ctxt = None\n        ctxt = waagent.GetFileContents(self._context._settings_file)\n        if ctxt == None:\n            error_msg = 'Unable to read ' + self._context._settings_file + '. 
'\n            self.error(error_msg)\n            return None\n\n        self.log(\"JSON config: \" + HandlerUtility.redact_protected_settings(ctxt))\n        self._context._config = self._parse_config(ctxt)\n        return self._context\n\n    def _change_log_file(self):\n        self.log(\"Change log file to \" + self._context._log_file)\n        LoggerInit(self._context._log_file, '/dev/stdout')\n        self._log = waagent.Log\n        self._error = waagent.Error\n\n    def set_verbose_log(self, verbose):\n        if (verbose == \"1\" or verbose == 1):\n            self.log(\"Enable verbose log\")\n            LoggerInit(self._context._log_file, '/dev/stdout', verbose=True)\n        else:\n            self.log(\"Disable verbose log\")\n            LoggerInit(self._context._log_file, '/dev/stdout', verbose=False)\n\n    def is_seq_smaller(self):\n        return int(self._context._seq_no) <= self._get_most_recent_seq()\n\n    def save_seq(self):\n        self._set_most_recent_seq(self._context._seq_no)\n        self.log(\"set most recent sequence number to \" + self._context._seq_no)\n\n    def exit_if_enabled(self, remove_protected_settings=False):\n        self.exit_if_seq_smaller(remove_protected_settings)\n\n    def exit_if_seq_smaller(self, remove_protected_settings):\n        if(self.is_seq_smaller()):\n            self.log(\"Current sequence number, \" + self._context._seq_no + \", is not greater than the sequence number of the most recent executed configuration. Exiting...\")\n            sys.exit(0)\n        self.save_seq()\n\n        if remove_protected_settings:\n            self.scrub_settings_file()\n\n    def _get_most_recent_seq(self):\n        if (os.path.isfile('mrseq')):\n            seq = waagent.GetFileContents('mrseq')\n            if (seq):\n                return int(seq)\n\n        return -1\n\n    def is_current_config_seq_greater_inused(self):\n        return int(self._context._seq_no) > self._get_most_recent_seq()\n\n    def get_inused_config_seq(self):\n        return self._get_most_recent_seq()\n\n    def set_inused_config_seq(self, seq):\n        self._set_most_recent_seq(seq)\n\n    def _set_most_recent_seq(self, seq):\n        waagent.SetFileContents('mrseq', str(seq))\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.log(\"{0},{1},{2},{3}\".format(operation, status, status_code, message))\n        tstamp = time.strftime(DateTimeFormat, time.gmtime())\n        stat = [{\n            \"version\": self._context._version,\n            \"timestampUTC\": tstamp,\n            \"status\": {\n                \"name\": self._context._name,\n                \"operation\": operation,\n                \"status\": status,\n                \"code\": status_code,\n                \"formattedMessage\": {\n                    \"lang\": \"en-US\",\n                    \"message\": message\n                }\n            }\n        }]\n        stat_rept = json.dumps(stat)\n        if self._context._status_file:\n            tmp = \"%s.tmp\" % (self._context._status_file)\n            with open(tmp, 'w+') as f:\n                f.write(stat_rept)\n            os.rename(tmp, self._context._status_file)\n\n    def do_heartbeat_report(self, heartbeat_file, status, code, message):\n        # heartbeat\n        health_report = '[{\"version\":\"1.0\",\"heartbeat\":{\"status\":\"' + status + '\",\"code\":\"' + code + '\",\"Message\":\"' + message + '\"}}]'\n        if waagent.SetFileContents(heartbeat_file, health_report) == None:\n            
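# SetFileContents returning None is treated as a failed write; record it in the extension log.\n            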
self.error('Unable to write heartbeat info to ' + heartbeat_file)\n\n    def do_exit(self, exit_code, operation, status, code, message):\n        try:\n            self.do_status_report(operation, status, code, message)\n        except Exception as e:\n            self.log(\"Can't update status: \" + str(e))\n        sys.exit(exit_code)\n\n    def get_name(self):\n        return self._context._name\n\n    def get_seq_no(self):\n        return self._context._seq_no\n\n    def get_log_dir(self):\n        return self._context._log_dir\n\n    def get_handler_settings(self):\n        if (self._context._config != None):\n            return self._context._config['runtimeSettings'][0]['handlerSettings']\n        return None\n\n    def get_protected_settings(self):\n        if (self._context._config != None):\n            return self.get_handler_settings().get('protectedSettings')\n        return None\n\n    def get_public_settings(self):\n        handlerSettings = self.get_handler_settings()\n        if (handlerSettings != None):\n            return self.get_handler_settings().get('publicSettings')\n        return None\n\n    def scrub_settings_file(self):\n        content = waagent.GetFileContents(self._context._settings_file)\n        redacted = HandlerUtility.redact_protected_settings(content)\n\n        waagent.SetFileContents(self._context._settings_file, redacted)\n"
  },
  {
    "path": "Utils/LogUtil.py",
    "content": "# Logging utilities \n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport os.path\nimport string\nimport sys\n\nOutputSize = 4 * 1024\n\n\ndef tail(log_file, output_size = OutputSize):\n    pos = min(output_size, os.path.getsize(log_file))\n    with open(log_file, \"r\") as log:\n        log.seek(0, os.SEEK_END)\n        log.seek(log.tell() - pos, os.SEEK_SET)\n        buf = log.read(output_size)\n        buf = filter(lambda x: x in string.printable, buf)\n\n        # encoding works different for between interpreter version, we are keeping separate implementation to ensure\n        # backward compatibility\n        if sys.version_info[0] == 3:\n            buf = ''.join(list(buf)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n        elif sys.version_info[0] == 2:\n            buf = buf.decode(\"ascii\", \"ignore\")\n\n        return buf\n\n\ndef get_formatted_log(summary, stdout, stderr):\n    msg_format = (\"{0}\\n\"\n                  \"---stdout---\\n\"\n                  \"{1}\\n\"\n                  \"---errout---\\n\"\n                  \"{2}\\n\")\n    return msg_format.format(summary, stdout, stderr)"
  },
  {
    "path": "Utils/ScriptUtil.py",
    "content": "# Script utilities\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport os.path\nimport time\nimport subprocess\nimport traceback\nimport string\nimport shlex\nimport sys\n\nfrom Utils import LogUtil\nfrom Utils.WAAgentUtil import waagent\n\nDefaultStdoutFile = \"stdout\"\nDefaultErroutFile = \"errout\"\n\n\ndef run_command(hutil, args, cwd, operation, extension_short_name, version, exit_after_run=True, interval=30,\n                std_out_file_name=DefaultStdoutFile, std_err_file_name=DefaultErroutFile):\n    std_out_file = os.path.join(cwd, std_out_file_name)\n    err_out_file = os.path.join(cwd, std_err_file_name)\n    std_out = None\n    err_out = None\n    try:\n        std_out = open(std_out_file, \"w\")\n        err_out = open(err_out_file, \"w\")\n        start_time = time.time()\n        child = subprocess.Popen(args,\n                                 cwd=cwd,\n                                 stdout=std_out,\n                                 stderr=err_out)\n        time.sleep(1)\n        while child.poll() is None:\n            msg = \"Command is running...\"\n            msg_with_cmd_output = LogUtil.get_formatted_log(msg, LogUtil.tail(std_out_file), LogUtil.tail(err_out_file))\n            msg_without_cmd_output = msg + \" Stdout/Stderr omitted from output.\"\n\n            hutil.log_to_file(msg_with_cmd_output)\n            hutil.log_to_console(msg_without_cmd_output)\n            hutil.do_status_report(operation, 'transitioning', '0', msg_without_cmd_output)\n            time.sleep(interval)\n\n        exit_code = child.returncode\n        if child.returncode and child.returncode != 0:\n            msg = \"Command returned an error.\"\n            msg_with_cmd_output = LogUtil.get_formatted_log(msg, LogUtil.tail(std_out_file), LogUtil.tail(err_out_file))\n            msg_without_cmd_output = msg + \" Stdout/Stderr omitted from output.\"\n\n            hutil.error(msg_without_cmd_output)\n            waagent.AddExtensionEvent(name=extension_short_name,\n                                      op=operation,\n                                      isSuccess=False,\n                                      version=version,\n                                      message=\"(01302)\" + msg_without_cmd_output)\n        else:\n            msg = \"Command is finished.\"\n            msg_with_cmd_output = LogUtil.get_formatted_log(msg, LogUtil.tail(std_out_file), LogUtil.tail(err_out_file))\n            msg_without_cmd_output = msg + \" Stdout/Stderr omitted from output.\"\n\n            hutil.log_to_file(msg_with_cmd_output)\n            hutil.log_to_console(msg_without_cmd_output)\n            waagent.AddExtensionEvent(name=extension_short_name,\n                                      op=operation,\n                                      isSuccess=True,\n                                      version=version,\n                                      message=\"(01302)\" + msg_without_cmd_output)\n            
end_time = time.time()\n            waagent.AddExtensionEvent(name=extension_short_name,\n                                      op=operation,\n                                      isSuccess=True,\n                                      version=version,\n                                      message=(\"(01304)Command execution time: \"\n                                               \"{0}s\").format(str(end_time - start_time)))\n\n        log_or_exit(hutil, exit_after_run, exit_code, operation, msg_with_cmd_output)\n    except Exception as e:\n        error_msg = (\"Failed to launch command with error: {0},\"\n                     \"stacktrace: {1}\").format(e, traceback.format_exc())\n        hutil.error(error_msg)\n        waagent.AddExtensionEvent(name=extension_short_name,\n                                  op=operation,\n                                  isSuccess=False,\n                                  version=version,\n                                  message=\"(01101)\" + error_msg)\n        exit_code = 1\n        msg = 'Launch command failed: {0}'.format(e)\n\n        log_or_exit(hutil, exit_after_run, exit_code, operation, msg)\n    finally:\n        if std_out:\n            std_out.close()\n        if err_out:\n            err_out.close()\n    return exit_code\n\n\n# do_exit calls sys.exit, which raises an exception, so we do not call it from the finally block\ndef log_or_exit(hutil, exit_after_run, exit_code, operation, msg):\n    status = 'success' if exit_code == 0 else 'failed'\n    if exit_after_run:\n        hutil.do_exit(exit_code, operation, status, str(exit_code), msg)\n    else:\n        hutil.do_status_report(operation, status, str(exit_code), msg)\n\n\ndef parse_args(cmd):\n    cmd = filter(lambda x: x in string.printable, cmd)\n\n    # encoding works differently between interpreter versions; we keep separate\n    # implementations to ensure backward compatibility\n    if sys.version_info[0] == 3:\n        cmd = ''.join(list(cmd)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n    elif sys.version_info[0] == 2:\n        cmd = cmd.decode(\"ascii\", \"ignore\")\n\n    args = shlex.split(cmd)\n    # From python 2.6 to python 2.7.2, shlex.split outputs UCS-4 results like\n    # '\\x00\\x00a'. Temp workaround is to replace \\x00\n    for idx, val in enumerate(args):\n        if '\\x00' in args[idx]:\n            args[idx] = args[idx].replace('\\x00', '')\n    return args\n\n\n"
  },
  {
    "path": "Utils/WAAgentUtil.py",
    "content": "# Wrapper module for waagent\n#\n# waagent is not written as a module. This wrapper module is created \n# to use the waagent code as a module.\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport sys\n# imp was deprecated in python 3.12\nif sys.version_info >= (3, 12):\n    import importlib\n    import types\nelse:\n    import imp\n\n#\n# The following code will search and load waagent code and expose\n# it as a submodule of current module\n#\ndef searchWAAgent():\n    # if the extension ships waagent in its package to default to this version first\n    pkg_agent_path = os.path.join(os.getcwd(), 'waagent')\n    if os.path.isfile(pkg_agent_path):\n        return pkg_agent_path\n\n    agentPath = '/usr/sbin/waagent'\n    if os.path.isfile(agentPath):\n        return agentPath\n\n    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n    for user_path in user_paths:\n        agentPath = os.path.join(user_path, 'waagent')\n        if os.path.isfile(agentPath):\n            return agentPath\n    return None\n\nwaagent = None\nagentPath = searchWAAgent()\nif agentPath:\n    # imp was deprecated in python 3.12\n    if sys.version_info >= (3, 12):\n        # Create a module spec from the waagent python file, then create module from spec and load it\n        loader = importlib.machinery.SourceFileLoader('waagent', agentPath)\n        code = loader.get_code(loader.name)\n        waagent = types.ModuleType(loader.name)\n        exec(code, waagent.__dict__)\n        # Add the module to sys.modules\n        sys.modules['waagent'] = waagent\n    else:\n        waagent = imp.load_source('waagent', agentPath)\nelse:\n    raise Exception(\"Can't load waagent.\")\n\nif not hasattr(waagent, \"AddExtensionEvent\"):\n    \"\"\"\n    If AddExtensionEvent is not defined, provide a dummy impl.\n    \"\"\"\n    def _AddExtensionEvent(*args, **kwargs):\n        pass\n    waagent.AddExtensionEvent = _AddExtensionEvent\n\nif not hasattr(waagent, \"WALAEventOperation\"):\n    class _WALAEventOperation:\n        HeartBeat=\"HeartBeat\"\n        Provision = \"Provision\"\n        Install = \"Install\"\n        UnIsntall = \"UnInstall\"\n        Disable = \"Disable\"\n        Enable = \"Enable\"\n        Download = \"Download\"\n        Upgrade = \"Upgrade\"\n        Update = \"Update\"           \n    waagent.WALAEventOperation = _WALAEventOperation\n\n# Better deal with the silly waagent typo, in anticipation of a proper fix of the typo later on waagent\nif not hasattr(waagent.WALAEventOperation, 'Uninstall'):\n    if hasattr(waagent.WALAEventOperation, 'UnIsntall'):\n        waagent.WALAEventOperation.Uninstall = waagent.WALAEventOperation.UnIsntall\n    else:  # This shouldn't happen, but just in case...\n        waagent.WALAEventOperation.Uninstall = 'Uninstall'\n\n\ndef GetWaagentHttpProxyConfigString():\n    \"\"\"\n    Get http_proxy and https_proxy from waagent config.\n    Username and password is not supported 
now.\n    This code is adapted from /usr/sbin/waagent\n    \"\"\"\n    host = None\n    port = None\n    try:\n        waagent.Config = waagent.ConfigurationProvider(None)  # Use default waagent conf file (most likely /etc/waagent.conf)\n\n        host = waagent.Config.get(\"HttpProxy.Host\")\n        port = waagent.Config.get(\"HttpProxy.Port\")\n    except Exception:\n        # waagent.ConfigurationProvider(None) will throw an exception on an old waagent.\n        # Swallow it silently: logging is not available yet, and if the call fails\n        # there is no proxy config in waagent.conf anyway.\n        pass\n\n    result = ''\n    if host is not None:\n        result = \"http://\" + host\n        if port is not None:\n            result += \":\" + port\n\n    return result\n\n\nwaagent.HttpProxyConfigString = GetWaagentHttpProxyConfigString()\n\n# end: waagent http proxy config stuff\n\n__ExtensionName__ = None\n\n\ndef InitExtensionEventLog(name):\n    global __ExtensionName__\n    __ExtensionName__ = name\n\n\n# Note: the default for 'name' is captured when this def is evaluated (None), not\n# when the function is called; pass name explicitly after InitExtensionEventLog().\ndef AddExtensionEvent(name=__ExtensionName__,\n                      op=waagent.WALAEventOperation.Enable,\n                      isSuccess=False,\n                      message=None):\n    if name is not None:\n        waagent.AddExtensionEvent(name=name,\n                                  op=op,\n                                  isSuccess=isSuccess,\n                                  message=message)\n"
  },
  {
    "path": "Utils/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "Utils/constants.py",
    "content": "LibDir = \"/var/lib/waagent\"\n\nOpenssl = \"openssl\"\nos_release = \"/etc/os-release\"\nsystem_release = \"/etc/system-release\"\n\n\nclass WALAEventOperation:\n    HeartBeat = \"HeartBeat\"\n    Provision = \"Provision\"\n    Install = \"Install\"\n    UnInstall = \"UnInstall\"\n    Disable = \"Disable\"\n    Enable = \"Enable\"\n    Download = \"Download\"\n    Upgrade = \"Upgrade\"\n    Update = \"Update\"\n"
  },
  {
    "path": "Utils/crypt_fallback.py",
    "content": "\"\"\"\nFallback crypt implementation using ctypes for Python 3.13+.\n\nThis module provides crypt.crypt() functionality without requiring pip install\nby directly calling the system's libxcrypt/libcrypt library via ctypes.\n\nUsage:\n    # In your code, use this import pattern:\n    try:\n        import crypt\n    except ImportError:\n        try:\n            import crypt_r as crypt\n        except ImportError:\n            from Common import crypt_fallback as crypt\n\"\"\"\n\nimport ctypes\nimport ctypes.util\nimport string\nimport random\n\n__all__ = ['crypt', 'mksalt', 'METHOD_SHA512', 'METHOD_SHA256', 'methods']\n\n\n# Try to load libcrypt\n_libcrypt = None\n_libcrypt_path = ctypes.util.find_library(\"crypt\")\n\nif _libcrypt_path:\n    try:\n        _libcrypt = ctypes.CDLL(_libcrypt_path)\n        _libcrypt.crypt.argtypes = [ctypes.c_char_p, ctypes.c_char_p]\n        _libcrypt.crypt.restype = ctypes.c_char_p\n    except (OSError, AttributeError):\n        _libcrypt = None\n\n\nclass _Method:\n    \"\"\"Class representing a crypt method.\"\"\"\n    def __init__(self, name, ident, salt_chars, total_size):\n        self.name = name\n        self.ident = ident\n        self.salt_chars = salt_chars\n        self.total_size = total_size\n    \n    def __repr__(self):\n        return '<crypt.METHOD_{0}>'.format(self.name)\n\n\n# Define standard methods\nMETHOD_SHA512 = _Method('SHA512', '6', 16, 106)\nMETHOD_SHA256 = _Method('SHA256', '5', 16, 63)\nMETHOD_MD5 = _Method('MD5', '1', 8, 34)\nMETHOD_CRYPT = _Method('CRYPT', None, 2, 13)\n\nmethods = [METHOD_SHA512, METHOD_SHA256, METHOD_MD5, METHOD_CRYPT]\n\n\ndef mksalt(method=None, rounds=None):\n    \"\"\"Generate a salt for the specified method.\n    \n    If not specified, the strongest available method (SHA512) will be used.\n    \"\"\"\n    if method is None:\n        method = METHOD_SHA512\n    \n    saltchars = string.ascii_letters + string.digits + './'\n    \n    if method.ident:\n        salt = '${0}$'.format(method.ident)\n        if method.ident in ('5', '6') and rounds is not None:\n            if not 1000 <= rounds <= 999999999:\n                raise ValueError('rounds out of the range 1000 to 999_999_999')\n            salt += 'rounds={0}$'.format(rounds)\n    else:\n        salt = ''\n    \n    salt += ''.join(random.choice(saltchars) for _ in range(method.salt_chars))\n    return salt\n\n\ndef crypt(word, salt=None):\n    \"\"\"Return a string representing the one-way hash of a password.\n    \n    If salt is not specified, the strongest available method will be used.\n    \n    Args:\n        word: The password to hash\n        salt: The salt string (e.g., '$6$rounds=5000$saltsalt$') or a METHOD_* constant\n        \n    Returns:\n        The hashed password string\n        \n    Raises:\n        ImportError: If libcrypt is not available on the system\n    \"\"\"\n    if _libcrypt is None:\n        raise ImportError(\n            \"crypt_fallback requires libcrypt/libxcrypt. 
\"\n            \"Install with: sudo tdnf install libxcrypt (Azure Linux) or \"\n            \"sudo apt install libcrypt1 (Debian/Ubuntu)\"\n        )\n    \n    # Handle METHOD_* constants passed as salt\n    if salt is None or isinstance(salt, _Method):\n        salt = mksalt(salt)\n    \n    # Encode strings to bytes for ctypes\n    if isinstance(word, str):\n        word = word.encode('utf-8')\n    if isinstance(salt, str):\n        salt = salt.encode('utf-8')\n    \n    result = _libcrypt.crypt(word, salt)\n    \n    if result is None:\n        raise ValueError(\"crypt() failed - invalid salt or system error\")\n    \n    return result.decode('utf-8')\n"
  },
  {
    "path": "Utils/distroutils.py",
    "content": "import os\nimport pwd\nimport random\nimport string\nimport hashlib\nimport sys\n\n# crypt module was removed in Python 3.13\n# For Python < 3.11: use builtin crypt\n# For Python >= 3.11: try crypt_r package, then ctypes fallback\nif sys.version_info >= (3, 11):\n    try:\n        import crypt_r as crypt\n    except ImportError:\n        try:\n            from Utils import crypt_fallback as crypt\n        except ImportError:\n            crypt = None\nelse:\n    try:\n        import crypt\n    except ImportError:\n        try:\n            from Utils import crypt_fallback as crypt\n        except ImportError:\n            crypt = None\n\nimport platform\nimport re\nimport Utils.logger as logger\nimport Utils.extensionutils as ext_utils\nimport Utils.constants as constants\n\n\ndef get_my_distro(config, os_name=None):\n    if 'FreeBSD' in platform.system():\n        return FreeBSDDistro(config)\n\n    if os_name is None:\n        if os.path.isfile(constants.os_release):\n            os_name = ext_utils.get_line_starting_with(\"NAME\", constants.os_release)\n        elif os.path.isfile(constants.system_release):\n            os_name = ext_utils.get_file_contents(constants.system_release)\n\n    if os_name is not None:\n        if re.search(\"fedora\", os_name, re.IGNORECASE):\n            # Fedora\n            return FedoraDistro(config)\n        if re.search(\"red\\s?hat\", os_name, re.IGNORECASE):\n            # Red Hat\n            return RedhatDistro(config)\n        if re.search(\"centos\", os_name, re.IGNORECASE):\n            # CentOS\n            return CentOSDistro(config)\n        if re.search(\"coreos\", os_name, re.IGNORECASE):\n            # CoreOs\n            return CoreOSDistro(config)\n        if re.search(\"freebsd\", os_name, re.IGNORECASE):\n            # FreeBSD\n            return FreeBSDDistro(config)\n        if re.search(\"sles\", os_name, re.IGNORECASE):\n            # SuSE\n            return SuSEDistro(config)\n        if re.search(\"ubuntu\", os_name, re.IGNORECASE):\n            return UbuntuDistro(config)\n        if re.search(\"mariner\", os_name, re.IGNORECASE):\n            return MarinerDistro(config)\n    return GenericDistro(config)\n\n\n# noinspection PyMethodMayBeStatic\nclass GenericDistro(object):\n    \"\"\"\n    GenericiDstro defines a skeleton necessary for a concrete Distro class.\n\n    Generic methods and attributes are kept here, distribution specific attributes\n    and behavior are to be placed in the concrete child named distroDistro, where\n    distro is the string returned by calling python platform.linux_distribution()[0].\n    So for CentOS the derived class is called 'centosDistro'.\n    \"\"\"\n\n    def __init__(self, config):\n        \"\"\"\n        Generic Attributes go here.  
These are based on 'majority rules'.\n        This __init__() may be called or overriden by the child.\n        \"\"\"\n        self.selinux = None\n        self.service_cmd = '/usr/sbin/service'\n        self.ssh_service_restart_option = 'restart'\n        self.ssh_service_name = 'ssh'\n        self.distro_name = 'default'\n        self.config = config\n\n    def is_se_linux_system(self):\n        \"\"\"\n        Checks and sets self.selinux = True if SELinux is available on system.\n        \"\"\"\n        if self.selinux is None:\n            if ext_utils.run(['which', 'getenforce'], chk_err=False):\n                self.selinux = False\n            else:\n                self.selinux = True\n        return self.selinux\n\n    def get_home(self):\n        \"\"\"\n        Attempt to guess the $HOME location.\n        Return the path string.\n        \"\"\"\n        home = None\n        try:\n            home = ext_utils.get_line_starting_with(\"HOME\", \"/etc/default/useradd\").split('=')[1].strip()\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if (home is None) or (not home.startswith(\"/\")):\n            home = \"/home\"\n        return home\n\n    def set_se_linux_context(self, path, cn):\n        \"\"\"\n        Calls shell 'chcon' with 'path' and 'cn' context.\n        Returns exit result.\n        \"\"\"\n        if self.is_se_linux_system():\n            return ext_utils.run(['chcon', cn, path])\n\n    def restart_ssh_service(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        ssh_restart_cmd = [self.service_cmd, self.ssh_service_name, self.ssh_service_restart_option]\n        ret_code = ext_utils.run(ssh_restart_cmd)\n        if ret_code != 0:\n            logger.error(\"Failed to restart SSH service with return code:\" + str(ret_code))\n        return ret_code\n\n    def ssh_deploy_public_key(self, fprint, path):\n        \"\"\"\n        Generic sshDeployPublicKey - over-ridden in some concrete Distro classes due to minor differences\n        in openssl packages deployed\n        \"\"\"\n        keygen_retcode = ext_utils.run_command_and_write_stdout_to_file(\n            ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', fprint], path)\n        if keygen_retcode:\n            return 1\n        else:\n            return 0\n\n    def change_password(self, user, password):\n        logger.log(\"Change user password\")\n        crypt_id = self.config.get(\"Provisioning.PasswordCryptId\")\n        if crypt_id is None:\n            crypt_id = \"6\"\n\n        salt_len = self.config.get(\"Provisioning.PasswordCryptSaltLength\")\n        try:\n            salt_len = int(salt_len)\n            if salt_len < 0 or salt_len > 10:\n                salt_len = 10\n        except (ValueError, TypeError):\n            salt_len = 10\n\n        return self.chpasswd(user, password, crypt_id=crypt_id,\n                             salt_len=salt_len)\n\n    def chpasswd(self, username, password, crypt_id=6, salt_len=10):\n        passwd_hash = self.gen_password_hash(password, crypt_id, salt_len)\n        cmd = ['usermod', '-p', passwd_hash, username]\n        ret, output = ext_utils.run_command_get_output(cmd, log_cmd=False)\n        if ret != 0:\n            return \"Failed to set password for {0}: {1}\".format(username, output)\n\n    def gen_password_hash(self, password, crypt_id, salt_len):\n        collection = string.ascii_letters + string.digits\n        salt = ''.join(random.choice(collection) for _ 
in range(salt_len))\n        salt = \"${0}${1}\".format(crypt_id, salt)\n        return crypt.crypt(password, salt)\n\n    def create_account(self, user, password, expiration, thumbprint, enable_nopasswd):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        user_entry = None\n        try:\n            user_entry = pwd.getpwnam(user)\n        except (KeyError, EnvironmentError):\n            pass\n        uid_min = None\n        try:\n            uid_min = int(ext_utils.get_line_starting_with(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if uid_min is None:\n            uid_min = 100\n        if user_entry is not None and user_entry[2] < uid_min:\n            logger.error(\n                \"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if user_entry is None:\n            command = ['useradd', '-m', user]\n            if expiration is not None:\n                command += ['-e', expiration.split('.')[0]]\n            if ext_utils.run(command):\n                logger.error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            logger.log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n        if password is not None:\n            self.change_password(user, password)\n        try:\n            # for older distros create sudoers.d\n            if not os.path.isdir('/etc/sudoers.d/'):\n                # create the /etc/sudoers.d/ directory\n                os.mkdir('/etc/sudoers.d/')\n                # add the include of sudoers.d to the /etc/sudoers\n                ext_utils.set_file_contents(\n                    '/etc/sudoers', ext_utils.get_file_contents('/etc/sudoers') + '\\n#includedir /etc/sudoers.d\\n')\n            if password is None or enable_nopasswd:\n                ext_utils.set_file_contents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                ext_utils.set_file_contents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n        except EnvironmentError:\n            logger.error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = self.get_home()\n        if thumbprint is not None:\n            ssh_dir = home + \"/\" + user + \"/.ssh\"\n            ext_utils.create_dir(ssh_dir, user, 0o700)\n            pub = ssh_dir + \"/id_rsa.pub\"\n            prv = ssh_dir + \"/id_rsa\"\n            ext_utils.run_command_and_write_stdout_to_file(['ssh-keygen', '-y', '-f', thumbprint + '.prv'], pub)\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ext_utils.change_owner(f, user)\n            ext_utils.set_file_contents(ssh_dir + \"/authorized_keys\", ext_utils.get_file_contents(pub))\n            ext_utils.change_owner(ssh_dir + \"/authorized_keys\", user)\n        logger.log(\"Created user account: \" + user)\n        return None\n\n    def delete_account(self, user):\n        \"\"\"\n            Delete the 'user'.\n            Clear utmp 
first, to avoid error.\n            Removes the /etc/sudoers.d/waagent file.\n            \"\"\"\n        user_entry = None\n        try:\n            user_entry = pwd.getpwnam(user)\n        except (KeyError, EnvironmentError):\n            pass\n        if user_entry is None:\n            logger.error(\"DeleteAccount: \" + user + \" not found.\")\n            return\n        uid_min = None\n        try:\n            uid_min = int(ext_utils.get_line_starting_with(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if uid_min is None:\n            uid_min = 100\n        if user_entry[2] < uid_min:\n            logger.error(\n                \"DeleteAccount: \" + user + \" is a system user. Will not delete account.\")\n            return\n        ext_utils.run(['rm', '-f', '/var/run/utmp'])  # Delete utmp to prevent error if we are the 'user' deleted\n        ext_utils.run(['userdel', '-f', '-r', user])\n        try:\n            os.remove(\"/etc/sudoers.d/waagent\")\n        except EnvironmentError:\n            pass\n        return\n\n\nclass UbuntuDistro(GenericDistro):\n    def __init__(self, config):\n        \"\"\"\n        Generic Attributes go here.  These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        super(UbuntuDistro, self).__init__(config)\n        self.selinux = False\n        self.ssh_service_name = 'sshd'\n        self.sudoers_dir_base = '/usr/local/etc'\n        self.distro_name = 'Ubuntu'\n\n    def restart_ssh_service(self):\n        \"\"\"\n        Service call to re(start) the SSH service.\n        Starting with Ubuntu 22.10 the service name is 'ssh', not 'sshd';\n        fall back to 'ssh' in case restarting 'sshd' fails.\n        \"\"\"\n        ssh_restart_cmd = [self.service_cmd, self.ssh_service_name, self.ssh_service_restart_option]\n        ret_code = ext_utils.run(ssh_restart_cmd)\n        if ret_code != 0:\n            self.ssh_service_name = 'ssh'\n            ssh_restart_cmd = [self.service_cmd, self.ssh_service_name, self.ssh_service_restart_option]\n            ret_code = ext_utils.run(ssh_restart_cmd)\n            if ret_code != 0:\n                logger.error(\"Failed to restart SSH service with return code:\" + str(ret_code))\n        return ret_code\n\n\nclass FreeBSDDistro(GenericDistro):\n    \"\"\"\n    FreeBSD Distro concrete class\n    Put FreeBSD specific behavior here...\n    \"\"\"\n\n    def __init__(self, config):\n        \"\"\"\n        Generic Attributes go here.  
These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        super(FreeBSDDistro, self).__init__(config)\n        self.selinux = False\n        self.ssh_service_name = 'sshd'\n        self.sudoers_dir_base = '/usr/local/etc'\n        self.distro_name = 'FreeBSD'\n\n    # noinspection PyMethodOverriding\n    def chpasswd(self, user, password, **kwargs):\n        # 'pw usermod <user> -h 0' reads the new password from stdin (fd 0)\n        return ext_utils.run_send_stdin(['pw', 'usermod', user, '-h', '0'], password.encode('utf-8'), log_cmd=False)\n\n    def create_account(self, user, password, expiration, thumbprint, enable_nopasswd):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except (EnvironmentError, KeyError):\n            pass\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/pw.conf\"):\n                uidmin = int(ext_utils.get_line_starting_with(\"minuid\", \"/etc/pw.conf\").split('=')[1].strip(' \"'))\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if uidmin is None:\n            uidmin = 100\n        if userentry is not None and userentry[2] < uidmin:\n            logger.error(\n                \"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry is None:\n            command = ['pw', 'useradd', user, '-m']\n            if expiration is not None:\n                command += ['-e', expiration.split('.')[0]]\n            if ext_utils.run(command):\n                logger.error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            logger.log(\n                \"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n\n        if password is not None:\n            self.change_password(user, password)\n        try:\n            # for older distros create sudoers.d\n            if not os.path.isdir(self.sudoers_dir_base + '/sudoers.d/'):\n                # create the /etc/sudoers.d/ directory\n                os.mkdir(self.sudoers_dir_base + '/sudoers.d')\n                # add the include of sudoers.d to the /etc/sudoers\n                ext_utils.set_file_contents(\n                    self.sudoers_dir_base + '/sudoers',\n                    ext_utils.get_file_contents(\n                        self.sudoers_dir_base + '/sudoers') + '\\n#includedir ' + self.sudoers_dir_base + '/sudoers.d\\n')\n            if password is None or enable_nopasswd:\n                ext_utils.set_file_contents(\n                    self.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                ext_utils.set_file_contents(self.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(self.sudoers_dir_base + \"/sudoers.d/waagent\", 0o440)\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            logger.error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = self.get_home()\n        if thumbprint is not None:\n            ssh_dir = home + \"/\" + user + \"/.ssh\"\n            ext_utils.create_dir(ssh_dir, user, 0o700)\n            pub = ssh_dir + \"/id_rsa.pub\"\n            prv = ssh_dir + \"/id_rsa\"\n            ext_utils.run_command_and_write_stdout_to_file(['ssh-keygen', '-y', '-f', thumbprint + '.prv'], pub)\n            ext_utils.set_file_contents(\n                prv, ext_utils.get_file_contents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ext_utils.change_owner(f, user)\n            ext_utils.set_file_contents(\n                ssh_dir + \"/authorized_keys\",\n                ext_utils.get_file_contents(pub))\n            ext_utils.change_owner(ssh_dir + \"/authorized_keys\", user)\n        logger.log(\"Created user account: \" + user)\n        return None\n\n    def delete_account(self, user):\n        \"\"\"\n        Delete the 'user'.\n        Clear utmp first, to avoid error.\n        Removes the /etc/sudoers.d/waagent file.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except (EnvironmentError, KeyError):\n            pass\n        if userentry is None:\n            logger.error(\"DeleteAccount: \" + user + \" not found.\")\n            return\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/pw.conf\"):\n                uidmin = int(ext_utils.get_line_starting_with(\"minuid\", \"/etc/pw.conf\").split('=')[1].strip(' \"'))\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if uidmin is None:\n            uidmin = 100\n        if userentry[2] < uidmin:\n            logger.error(\n                \"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n            return\n        # empty contents of utmp to prevent error if we are the 'user' deleted\n        ext_utils.run_command_and_write_stdout_to_file(['echo'], '/var/run/utmp')\n        ext_utils.run(['rmuser', '-y', user], chk_err=False)\n        try:\n            os.remove(self.sudoers_dir_base + \"/sudoers.d/waagent\")\n        except EnvironmentError:\n            pass\n        return\n\n    def get_home(self):\n        return '/home'\n\n\nclass CoreOSDistro(GenericDistro):\n    \"\"\"\n    CoreOS Distro concrete class\n    Put CoreOS specific behavior here...\n    \"\"\"\n    CORE_UID = 500\n\n    def __init__(self, config):\n        super(CoreOSDistro, self).__init__(config)\n        self.waagent_path = '/usr/share/oem/bin'\n        self.python_path = '/usr/share/oem/python/bin'\n        self.distro_name = 'CoreOS'\n        if 'PATH' in os.environ:\n            os.environ['PATH'] = \"{0}:{1}\".format(os.environ['PATH'], self.python_path)\n        else:\n            os.environ['PATH'] = self.python_path\n\n        if 'PYTHONPATH' in os.environ:\n            os.environ['PYTHONPATH'] = \"{0}:{1}\".format(os.environ['PYTHONPATH'], self.waagent_path)\n        else:\n            os.environ['PYTHONPATH'] = self.waagent_path\n\n    def restart_ssh_service(self):\n        \"\"\"\n        SSH is socket activated on CoreOS. No need to restart it.\n        \"\"\"\n        return 0\n\n    def create_account(self, user, password, expiration, thumbprint, enable_nopasswd):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except (EnvironmentError, KeyError):\n            pass\n        uidmin = None\n        try:\n            uidmin = int(ext_utils.get_line_starting_with(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except (ValueError, KeyError, AttributeError, EnvironmentError):\n            pass\n        if uidmin is None:\n            uidmin = 100\n        if userentry is not None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:\n            logger.error(\n                \"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry is None:\n            command = ['useradd', '--create-home',  '--password', '*',  user]\n            if expiration is not None:\n                command += ['--expiredate', expiration.split('.')[0]]\n            if ext_utils.run(command):\n                logger.error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            logger.log(\"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n        if password is not None:\n            self.change_password(user, password)\n        try:\n            if password is None or enable_nopasswd:\n                ext_utils.set_file_contents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                ext_utils.set_file_contents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n        except EnvironmentError:\n            logger.error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = self.get_home()\n        if thumbprint is not None:\n            ssh_dir = home + \"/\" + user + \"/.ssh\"\n            ext_utils.create_dir(ssh_dir, user, 0o700)\n            pub = ssh_dir + \"/id_rsa.pub\"\n            prv = ssh_dir + \"/id_rsa\"\n            ext_utils.run_command_and_write_stdout_to_file(['ssh-keygen', '-y', '-f', thumbprint + '.prv'], pub)\n            ext_utils.set_file_contents(prv, ext_utils.get_file_contents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ext_utils.change_owner(f, user)\n            ext_utils.set_file_contents(ssh_dir + \"/authorized_keys\", ext_utils.get_file_contents(pub))\n            ext_utils.change_owner(ssh_dir + \"/authorized_keys\", user)\n        logger.log(\"Created user account: \" + user)\n        return None\n\n\nclass RedhatDistro(GenericDistro):\n    \"\"\"\n    Redhat Distro concrete class\n    Put Redhat specific behavior here...\n    \"\"\"\n    def __init__(self, config):\n        super(RedhatDistro, self).__init__(config)\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_restart_option = 'condrestart'\n        self.ssh_service_name = 'sshd'\n        self.distro_name = 'Red Hat'\n\n\nclass CentOSDistro(RedhatDistro):\n    def __init__(self, config):\n        super(CentOSDistro, self).__init__(config)\n        self.distro_name = \"CentOS\"\n\n\nclass FedoraDistro(RedhatDistro):\n    \"\"\"\n    FedoraDistro concrete class\n    Put Fedora specific behavior here...\n    \"\"\"\n    def __init__(self, config):\n        super(FedoraDistro, self).__init__(config)\n        self.service_cmd = '/usr/bin/systemctl'\n        self.hostname_file_path = '/etc/hostname'\n        self.distro_name = 'Fedora'\n\n    def restart_ssh_service(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        ssh_restart_cmd = [self.service_cmd, self.ssh_service_restart_option, self.ssh_service_name]\n        retcode = ext_utils.run(ssh_restart_cmd)\n        if retcode > 0:\n            logger.error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n\n    def create_account(self, user, password, expiration, thumbprint, enable_nopasswd):\n        ext_utils.run(['/sbin/usermod', user, '-G', 'wheel'])\n\n    def delete_account(self, user):\n        ext_utils.run(['/sbin/usermod', user, '-G', ''])\n\n\nclass SuSEDistro(GenericDistro):\n    def __init__(self, config):\n        super(SuSEDistro, self).__init__(config)\n        self.ssh_service_name = 'sshd'\n        self.distro_name = \"SuSE\"\n\n\nclass MarinerDistro(GenericDistro):\n    def __init__(self, config):\n        super(MarinerDistro, self).__init__(config)\n        self.ssh_service_name = 'sshd'\n        self.service_cmd = '/usr/bin/systemctl'\n        self.distro_name = 
'Mariner'\n    \n    def restart_ssh_service(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        ssh_restart_cmd = [self.service_cmd, self.ssh_service_restart_option, self.ssh_service_name]\n        retcode = ext_utils.run(ssh_restart_cmd)\n        if retcode > 0:\n            logger.error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n"
  },
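  {
    "path": "Utils/examples/distroutils_demo.py",
    "content": "# Hypothetical usage sketch (not shipped with the handlers): resolves the\n# distro-specific helper via get_my_distro() and calls its account methods.\n# The config argument only needs a .get(key) method; FakeConfig is a\n# stand-in for the real provisioning configuration object.\nimport Utils.distroutils as distroutils\n\n\nclass FakeConfig(object):\n    def get(self, key):\n        # None selects the defaults: '$6$' (SHA-512) hashes, salt_len 10\n        return None\n\n\ndistro = distroutils.get_my_distro(FakeConfig())\nprint(distro.distro_name)\n\n# The account helpers return None on success and an error string on failure,\n# so callers test the return value instead of catching exceptions.\n# (Requires root, since chpasswd() shells out to usermod.)\nerr = distro.change_password('azureuser', 'P@ssw0rd!')\nif err:\n    print('change_password failed: ' + err)\n"
  },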
  {
    "path": "Utils/extensionutils.py",
    "content": "import subprocess\nimport os\nimport tempfile\nimport traceback\nimport time\nimport sys\nimport pwd\nimport Utils.constants as constants\nimport xml.sax.saxutils as xml_utils\nimport Utils.logger as logger\n\n\nif not hasattr(subprocess, 'check_output'):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode = returncode\n            self.cmd = cmd\n            self.output = output\n\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n\n    subprocess.check_output = check_output\n    subprocess.CalledProcessError = CalledProcessError\n    \n\ndef change_owner(file_path, user):\n    \"\"\"\n    Lookup user.  Attempt chown 'filepath' to 'user'.\n    \"\"\"\n    p = None\n    try:\n        p = pwd.getpwnam(user)\n    except (KeyError, EnvironmentError):\n        pass\n    if p is not None:\n        os.chown(file_path, p[2], p[3])\n\n\ndef create_dir(dir_path, user, mode):\n    \"\"\"\n    Attempt os.makedirs, catch all exceptions.\n    Call ChangeOwner afterwards.\n    \"\"\"\n    try:\n        os.makedirs(dir_path, mode)\n    except EnvironmentError:\n        pass\n    change_owner(dir_path, user)\n\n\ndef encode_for_writing_to_file(contents):\n    if type(contents) == str:\n        if sys.version_info[0] == 3:\n            \"\"\"\n            utf-8 is a superset of ASCII and latin-1\n            in python 2 str is an alias for bytes, no need to encode it again\n            \"\"\"\n            return contents.encode('utf-8')\n    return contents\n\n\ndef set_file_contents(file_path, contents):\n    \"\"\"\n    Write 'contents' to 'file_path'.\n    \"\"\"\n    bytes_to_write = encode_for_writing_to_file(contents)\n    try:\n        with open(file_path, \"wb+\") as F:\n            F.write(bytes_to_write)\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'SetFileContents', 'Writing to file ' + file_path + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\ndef append_file_contents(file_path, contents):\n    \"\"\"\n    Append 'contents' to 'file_path'.\n    \"\"\"\n    bytes_to_write = encode_for_writing_to_file(contents)\n    try:\n        with open(file_path, \"ab+\") as F:\n            F.write(bytes_to_write)\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'AppendFileContents', 'Appending to file ' + file_path + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\ndef get_file_contents(file_path, as_bin=False):\n    \"\"\"\n    Read and return contents of 'file_path'.\n    \"\"\"\n    mode = 'r'\n    if as_bin:\n        mode += 'b'\n    try:\n        with open(file_path, mode) as F:\n            contents = F.read()\n            return contents\n    except 
EnvironmentError as e:\n        logger.error_with_prefix(\n            'GetFileContents', 'Reading from file ' + file_path + ' Exception is ' + str(e))\n        return None\n\n\ndef replace_file_with_contents_atomic(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath' by creating a temp file, and replacing original.\n    \"\"\"\n    handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath))\n    bytes_to_write = encode_for_writing_to_file(contents)\n    try:\n        os.write(handle, bytes_to_write)\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'ReplaceFileContentsAtomic', 'Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    finally:\n        os.close(handle)\n    try:\n        os.rename(temp, filepath)\n        return None\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e)\n        )\n    try:\n        os.remove(filepath)\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.rename(temp, filepath)\n    except EnvironmentError as e:\n        logger.error_with_prefix(\n            'ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))\n        return 1\n    return 0\n\n\ndef run_command_and_write_stdout_to_file(command, output_file):\n    # meant to replace commands of the nature command > output_file\n    try:\n        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n        stdout, stderr = p.communicate()\n    except EnvironmentError as e:\n        logger.error('CalledProcessError.  Error message is ' + str(e))\n        return e.errno\n    if p.returncode != 0:\n        logger.error('CalledProcessError.  Error Code is ' + str(p.returncode))\n        logger.error('CalledProcessError.  Command string was ' + ' '.join(command))\n        logger.error(\n            'CalledProcessError.  Command result was stdout: ' + str(stdout) + ' stderr: ' + str(stderr))\n        return p.returncode\n    set_file_contents(output_file, stdout)\n    return 0\n\n\ndef run_command_get_output(cmd, chk_err=True, log_cmd=True):\n    \"\"\"\n    Wrapper for subprocess.check_output.\n    Execute 'cmd'.  Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        logger.log_if_verbose(cmd)\n    try:\n        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False)\n    except subprocess.CalledProcessError as e:\n        if chk_err and log_cmd:\n            logger.error('CalledProcessError.  Error Code is ' + str(e.returncode))\n            logger.error('CalledProcessError.  Command string was ' + str(cmd))\n            logger.error(\n                'CalledProcessError.  Command result was ' + (e.output[:-1]).decode('utf-8'))\n        return e.returncode, e.output.decode('utf-8')\n    except EnvironmentError as e:\n        if chk_err and log_cmd:\n            logger.error(\n                'CalledProcessError.  
Error message is ' + str(e))\n            return e.errno, str(e)\n    # noinspection PyUnboundLocalVariable\n    return 0, output.decode('utf-8')\n\n\ndef run(cmd, chk_err=True):\n    \"\"\"\n    Calls RunGetOutput on 'cmd', returning only the return code.\n    If chk_err=True then errors will be reported in the log.\n    If chk_err=False then errors will be suppressed from the log.\n    \"\"\"\n    return_code, _ = run_command_get_output(cmd, chk_err)\n    return return_code\n\n\n# noinspection PyUnboundLocalVariable\ndef run_send_stdin(cmd, cmd_input, chk_err=True, log_cmd=True):\n    \"\"\"\n    Wrapper for subprocess.Popen.\n    Execute 'cmd', sending 'input' to STDIN of 'cmd'.\n    Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        logger.log_if_verbose(str(cmd) + str(cmd_input))\n    subprocess_executed = False\n    try:\n        me = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n        output = me.communicate(cmd_input)\n        subprocess_executed = True\n    except EnvironmentError as e:\n        if chk_err and log_cmd:\n            logger.error('CalledProcessError.  Error Code is ' + str(e.errno))\n            logger.error('CalledProcessError.  Command was ' + str(cmd))\n            logger.error('CalledProcessError.  Command result was ' + str(e))\n            return 1, str(e)\n    if subprocess_executed and me.returncode != 0 and chk_err and log_cmd:\n        logger.error('CalledProcessError.  Error Code is ' + str(me.returncode))\n        logger.error('CalledProcessError.  Command was ' + str(cmd))\n        logger.error(\n            'CalledProcessError.  Command result was ' + output[0].decode('utf-8'))\n    return me.returncode, output[0].decode('utf-8')\n\n\ndef get_line_starting_with(prefix, filepath):\n    \"\"\"\n    Return line from 'filepath' if the line startswith 'prefix'\n    \"\"\"\n    for line in get_file_contents(filepath).split('\\n'):\n        if line.startswith(prefix):\n            return line\n    return None\n\n\nclass WALAEvent(object):\n    def __init__(self):\n        self.providerId = \"\"\n        self.eventId = 1\n        self.OpcodeName = \"\"\n        self.KeywordName = \"\"\n        self.TaskName = \"\"\n        self.TenantName = \"\"\n        self.RoleName = \"\"\n        self.RoleInstanceName = \"\"\n        self.ContainerId = \"\"\n        self.ExecutionMode = \"IAAS\"\n        self.OSVersion = \"\"\n        self.GAVersion = \"\"\n        self.RAM = 0\n        self.Processors = 0\n\n    def to_xml(self):\n        str_event_id = u'<Event id=\"{0}\"/>'.format(self.eventId)\n        str_provider_id = u'<Provider id=\"{0}\"/>'.format(self.providerId)\n        str_record_format = u'<Param Name=\"{0}\" Value=\"{1}\" T=\"{2}\" />'\n        str_record_no_quote_format = u'<Param Name=\"{0}\" Value={1} T=\"{2}\" />'\n        str_mt_str = u'mt:wstr'\n        str_mt_u_int64 = u'mt:uint64'\n        str_mt_bool = u'mt:bool'\n        str_mt_float = u'mt:float64'\n        str_events_data = u\"\"\n\n        for attName in self.__dict__:\n            if attName in [\"eventId\", \"filedCount\", \"providerId\"]:\n                continue\n\n            att_value = self.__dict__[attName]\n            if type(att_value) is int:\n                str_events_data += str_record_format.format(attName, att_value, str_mt_u_int64)\n                continue\n            if type(att_value) is str:\n       
         att_value = xml_utils.quoteattr(att_value)\n                str_events_data += str_record_no_quote_format.format(attName, att_value, str_mt_str)\n                continue\n            if str(type(att_value)).count(\"'unicode'\") > 0:\n                att_value = xml_utils.quoteattr(att_value)\n                str_events_data += str_record_no_quote_format.format(attName, att_value, str_mt_str)\n                continue\n            if type(att_value) is bool:\n                str_events_data += str_record_format.format(attName, att_value, str_mt_bool)\n                continue\n            if type(att_value) is float:\n                str_events_data += str_record_format.format(attName, att_value, str_mt_float)\n                continue\n\n            logger.log(\n                \"Warning: property \" + attName + \":\" + str(type(att_value)) + \":type\" +\n                str(type(att_value)) + \"Can't convert to events data:\" + \":type not supported\")\n\n        return u\"<Data>{0}{1}{2}</Data>\".format(str_provider_id, str_event_id, str_events_data)\n\n    def save(self):\n        event_folder = constants.LibDir + \"/events\"\n        if not os.path.exists(event_folder):\n            os.mkdir(event_folder)\n            os.chmod(event_folder, 0o700)\n        if len(os.listdir(event_folder)) > 1000:\n            logger.log(\"Warning: Too many files under \" + event_folder)\n\n        filename = os.path.join(event_folder, str(int(time.time() * 1000000)))\n        with open(filename + \".tmp\", 'wb+') as h_file:\n            h_file.write(self.to_xml().encode(\"utf-8\"))\n        os.rename(filename + \".tmp\", filename + \".tld\")\n\n\nclass ExtensionEvent(WALAEvent):\n    def __init__(self):\n        WALAEvent.__init__(self)\n        self.eventId = 1\n        self.providerId = \"69B669B9-4AF8-4C50-BDC4-6006FA76E975\"\n        self.Name = \"\"\n        self.Version = \"\"\n        self.IsInternal = False\n        self.Operation = \"\"\n        self.OperationSuccess = True\n        self.ExtensionType = \"\"\n        self.Message = \"\"\n        self.Duration = 0\n\n\ndef add_extension_event(name, op, is_success, duration=0, version=\"1.0\", message=\"\", extension_type=\"\",\n                        is_internal=False):\n    event = ExtensionEvent()\n    event.Name = name\n    event.Version = version\n    event.IsInternal = is_internal\n    event.Operation = op\n    event.OperationSuccess = is_success\n    event.Message = message\n    event.Duration = duration\n    event.ExtensionType = extension_type\n    try:\n        event.save()\n    except EnvironmentError:\n        logger.error(\"Error \" + traceback.format_exc())\n"
  },
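  {
    "path": "Utils/examples/extensionutils_demo.py",
    "content": "# Hypothetical usage sketch (not shipped with the handlers): exercises the\n# process helpers. run() returns only the exit code, run_command_get_output()\n# returns (code, stdout), and run_send_stdin() pipes bytes to the child's\n# stdin and returns (code, stdout) as well.\nimport Utils.extensionutils as ext_utils\n\nret = ext_utils.run(['true'])  # 0 on success\ncode, out = ext_utils.run_command_get_output(['uname', '-s'])\nprint(out.strip())\ncode, out = ext_utils.run_send_stdin(['cat'], b'hello')\nprint(out)  # 'hello'\n\n# The file helpers return 0 on success and None on failure (they log the\n# exception instead of raising), so errors are detected with an 'is None'\n# check.\nif ext_utils.set_file_contents('/tmp/demo.txt', 'some data\\n') is None:\n    print('write failed')\nprint(ext_utils.get_file_contents('/tmp/demo.txt'))\n"
  },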
  {
    "path": "Utils/handlerutil2.py",
    "content": "#\n# Handler library for Linux IaaS\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n\n  }\n}]\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. 
Waiting for a new configuration from user.\"\n    }\n}\nExample Status Report:\n[{\"version\":\"1.0\",\"timestampUTC\":\"2014-05-29T04:20:13Z\",\"status\":{\"name\":\"Chef Extension Handler\",\"operation\":\"chef-client-run\",\"status\":\"success\",\"code\":0,\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"Chef-client run success\"}}}]\n\n\"\"\"\n\nimport os\nimport os.path\nimport sys\nimport base64\nimport json\nimport time\nimport re\n\nimport Utils.extensionutils as ext_utils\nimport Utils.constants as constants\nimport Utils.logger as logger\nfrom xml.etree import ElementTree\nfrom os.path import join\n\nDateTimeFormat = \"%Y-%m-%dT%H:%M:%SZ\"\n\nMANIFEST_XML = \"manifest.xml\"\n\nENV_CONFIG_SEQUENCE_NUMBER = \"ConfigSequenceNumber\"\n\nclass HandlerContext:\n    def __init__(self, name):\n        self._name = name\n        self._version = '0.0'\n        self._config_dir = None\n        self._log_dir = None\n        self._log_file = None\n        self._status_dir = None\n        self._heartbeat_file = None\n        self._seq_no = -1\n        self._status_file = None\n        self._settings_file = None\n        self._config = None\n        return\n\n\nclass HandlerUtility:\n    def __init__(self, s_name=None, l_name=None, extension_version=None, logFileName='extension.log',\n                 console_logger=None, file_logger=None):\n        self._log = logger.log\n        self._log_to_con = console_logger\n        self._log_to_file = file_logger\n        self._error = logger.error\n        self._logFileName = logFileName\n        if s_name is None or l_name is None or extension_version is None:\n            (l_name, s_name, extension_version) = self._get_extension_info()\n\n        self._short_name = s_name\n        self._extension_version = extension_version\n        self._log_prefix = '[%s-%s] ' % (l_name, extension_version)\n\n    def get_extension_version(self):\n        return self._extension_version\n\n    def _get_log_prefix(self):\n        return self._log_prefix\n\n    def _get_extension_info(self):\n        if os.path.isfile(MANIFEST_XML):\n            return self._get_extension_info_manifest()\n\n        ext_dir = os.path.basename(os.getcwd())\n        (long_name, version) = ext_dir.split('-')\n        short_name = long_name.split('.')[-1]\n\n        return long_name, short_name, version\n\n    def _get_extension_info_manifest(self):\n        with open(MANIFEST_XML) as fh:\n            doc = ElementTree.parse(fh)\n            namespace = doc.find('{http://schemas.microsoft.com/windowsazure}ProviderNameSpace').text\n            short_name = doc.find('{http://schemas.microsoft.com/windowsazure}Type').text\n            version = doc.find('{http://schemas.microsoft.com/windowsazure}Version').text\n\n            long_name = \"%s.%s\" % (namespace, short_name)\n            return (long_name, short_name, version)\n\n    def _get_current_seq_no(self, config_folder):\n        seq_no = -1\n        cur_seq_no = -1\n        freshest_time = None\n\n        # First read the sequence number from the environment variable\n        seq_no_from_env = os.getenv(ENV_CONFIG_SEQUENCE_NUMBER)\n        if (seq_no_from_env is not None):\n            try:\n                seq_no = int(seq_no_from_env)\n            except ValueError:\n                self.error(\"Unable to convert sequence number to int:\" + seq_no_from_env)\n        if seq_no == -1:\n            # Otherwise look for the most recent sequence number from the files\n            self.log(\"Searching for sequence number in config 
folder: \" + config_folder)\n            for subdir, dirs, file_names in os.walk(config_folder):\n                for file_name in file_names:\n                    try:\n                        file_basename = os.path.basename(file_name)\n                        if \".\" in file_basename:\n                            cur_seq_no = int(file_basename.split('.')[0])\n                            if (freshest_time is None):\n                                freshest_time = os.path.getmtime(join(config_folder, file_name))\n                                seq_no = cur_seq_no\n                            else:\n                                current_file_m_time = os.path.getmtime(join(config_folder, file_name))\n                                if (current_file_m_time > freshest_time):\n                                    freshest_time = current_file_m_time\n                                    seq_no = cur_seq_no\n                    except ValueError:\n                        continue\n        return seq_no\n\n    def log(self, message):\n        self._log(self._get_log_prefix() + message)\n\n    def log_to_console(self, message):\n        if self._log_to_con is not None:\n            self._log_to_con(self._get_log_prefix() + message)\n        else:\n            self.error(\"Unable to log to console, console log method not set\")\n\n    def log_to_file(self, message):\n        if self._log_to_file is not None:\n            self._log_to_file(self._get_log_prefix() + message)\n        else:\n            self.error(\"Unable to log to file, file log method not set\")\n\n    def error(self, message):\n        self._error(self._get_log_prefix() + message)\n\n    @staticmethod\n    def redact_protected_settings(content):\n        redacted_tmp = re.sub('\"protectedSettings\":\\s*\"[^\"]+==\"', '\"protectedSettings\": \"*** REDACTED ***\"', content)\n        redacted = re.sub('\"protectedSettingsCertThumbprint\":\\s*\"[^\"]+\"', '\"protectedSettingsCertThumbprint\": \"*** REDACTED ***\"', redacted_tmp)\n        return redacted\n\n    def _parse_config(self, ctxt):\n        config = None\n        try:\n            config = json.loads(ctxt)\n        except:\n            self.error('JSON exception decoding ' + HandlerUtility.redact_protected_settings(ctxt))\n\n        if config is None:\n            self.error(\"JSON error processing settings file:\" + HandlerUtility.redact_protected_settings(ctxt))\n        else:\n            handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n            if 'protectedSettings' in handlerSettings and \\\n                    'protectedSettingsCertThumbprint' in handlerSettings and \\\n                    handlerSettings['protectedSettings'] is not None and \\\n                    handlerSettings[\"protectedSettingsCertThumbprint\"] is not None:\n                protectedSettings = handlerSettings['protectedSettings']\n                thumb = handlerSettings['protectedSettingsCertThumbprint']\n                cert = constants.LibDir + '/' + thumb + '.crt'\n                pkey = constants.LibDir + '/' + thumb + '.prv'\n                unencodedSettings = base64.standard_b64decode(protectedSettings)\n                openSSLcmd_cms = ['openssl', 'cms', '-inform', 'DER', '-decrypt', '-recip' , cert, '-inkey', pkey]\n                cleartxt = ext_utils.run_send_stdin(openSSLcmd_cms, unencodedSettings)[1]\n                if cleartxt is None:\n                    self.log(\"OpenSSL decode error using cms command with thumbprint \" + thumb + \"\\n trying smime 
command\")\n                    openSSLcmd_smime = ['openssl', 'smime', '-inform', 'DER', '-decrypt', '-recip' , cert, '-inkey', pkey]\n                    cleartxt = ext_utils.run_send_stdin(openSSLcmd_smime, unencodedSettings)[1]\n                    if cleartxt is None:\n                        self.error(\"OpenSSL decode error using smime command with thumbprint \" + thumb)\n                        self.do_exit(1, \"Enable\", 'error', '1', 'Failed to decrypt protectedSettings')\n                jctxt = ''\n                try:\n                    jctxt = json.loads(cleartxt)\n                except:\n                    self.error('JSON exception decoding ' + HandlerUtility.redact_protected_settings(cleartxt))\n                handlerSettings['protectedSettings']=jctxt\n                self.log('Config decoded correctly.')\n        return config\n\n    def do_parse_context(self, operation):\n        _context = self.try_parse_context()\n        if not _context:\n            self.do_exit(1, operation, 'error', '1', operation + ' Failed')\n        return _context\n\n    def try_parse_context(self):\n        self._context = HandlerContext(self._short_name)\n        handler_env = None\n        config = None\n        ctxt = None\n        code = 0\n        # get the HandlerEnvironment.json. According to the extension handler spec, it is always in the ./ directory\n        self.log('cwd is ' + os.path.realpath(os.path.curdir))\n        handler_env_file = './HandlerEnvironment.json'\n        if not os.path.isfile(handler_env_file):\n            self.error(\"Unable to locate \" + handler_env_file)\n            return None\n        ctxt = ext_utils.get_file_contents(handler_env_file)\n        if ctxt == None:\n            self.error(\"Unable to read \" + handler_env_file)\n        try:\n            handler_env = json.loads(ctxt)\n        except:\n            pass\n        if handler_env == None:\n            self.log(\"JSON error processing \" + handler_env_file)\n            return None\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n\n        self._context._name = handler_env['name']\n        self._context._version = str(handler_env['version'])\n        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']\n\n        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'], self._logFileName)\n        self._change_log_file()\n        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']\n        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']\n        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)\n        if self._context._seq_no < 0:\n            self.error(\"Unable to locate a .settings file!\")\n            return None\n        self._context._seq_no = str(self._context._seq_no)\n        self.log('sequence number is ' + self._context._seq_no)\n        self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')\n        self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')\n        self.log(\"setting file path is\" + self._context._settings_file)\n        ctxt = None\n        ctxt = ext_utils.get_file_contents(self._context._settings_file)\n        if ctxt == None:\n            error_msg = 'Unable to read ' + self._context._settings_file + '. 
'\n            self.error(error_msg)\n            return None\n\n        self.log(\"JSON config: \" + HandlerUtility.redact_protected_settings(ctxt))\n        self._context._config = self._parse_config(ctxt)\n        return self._context\n\n    def _change_log_file(self):\n        self.log(\"Change log file to \" + self._context._log_file)\n        # this will change the logging file for all python files that share the same process\n        logger.global_shared_context_logger = logger.Logger(self._context._log_file, '/dev/stdout')\n\n    def is_seq_smaller(self):\n        return int(self._context._seq_no) <= self._get_most_recent_seq()\n\n    def save_seq(self):\n        self._set_most_recent_seq(self._context._seq_no)\n        self.log(\"set most recent sequence number to \" + str(self._context._seq_no))\n\n    def exit_if_enabled(self, remove_protected_settings=False):\n        self.exit_if_seq_smaller(remove_protected_settings)\n\n    def exit_if_seq_smaller(self, remove_protected_settings):\n        if(self.is_seq_smaller()):\n            self.log(\n                \"Current sequence number, \" + str(self._context._seq_no) +\n                \", is not greater than the sequence number of the most recent executed configuration. Exiting...\")\n            sys.exit(0)\n        self.save_seq()\n\n        if remove_protected_settings:\n            self.scrub_settings_file()\n\n    def _get_most_recent_seq(self):\n        if (os.path.isfile('mrseq')):\n            seq = ext_utils.get_file_contents('mrseq')\n            if (seq):\n                return int(seq)\n\n        return -1\n\n    def is_current_config_seq_greater_inused(self):\n        return int(self._context._seq_no) > self._get_most_recent_seq()\n\n    def get_inused_config_seq(self):\n        return self._get_most_recent_seq()\n\n    def set_inused_config_seq(self, seq):\n        self._set_most_recent_seq(seq)\n\n    def _set_most_recent_seq(self, seq):\n        ext_utils.set_file_contents('mrseq', str(seq))\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.log(\"{0},{1},{2},{3}\".format(operation, status, status_code, message))\n        tstamp = time.strftime(DateTimeFormat, time.gmtime())\n        stat = [{\n            \"version\": self._context._version,\n            \"timestampUTC\": tstamp,\n            \"status\": {\n                \"name\": self._context._name,\n                \"operation\": operation,\n                \"status\": status,\n                \"code\": status_code,\n                \"formattedMessage\": {\n                    \"lang\": \"en-US\",\n                    \"message\": message\n                }\n            }\n        }]\n        stat_rept = json.dumps(stat)\n        if self._context._status_file:\n            tmp = \"%s.tmp\" % (self._context._status_file)\n            with open(tmp, 'w+') as f:\n                f.write(stat_rept)\n            os.rename(tmp, self._context._status_file)\n\n    def do_heartbeat_report(self, heartbeat_file, status, code, message):\n        # heartbeat\n        health_report = '[{\"version\":\"1.0\",\"heartbeat\":{\"status\":\"' + status + '\",\"code\":\"' + code + '\",\"Message\":\"' + message + '\"}}]'\n        if ext_utils.set_file_contents(heartbeat_file, health_report) is None:\n            self.error('Unable to wite heartbeat info to ' + heartbeat_file)\n\n    def do_exit(self, exit_code, operation, status, code, message):\n        try:\n            self.do_status_report(operation, status, code, message)\n        
except Exception as e:\n            self.log(\"Can't update status: \" + str(e))\n        sys.exit(exit_code)\n\n    def get_name(self):\n        return self._context._name\n\n    def get_seq_no(self):\n        return self._context._seq_no\n\n    def get_log_dir(self):\n        return self._context._log_dir\n\n    def get_handler_settings(self):\n        if (self._context._config != None):\n            return self._context._config['runtimeSettings'][0]['handlerSettings']\n        return None\n\n    def get_protected_settings(self):\n        if (self._context._config != None):\n            protectedSettings = self.get_handler_settings().get('protectedSettings')\n            if (isinstance(protectedSettings, dict)):\n                return protectedSettings\n            else:\n                self.error(\"Protected settings is not of type dictionary\")\n        return None\n\n    def get_public_settings(self):\n        handlerSettings = self.get_handler_settings()\n        if (handlerSettings != None):\n            return self.get_handler_settings().get('publicSettings')\n        return None\n\n    def scrub_settings_file(self):\n        content = ext_utils.get_file_contents(self._context._settings_file)\n        redacted = HandlerUtility.redact_protected_settings(content)\n        ext_utils.set_file_contents(self._context._settings_file, redacted)\n"
  },
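  {
    "path": "Utils/examples/handlerutil2_demo.py",
    "content": "# Hypothetical usage sketch (not shipped with the handlers): the typical\n# lifecycle of an extension entry point built on HandlerUtility. Assumes it\n# runs from an extension directory containing HandlerEnvironment.json and a\n# config/<seqno>.settings file, as described in the module docstring above.\nimport Utils.handlerutil2 as handler_util\n\nhutil = handler_util.HandlerUtility(s_name='Demo', l_name='Example.Demo',\n                                    extension_version='1.0.0')\nhutil.do_parse_context('Enable')  # reports an error status and exits on failure\nhutil.exit_if_enabled()  # sys.exit(0) if this sequence number already ran\n\npublic_settings = hutil.get_public_settings()  # dict, or None if unset\nprotected_settings = hutil.get_protected_settings()\n\n# ... the actual Enable work would go here ...\n\nhutil.do_exit(0, 'Enable', 'success', '0', 'Enable succeeded')\n"
  },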
  {
    "path": "Utils/logger.py",
    "content": "import time\nimport sys\nimport string\n\n\n# noinspection PyMethodMayBeStatic\nclass Logger(object):\n    \"\"\"\n    The Agent's logging assumptions are:\n    For Log, and LogWithPrefix all messages are logged to the\n    self.file_path and to the self.con_path.  Setting either path\n    parameter to None skips that log.  If Verbose is enabled, messages\n    calling the LogIfVerbose method will be logged to file_path yet\n    not to con_path.  Error and Warn messages are normal log messages\n    with the 'ERROR:' or 'WARNING:' prefix added.\n    \"\"\"\n\n    def __init__(self, filepath, conpath, verbose=False):\n        \"\"\"\n        Construct an instance of Logger.\n        \"\"\"\n        self.file_path = filepath\n        self.con_path = conpath\n        self.verbose = verbose\n\n    def throttle_log(self, counter):\n        \"\"\"\n        Log everything up to 10, every 10 up to 100, then every 100.\n        \"\"\"\n        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)\n\n    def write_to_file(self, message):\n        \"\"\"\n        Write 'message' to logfile.\n        \"\"\"\n        if self.file_path:\n            try:\n                with open(self.file_path, \"a\") as F:\n                    message = filter(lambda x: x in string.printable, message)\n\n                    # encoding works different for between interpreter version, we are keeping separate implementation\n                    # to ensure backward compatibility\n                    if sys.version_info[0] == 3:\n                        message = ''.join(list(message)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n                    elif sys.version_info[0] == 2:\n                        message = message.encode('ascii', 'ignore')\n\n                    F.write(message + \"\\n\")\n            except IOError as e:\n                pass\n\n    def write_to_console(self, message):\n        \"\"\"\n        Write 'message' to /dev/console.\n        This supports serial port logging if the /dev/console\n        is redirected to ttys0 in kernel boot options.\n        \"\"\"\n        if self.con_path:\n            try:\n                with open(self.con_path, \"w\") as C:\n                    message = filter(lambda x: x in string.printable, message)\n\n                    # encoding works different for between interpreter version, we are keeping separate implementation\n                    # to ensure backward compatibility\n                    if sys.version_info[0] == 3:\n                        message = ''.join(list(message)).encode('ascii', 'ignore').decode(\"ascii\", \"ignore\")\n                    elif sys.version_info[0] == 2:\n                        message = message.encode('ascii', 'ignore')\n\n                    C.write(message + \"\\n\")\n            except IOError as e:\n                pass\n\n    def log(self, message):\n        \"\"\"\n        Standard Log function.\n        Logs to self.file_path, and con_path\n        \"\"\"\n        self.log_with_prefix(\"\", message)\n\n    def log_to_console(self, message):\n        \"\"\"\n        Logs message to console by pre-pending each line of 'message' with current time.\n        \"\"\"\n        log_prefix = self._get_log_prefix(\"\")\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.write_to_console(line)\n\n    def log_to_file(self, message):\n        \"\"\"\n        Logs message to file by pre-pending each line of 'message' with 
current time.\n        \"\"\"\n        log_prefix = self._get_log_prefix(\"\")\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.write_to_file(line)\n\n    def no_log(self, message):\n        \"\"\"\n        Don't Log.\n        \"\"\"\n        pass\n\n    def log_if_verbose(self, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        \"\"\"\n        self.log_with_prefix_if_verbose('', message)\n\n    def log_with_prefix(self, prefix, message):\n        \"\"\"\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        log_prefix = self._get_log_prefix(prefix)\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self.write_to_file(line)\n            self.write_to_console(line)\n\n    def log_with_prefix_if_verbose(self, prefix, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        if self.verbose:\n            log_prefix = self._get_log_prefix(prefix)\n            for line in message.split('\\n'):\n                line = log_prefix + line\n                self.write_to_file(line)\n                self.write_to_console(line)\n\n    def warning(self, message):\n        self.log_with_prefix(\"WARNING:\", message)\n\n    def error_with_prefix(self, prefix, message):\n        self.log_with_prefix(\"ERROR: \" + str(prefix), message)\n\n    def error(self, message):\n        \"\"\"\n        Call ErrorWithPrefix(message).\n        \"\"\"\n        self.error_with_prefix(\"\", message)\n\n    def _get_log_prefix(self, prefix):\n        \"\"\"\n        Generates the log prefix with timestamp+'prefix'.\n        \"\"\"\n        t = time.localtime()\n        t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n        return t + prefix\n\n# meant to be used with tests\n# noinspection PyMethodMayBeStatic\nclass TestLogger(Logger):\n    def __init__(self):\n        super(Logger, self).__init__()\n        self.verbose = True\n        self.con_path = None\n        self.file_path = None\n\n    def _log_to_stdout(self, message):\n        sys.stdout.writelines(message)\n        sys.stdout.write(\"\\n\")\n\n    def write_to_file(self, message):\n        self._log_to_stdout(message)\n\n    def write_to_console(self, message):\n        self._log_to_stdout(message)\n\n    def log(self, message):\n        self._log_to_stdout(message)\n\n    def log_to_console(self, message):\n        self._log_to_stdout(message)\n\n    def log_to_file(self, message):\n        self._log_to_stdout(message)\n\n    def log_if_verbose(self, message):\n        self._log_to_stdout(message)\n\n    def log_with_prefix(self, prefix, message):\n        log_prefix = self._get_log_prefix(prefix)\n        for line in message.split('\\n'):\n            line = log_prefix + line\n            self._log_to_stdout(line)\n\n    def log_with_prefix_if_verbose(self, prefix, message):\n        self.log_with_prefix(prefix, message)\n\n    def warning(self, message):\n        self.log_with_prefix(\"WARNING:\", message)\n\n    def error_with_prefix(self, prefix, message):\n        self.log_with_prefix(\"ERROR:\", message)\n\n    def error(self, message):\n        self.error_with_prefix(\"\", message)\n\n\nglobal global_shared_context_logger\ntry:\n    # test whether global_shared_context_logger has been assigned previously\n    _ = 
global_shared_context_logger\nexcept NameError:\n    # previously not assigned, assign default value\n    # will assign global_shared_context_logger only once\n    global_shared_context_logger = Logger('/var/log/waagent.log', '/dev/console')\n\n\ndef log(message):\n    global_shared_context_logger.log(message)\n\n\ndef error(message):\n    global_shared_context_logger.error(message)\n\n\ndef warning(message):\n    global_shared_context_logger.warning(message)\n\n\ndef error_with_prefix(prefix, message):\n    global_shared_context_logger.error_with_prefix(prefix, message)\n\n\ndef log_if_verbose(message):\n    global_shared_context_logger.log_if_verbose(message)\n"
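\n\nif __name__ == '__main__':\n    # Illustrative usage sketch, not part of the module's original surface:\n    # it exercises the public API against a throwaway /tmp path (an arbitrary\n    # choice, instead of /var/log/waagent.log) so it can run unprivileged.\n    demo = Logger('/tmp/logger_demo.log', None, verbose=True)\n    demo.log('hello from the Logger demo')\n    demo.warning('this line gets a WARNING: prefix')\n    demo.error('this line gets an ERROR: prefix')\n    # throttle_log thins out periodic messages: all of 0-9, every 10th\n    # below 100, then every 100th thereafter.\n    for i in range(250):\n        if demo.throttle_log(i):\n            demo.log_if_verbose('iteration %d' % i)\n"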
  },
  {
    "path": "Utils/ovfutils.py",
    "content": "import re\nimport os\nimport base64\nimport xml.dom.minidom\nimport xml.sax.saxutils\nimport Utils.extensionutils as ext_utils\nimport Utils.constants as constants\nimport Utils.logger as logger\n\n\ndef get_node_text_data(a):\n    \"\"\"\n    Filter non-text nodes from DOM tree\n    \"\"\"\n    for b in a.childNodes:\n        if b.nodeType == b.TEXT_NODE:\n            return b.data\n\n\ndef translate_custom_data(data, configuration):\n    \"\"\"\n    Translate the custom data from a Base64 encoding. Default to no-op.\n    \"\"\"\n    data_to_decode = configuration.get(\"Provisioning.DecodeCustomData\")\n    if data_to_decode is not None and data_to_decode.lower().startswith(\"y\"):\n        return base64.b64decode(data)\n    return data\n\n\nclass OvfEnv(object):\n    \"\"\"\n    Read, and process provisioning info from provisioning file OvfEnv.xml\n    \"\"\"\n\n    #\n    # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n    # <Environment xmlns=\"http://schemas.dmtf.org/ovf/environment/1\"\n    # xmlns:oe=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:wa=\"http://schemas.microsoft.com/windowsazure\"\n    # xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #    <wa:ProvisioningSection>\n    #      <wa:Version>1.0</wa:Version>\n    #      <LinuxProvisioningConfigurationSet\n    #      xmlns=\"http://schemas.microsoft.com/windowsazure\" xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">\n    #        <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>\n    #        <HostName>HostName</HostName>\n    #        <UserName>UserName</UserName>\n    #        <UserPassword>UserPassword</UserPassword>\n    #        <DisableSshPasswordAuthentication>false</DisableSshPasswordAuthentication>\n    #        <SSH>\n    #          <PublicKeys>\n    #            <PublicKey>\n    #              <Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/authorized_keys</Path>\n    #            </PublicKey>\n    #          </PublicKeys>\n    #          <KeyPairs>\n    #            <KeyPair>\n    #              <Fingerprint>EB0C0AB4B2D5FC35F2F0658D19F44C8283E2DD62</Fingerprint>\n    #              <Path>$HOME/UserName/.ssh/id_rsa</Path>\n    #            </KeyPair>\n    #          </KeyPairs>\n    #        </SSH>\n    #      </LinuxProvisioningConfigurationSet>\n    #    </wa:ProvisioningSection>\n    # </Environment>\n    #\n    def __init__(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.WaNs = \"http://schemas.microsoft.com/windowsazure\"\n        self.OvfNs = \"http://schemas.dmtf.org/ovf/environment/1\"\n        self.MajorVersion = 1\n        self.MinorVersion = 0\n        self.ComputerName = None\n        self.AdminPassword = None\n        self.UserName = None\n        self.UserPassword = None\n        self.CustomData = None\n        self.DisableSshPasswordAuthentication = True\n        self.SshPublicKeys = []\n        self.SshKeyPairs = []\n\n    # this is a static function to return an instance of  OfvEnv\n    @staticmethod\n    def parse(xml_text, configuration, is_deprovision=False, write_custom_data=True):\n        \"\"\"\n        Parse xml tree, retrieving user and ssh key information.\n        Return self.\n        \"\"\"\n        ovf_env = OvfEnv()\n        if xml_text is None:\n            return None\n        logger.log_if_verbose(re.sub(\"UserPassword>.*?<\", \"UserPassword>*<\", xml_text))\n        try:\n            dom = 
xml.dom.minidom.parseString(xml_text)\n        except (TypeError, xml.parsers.expat.ExpatError):\n            # when the input is of unexpected type or invalid xml\n            return None\n        if len(dom.getElementsByTagNameNS(ovf_env.OvfNs, \"Environment\")) != 1:\n            logger.error(\"Unable to parse OVF XML.\")\n        section = None\n        newer = False\n        for p in dom.getElementsByTagNameNS(ovf_env.WaNs, \"ProvisioningSection\"):\n            for n in p.childNodes:\n                if n.localName == \"Version\":\n                    verparts = get_node_text_data(n).split('.')\n                    major = int(verparts[0])\n                    minor = int(verparts[1])\n                    if major > ovf_env.MajorVersion:\n                        newer = True\n                    if major != ovf_env.MajorVersion:\n                        break\n                    if minor > ovf_env.MinorVersion:\n                        newer = True\n                    section = p\n        if newer:\n            logger.warning(\n                \"Newer provisioning configuration detected. Please consider updating waagent.\")\n        if section is None:\n            logger.error(\n                \"Could not find ProvisioningSection with major version=\" + str(ovf_env.MajorVersion))\n            return None\n        ovf_env.ComputerName = get_node_text_data(section.getElementsByTagNameNS(ovf_env.WaNs, \"HostName\")[0])\n        ovf_env.UserName = get_node_text_data(section.getElementsByTagNameNS(ovf_env.WaNs, \"UserName\")[0])\n        if is_deprovision:\n            return ovf_env\n        try:\n            ovf_env.UserPassword = get_node_text_data(section.getElementsByTagNameNS(ovf_env.WaNs, \"UserPassword\")[0])\n        except (KeyError, ValueError, AttributeError, IndexError):\n            pass\n\n        if write_custom_data:\n            try:\n                cd_section = section.getElementsByTagNameNS(ovf_env.WaNs, \"CustomData\")\n                if len(cd_section) > 0:\n                    ovf_env.CustomData = get_node_text_data(cd_section[0])\n                    if len(ovf_env.CustomData) > 0:\n                        ext_utils.set_file_contents(constants.LibDir + '/CustomData', bytearray(\n                            translate_custom_data(ovf_env.CustomData, configuration)))\n                        logger.log('Wrote ' + constants.LibDir + '/CustomData')\n                    else:\n                        logger.error('<CustomData> contains no data!')\n            except Exception as e:\n                logger.error(str(e) + ' occured creating ' + constants.LibDir + '/CustomData')\n        \n        disable_ssh_passwd = section.getElementsByTagNameNS(ovf_env.WaNs, \"DisableSshPasswordAuthentication\")\n        if len(disable_ssh_passwd) != 0:\n            ovf_env.DisableSshPasswordAuthentication = (get_node_text_data(disable_ssh_passwd[0]).lower() == \"true\")\n        for pkey in section.getElementsByTagNameNS(ovf_env.WaNs, \"PublicKey\"):\n            logger.log_if_verbose(repr(pkey))\n            fp = None\n            path = None\n            for c in pkey.childNodes:\n                if c.localName == \"Fingerprint\":\n                    fp = get_node_text_data(c).upper()\n                    logger.log_if_verbose(fp)\n                if c.localName == \"Path\":\n                    path = get_node_text_data(c)\n                    logger.log_if_verbose(path)\n            ovf_env.SshPublicKeys += [[fp, path]]\n        for keyp in 
section.getElementsByTagNameNS(ovf_env.WaNs, \"KeyPair\"):\n            fp = None\n            path = None\n            logger.log_if_verbose(repr(keyp))\n            for c in keyp.childNodes:\n                if c.localName == \"Fingerprint\":\n                    fp = get_node_text_data(c).upper()\n                    logger.log_if_verbose(fp)\n                if c.localName == \"Path\":\n                    path = get_node_text_data(c)\n                    logger.log_if_verbose(path)\n            ovf_env.SshKeyPairs += [[fp, path]]\n        return ovf_env\n\n    def prepare_dir(self, filepath, distro):\n        \"\"\"\n        Create home dir for self.UserName\n        Change owner and return path.\n        \"\"\"\n        home = distro.get_home()\n        # Expand HOME variable if present in path\n        path = os.path.normpath(filepath.replace(\"$HOME\", home))\n        if (not path.startswith(\"/\")) or path.endswith(\"/\"):\n            return None\n        dir_name = path.rsplit('/', 1)[0]\n        if dir_name != \"\":\n            ext_utils.create_dir(dir_name, \"root\", 0o700)\n            if path.startswith(os.path.normpath(home + \"/\" + self.UserName + \"/\")):\n                ext_utils.create_dir(dir_name, self.UserName, 0o700)\n        return path\n"
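\n\nif __name__ == '__main__':\n    # Illustrative sketch, not part of the original module: parse a minimal\n    # inline OVF document. Run from the repository root (for example,\n    # 'python -m Utils.ovfutils') so the Utils package imports resolve.\n    # _DemoConfiguration is a hypothetical stand-in for vmaccess.Configuration;\n    # parse() only requires an object with a .get() method.\n    class _DemoConfiguration(object):\n        def get(self, key):\n            return 'n'  # never base64-decode CustomData in this demo\n\n    _sample = ('<Environment xmlns=\"http://schemas.dmtf.org/ovf/environment/1\"'\n               ' xmlns:wa=\"http://schemas.microsoft.com/windowsazure\">'\n               '<wa:ProvisioningSection><wa:Version>1.0</wa:Version>'\n               '<wa:HostName>demo-host</wa:HostName>'\n               '<wa:UserName>demo-user</wa:UserName>'\n               '</wa:ProvisioningSection></Environment>')\n    demo_env = OvfEnv.parse(_sample, _DemoConfiguration(), write_custom_data=False)\n    if demo_env is not None:\n        print('HostName=%s UserName=%s' % (demo_env.ComputerName, demo_env.UserName))\n"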
  },
  {
    "path": "Utils/test/MockUtil.py",
    "content": "#!/usr/bin/env python\n#\n# Sample Extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass MockUtil():\n    def __init__(self, test):\n        self.test = test\n\n    def get_log_dir(self):\n        return \"/tmp\"\n\n    def log(self, msg):\n        print(msg)\n\n    def error(self, msg):\n        print(msg)\n\n    def get_seq_no(self):\n        return \"0\"\n\n    def do_status_report(self, operation, status, status_code, message):\n        self.test.assertNotEqual(None, message)\n        self.last = \"do_status_report\"\n\n    def do_exit(self,exit_code,operation,status,code,message):\n        self.test.assertNotEqual(None, message)\n        self.last = \"do_exit\"\n"
  },
  {
    "path": "Utils/test/env.py",
    "content": "#!/usr/bin/env python\n#\n# Sample Extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\n#append installer directory to sys.path\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n"
  },
  {
    "path": "Utils/test/mock.sh",
    "content": "#!/bin/bash\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\necho \"Start...\"\nsleep 0.1\necho \"Running\"\n>&2 echo \"Warning\"\nsleep 0.1\necho \"Finished\"\nexit $1\n"
  },
  {
    "path": "Utils/test/mock_sshd_config",
    "content": "# Package generated configuration file\n# See the sshd_config(5) manpage for details\n\n# What ports, IPs and protocols we listen for\nPort 22\n# Use these options to restrict which interfaces/protocols sshd will bind to\n#ListenAddress ::\n#ListenAddress 0.0.0.0\nProtocol 2\n# HostKeys for protocol version 2\nHostKey /etc/ssh/ssh_host_rsa_key\nHostKey /etc/ssh/ssh_host_dsa_key\nHostKey /etc/ssh/ssh_host_ecdsa_key\nHostKey /etc/ssh/ssh_host_ed25519_key\n#Privilege Separation is turned on for security\nUsePrivilegeSeparation yes\n\n# Lifetime and size of ephemeral version 1 server key\nKeyRegenerationInterval 3600\nServerKeyBits 1024\n\n# Logging\nSyslogFacility AUTH\nLogLevel INFO\n\n# Authentication:\nLoginGraceTime 10m\nPermitRootLogin without-password\nStrictModes yes\n\nRSAAuthentication yes\nPubkeyAuthentication yes\n#AuthorizedKeysFile %h/.ssh/authorized_keys\n\n# Don’t read the user’s ~/.rhosts and ~/.shosts files\nIgnoreRhosts yes\n# For this to work you will also need host keys in /etc/ssh_known_hosts\nRhostsRSAAuthentication no\n# similar for protocol version 2\nHostbasedAuthentication no\n# Uncomment if you don’t trust ~/.ssh/known_hosts for RhostsRSAAuthentication\n#IgnoreUserKnownHosts yes\n\n# To enable empty passwords, change to yes (NOT RECOMMENDED)\nPermitEmptyPasswords no\n\n# Change to yes to enable challenge-response passwords (beware issues with\n# some PAM modules and threads)\nChallengeResponseAuthentication yes\n\n# Change to no to disable tunnelled clear text passwords\nPasswordAuthentication no\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosGetAFSToken no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\n#GSSAPICleanupCredentials yes\n\nX11Forwarding yes\nX11DisplayOffset 10\nPrintMotd no\nPrintLastLog yes\nTCPKeepAlive yes\n#UseLogin no\n\n#MaxStartups 10:30:60\n#Banner /etc/issue.net\n\n# Allow client to pass locale environment variables\nAcceptEnv LANG LC_*\n\nSubsystem sftp /usr/lib/openssh/sftp-server\n\n# Set this to ‘yes’ to enable PAM authentication, account processing,\n# and session processing. If this is enabled, PAM authentication will\n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication. Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of “PermitRootLogin without-password”.\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to ‘no’.\nUsePAM yes\n\n# CLOUD_IMG: This file was created/modified by the Cloud Image build process\nClientAliveInterval 120\nAuthorizedKeysCommand /usr/sbin/aad_certhandler %u %k\nAuthorizedKeysCommandUser root\n"
  },
  {
    "path": "Utils/test/non_latin_characters.txt",
    "content": "ü"
  },
  {
    "path": "Utils/test/ovf-env-empty.xml",
    "content": "<!DOCTYPE _[<!ELEMENT _ EMPTY>]><_/>"
  },
  {
    "path": "Utils/test/ovf-env.xml",
    "content": "<ns0:Environment xmlns:ns0=\"http://schemas.dmtf.org/ovf/environment/1\" xmlns:ns1=\"http://schemas.microsoft.com/windowsazure\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n<ns1:ProvisioningSection>\n<ns1:Version>1.0</ns1:Version>\n<ns1:LinuxProvisioningConfigurationSet>\n<ns1:ConfigurationSetType>LinuxProvisioningConfiguration</ns1:ConfigurationSetType>\n<ns1:UserName>AzureUser</ns1:UserName>\n<ns1:DisableSshPasswordAuthentication>true</ns1:DisableSshPasswordAuthentication>\n<ns1:SSH>\n<ns1:PublicKeys>\n<ns1:PublicKey>\n<ns1:Fingerprint>85C04BB59660B7A2B845DCDD50174B2059CC77A4</ns1:Fingerprint>\n<ns1:Path>/home/AzureUser/.ssh/authorized_keys</ns1:Path>\n</ns1:PublicKey>\n</ns1:PublicKeys>\n</ns1:SSH>\n<ns1:HostName>ubuntu18</ns1:HostName></ns1:LinuxProvisioningConfigurationSet>\n</ns1:ProvisioningSection>\n<ns1:PlatformSettingsSection>\n<ns1:Version>1.0</ns1:Version>\n<ns1:PlatformSettings>\n<ns1:KmsServerHostname>kms.core.windows.net</ns1:KmsServerHostname>\n<ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>\n<ns1:GuestAgentPackageName xsi:nil=\"true\" />\n<ns1:RetainWindowsPEPassInUnattend>true</ns1:RetainWindowsPEPassInUnattend>\n<ns1:RetainOfflineServicingPassInUnattend>true</ns1:RetainOfflineServicingPassInUnattend>\n<ns1:PreprovisionedVm>false</ns1:PreprovisionedVm>\n<ns1:EnableTrustedImageIdentifier>false</ns1:EnableTrustedImageIdentifier>\n</ns1:PlatformSettings>\n</ns1:PlatformSettingsSection>\n</ns0:Environment>"
  },
  {
    "path": "Utils/test/place_vmaccess_on_local_machine.sh",
    "content": "#!/usr/bin/env bash\n# must run with sudo permissions\n# this file copies the local changes to /var/lib/waagent/Microsoft.OSTCExtensions.VMAccessForLinux-<version>\n\n# remember to update the version number to what you have\ndestdir=\"/var/lib/waagent/Microsoft.OSTCExtensions.VMAccessForLinux-1.5.4\"\nutilsDest=\"$destdir/Utils\"\nvmaccessDest=\"$destdir/vmaccess.py\"\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\n\nutilsSource=\"$currentDir/..\"\nvmaccessSource=\"$currentDir/../../VMAccess/vmaccess.py\"\n\ncp -r -f $utilsSource $utilsDest\ncp -f $vmaccessSource $vmaccessDest\nfind $destdir -name '*.pyc' | xargs rm\n"
  },
  {
    "path": "Utils/test/test_encode.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport Utils.extensionutils as eu\nimport unittest\n\n\nclass TestEncode(unittest.TestCase):\n    def test_encode(self):\n        contents = eu.get_file_contents('mock_sshd_config')\n        encoded_contents = eu.encode_for_writing_to_file(contents)\n        known_non_ascii_character = b\"%c\" % encoded_contents[2353]\n        self.assertEqual(known_non_ascii_character, b'\\x9d')\n\nclass TestRunCommandGetOutput(unittest.TestCase):\n    def test_output(self):\n        cmd = [\"cat\", \"non_latin_characters.txt\"]\n        return_code, output_string = eu.run_command_get_output(cmd)\n        self.assertEqual(0, return_code)\n        expected_character_byte = b'\\xc3\\xbc'\n        expected_character = expected_character_byte.decode(\"utf-8\")\n        self.assertEqual(expected_character, output_string[0])\n\n    def test_stdin(self):\n        cmd = ['bash', '-c', 'read ; echo $REPLY']\n        cmd_input = b'\\xc3\\xbc' # ü character\n        return_code, output_string = eu.run_send_stdin(cmd, cmd_input)\n        self.assertEqual(0, return_code)\n        self.assertEqual(cmd_input.decode('utf-8'), output_string[0])\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_extensionutils_code_injection.py",
    "content": "#!/usr/bin/env python\n\nimport os\nimport pwd\nimport shutil\nimport tempfile\nimport unittest\nimport Utils.extensionutils as ext_utils\nimport Utils.logger as logger\n\n\nlogger.global_shared_context_logger = logger.TestLogger()\n\n\nclass TestCodeInjection(unittest.TestCase):\n    test_dir = \"./test_output\"\n\n    def get_random_filename(self):\n        f = tempfile.NamedTemporaryFile(dir=TestCodeInjection.test_dir, delete=False)\n        return f.name\n\n    def cleanup(self):\n        shutil.rmtree(TestCodeInjection.test_dir)\n\n    def setup(self):\n        current_user = pwd.getpwuid(os.getuid())\n        ext_utils.create_dir(TestCodeInjection.test_dir, current_user.pw_name, 0o700)\n\n    def test_code_injection(self):\n        # failure cases\n        exit_code, string_output = ext_utils.run_command_get_output(\"echo hello; echo world\")\n        self.assertNotEqual(0, exit_code, \"exit code != 0\")\n        exit_code, string_output = ext_utils.run_command_get_output([\"echo hello; echo world\"])\n        self.assertNotEqual(0, exit_code, \"exit code != 0\")\n\n        # success case\n        exit_code, string_output = ext_utils.run_command_get_output([\"echo\", \"hello\", \";\", \"echo\", \"world\"])\n        self.assertEqual(0, exit_code, \"exit code == 0\")\n        self.assertEqual(\"hello ; echo world\\n\", string_output, \"unexpected output\")\n        exit_code, string_output = ext_utils.run_command_get_output([\"echo\", \"hello\", \"world\"])\n        self.assertEqual(0, exit_code, \"exit code == 0\")\n\n    def test_code_injection2(self):\n        self.setup()\n        self.addCleanup(self.cleanup)\n        # failure cases\n        out_file = self.get_random_filename()\n        exit_code = ext_utils.run_command_and_write_stdout_to_file(\n            \"echo hello; echo world\", out_file)\n        self.assertNotEqual(0, exit_code, \"exit code != 0\")\n\n        out_file = self.get_random_filename()\n        exit_code = ext_utils.run_command_and_write_stdout_to_file(\n            [\"echo hello; echo world\"], out_file)\n        self.assertNotEqual(0, exit_code, \"exit code != 0\")\n\n        # success case\n        out_file = self.get_random_filename()\n        exit_code = ext_utils.run_command_and_write_stdout_to_file(\n            [\"echo\", \"hello\", \";\", \"echo\", \"world\"], out_file)\n        self.assertEqual(0, exit_code, \"exit code == 0\")\n        file_contents = ext_utils.get_file_contents(out_file)\n        self.assertEqual(\"hello ; echo world\\n\", file_contents, \"unexpected output\")\n\n        out_file = self.get_random_filename()\n        exit_code = ext_utils.run_command_and_write_stdout_to_file([\n            \"echo\", \"hello\", \"world\"], out_file)\n        self.assertEqual(0, exit_code, \"exit code == 0\")\n        file_contents = ext_utils.get_file_contents(out_file)\n        self.assertEqual(\"hello world\\n\", file_contents, \"unexpected output\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_logutil.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport LogUtil as lu\n\n\nclass TestLogUtil(unittest.TestCase):    \n    def test_tail(self):\n        with open(\"/tmp/testtail\", \"w+\") as F:\n            F.write(u\"abcdefghijklmnopqrstu\\u6211vwxyz\".encode(\"utf-8\"))\n        tail = lu.tail(\"/tmp/testtail\", 2)\n        self.assertEquals(\"yz\", tail)\n\n        tail = lu.tail(\"/tmp/testtail\")\n        self.assertEquals(\"abcdefghijklmnopqrstuvwxyz\", tail)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_null_protected_settings.py",
    "content": "#!/usr/bin/env python\n#\n# Sample Extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport HandlerUtil as Util\n\ndef mock_log(*args, **kwargs):\n    pass\n\nclass TestNullProtectedSettings(unittest.TestCase):\n    def test_null_protected_settings(self):\n        hutil = Util.HandlerUtility(mock_log, mock_log, \"UnitTest\", \"HandlerUtil.UnitTest\", \"0.0.1\")\n        config = hutil._parse_config(Settings)\n        handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n        self.assertEquals(handlerSettings[\"protectedSettings\"], None)\n\nSettings=\"\"\"\\\n{\n    \"runtimeSettings\":[{\n        \"handlerSettings\":{\n            \"protectedSettingsCertThumbprint\":null,\n            \"protectedSettings\":null,\n            \"publicSettings\":{}\n            }\n     }]\n}\n\"\"\"\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_ovf_utils.py",
    "content": "#!/usr/bin/env python\n\nimport os.path as path\nimport unittest\nimport Utils.extensionutils as ext_utils\nimport Utils.ovfutils as ovf_utils\nimport Utils.logger as logger\n\n\n# dummy configuration class based on vmaccess.Configuration\nclass Configuration:\n    def __init__(self):\n        self.dictionary = {\n            \"Provisioning.DecodeCustomData\": \"n\"\n        }\n\n    def get(self, key):\n        return self.dictionary.get(key)\n\n\nconfig = Configuration()\n\nlogger.global_shared_context_logger = logger.TestLogger()\n\n\nclass TestTestOvfUtils(unittest.TestCase):\n    def test_ovf_env_parse(self):\n        current_dir = path.dirname(path.abspath(__file__))\n        ovf_xml = ext_utils.get_file_contents(path.join(current_dir, 'ovf-env.xml'))\n        ovf_env = ovf_utils.OvfEnv.parse(ovf_xml, config)\n        self.assertIsNotNone(ovf_env, \"ovf_env should not be null\")\n\n    def test_ovf_env_parse_minimalxml(self):\n        current_dir = path.dirname(path.abspath(__file__))\n        ovf_xml = ext_utils.get_file_contents(path.join(current_dir, 'ovf-env-empty.xml'))\n        ovf_env = ovf_utils.OvfEnv.parse(ovf_xml, config)\n        self.assertIsNone(ovf_env, \"ovf_env should be null\")\n\n    def test_ovf_env_parse_none_string(self):\n        ovf_env = ovf_utils.OvfEnv.parse(None, config)\n        self.assertIsNone(ovf_env, \"ovf_env should be null\")\n\n    def test_ovf_env_parse_empty_string(self):\n        ovf_env = ovf_utils.OvfEnv.parse(\"\", config)\n        self.assertIsNone(ovf_env, \"ovf_env should be null\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_redacted_settings.py",
    "content": "#!/usr/bin/env python\n#\n# Tests for redacted settings\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport Utils.HandlerUtil as Util\n\n\nclass TestRedactedProtectedSettings(unittest.TestCase):\n\n    def test_redacted_protected_settings(self):\n        redacted = Util.HandlerUtility.redact_protected_settings(settings_original)\n        self.assertIn('\"protectedSettings\": \"*** REDACTED ***\"', redacted)\n        self.assertIn('\"protectedSettingsCertThumbprint\": \"*** REDACTED ***\"', redacted)\n\n\nsettings_original = \"\"\"\\\n{\n    \"runtimeSettings\": [{\n        \"handlerSettings\": {\n            \"protectedSettingsCertThumbprint\": \"9310D2O49D7216D4A1CEDCE9D8A7CE5DBD7FB7BF\",\n            \"protectedSettings\": \"MIIC4AYJKoZIhvcNAQcWoIIB0TCDEc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEB8f7DyzHLGjSDLnEWd4YeAwDQYJKoZIhvcNAQEBBQAEggEAiZj2gQtT4MpdTaEH8rUVFB/8Ucc8OxGFWu8VKbIdoHLKp1WcDb7Vlzv6fHLBIccgXGuR1XHTvtlD4QiKpSet341tPPug/R5ZtLSRz1pqtXZdrFcuuSxOa6ib/+la5ukdygcVwkEnmNSQaiipPKyqPH2JsuhmGCdXFiKwCSTrgGE6GyCBtaK9KOf48V/tYXHnDGrS9q5a1gRF5KVI2B26UYSO7V7pXjzYCd/Sp9yGj7Rw3Kqf9Lpix/sPuqWjV6e2XFlD3YxaHSeHVnLI/Bkz2E6Ri8yfPYus52r/mECXPL2YXqY9dGyrlKKIaD9AuzMyvvy1A74a9VBq7zxQQ4adEzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECDyEf4mRrmWJgDhW4j2nRNTJU4yXxocQm/PhAr39Um7n0pgI2Cn28AabYtsHWjKqr8Al9LX6bKm8cnmnLjqTntphCw==\",\n            \"publicSettings\": {}\n            }\n     }]\n}\n\"\"\"\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "Utils/test/test_scriptutil.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport env\nimport ScriptUtil as su\nimport unittest\nfrom MockUtil import MockUtil\n\nclass TestScriptUtil(unittest.TestCase):\n    def test_parse_args(self):\n        print(__file__)\n        cmd = u'sh foo.bar.sh -af bar --foo=bar | more \\u6211'\n        args = su.parse_args(cmd.encode('utf-8'))\n        self.assertNotEquals(None, args)\n        self.assertNotEquals(0, len(args))\n        print(args)\n\n    def test_run_command(self):\n        hutil = MockUtil(self)\n        test_script = \"mock.sh\"\n        os.chdir(os.path.join(env.root, \"test\"))\n        exit_code = su.run_command(hutil, [\"sh\", test_script, \"0\"], os.getcwd(), 'RunScript-0', 'TestExtension', '1.0', True, 0.1)\n        self.assertEquals(0, exit_code)\n        self.assertEquals(\"do_exit\", hutil.last)\n        exit_code = su.run_command(hutil, [\"sh\", test_script, \"75\"], os.getcwd(), 'RunScript-1', 'TestExtension', '1.0', False, 0.1)\n        self.assertEquals(75, exit_code)\n        self.assertEquals(\"do_status_report\", hutil.last)\n    \n    def test_log_or_exit(self):        \n        hutil = MockUtil(self)\n        su.log_or_exit(hutil, True, 0, 'LogOrExit-0', 'Message1')\n        self.assertEquals(\"do_exit\", hutil.last)\n        su.log_or_exit(hutil, False, 0, 'LogOrExit-1', 'Message2')\n        self.assertEquals(\"do_status_report\", hutil.last)\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "VMAccess/CHANGELOG.md",
    "content": "## 1.5.10 (2020-09-09)\n- VMAccess Linux is now more robust to the absence of ovf-env.xml file\n\n## 1.5.6 - 1.5.9\n- several bug-fixes\n\n## 1.5.5 (2020-07-20)\n- Created new python modules under Utils that are meant to be python 3\n  compatible and are supposed to be used instead of importing waagent python file through waagentloader.py\n- Fixed code injection vulnerability through the username\n\n## 1.5.1 (2018-10-31)\n- Support for Python3. Changing VMAccess to work for both Python 2 and Python 3 \n  interpreter.\n\n## 1.4.6.0 (2016-09-16)\n- Forcibly reset ChallengeAuthenticationResponse to no.  This value was inadvertently set\n  in previous releases, and is forcibly reset.\n\n## 1.4.5.0 (2016-09-07)\n- Check for None before checking the length of a user's password.  This is\n  fallout from allowing and rejecting empty passwords.\n\n## 1.4.4.0 (2016-09-06)\n- Do not set ChallengeResponseAuthenticaiton.  This value should not\n  be changed by VMAccess.\n\n## 1.4.3.0 (2016-09-05)\n- Reject zero length passwords.\n\n## 1.4.2.0 (2016-08-25)\n- Ensure expiration (if specified) is used when creating an account\n- Backup sshd_config before any edits are made.\n- Ensure sshd_config is restarted when edits are made.\n\n## 1.4.1.0 (2016-07-27)\n- Install operation posts incorrect status [#206]\n- Misspelling of resources/debian_default\n"
  },
  {
    "path": "VMAccess/HandlerManifest.json",
    "content": "[\n  {\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"disableCommand\": \"extension_noop.sh\",\n      \"enableCommand\": \"extension_shim.sh -c ./vmaccess.py  -e\",\n      \"installCommand\": \"extension_noop.sh\",\n      \"uninstallCommand\": \"extension_noop.sh\",\n      \"updateCommand\": \"extension_noop.sh\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": false,\n      \"continueOnUpdateFailure\": true\n    }\n  }\n]\n"
  },
  {
    "path": "VMAccess/README.md",
    "content": "# VMAccess Extension\nProvide several ways to allow owner of the VM to get the SSH access back and perform additional VM disk check tasks. \n\nCurrent version is [1.5](https://github.com/Azure/azure-linux-extensions/releases/tag/VMAccess-1.5.18).\n\nYou can read the User Guide below.\n* [Manage administrative users, SSH, and check or repair disks on Linux VMs by using the VMAccess extension](https://learn.microsoft.com/en-us/azure/virtual-machines/extensions/vmaccess)\n\nVMAccess Extension can:\n* Reset the password of the original sudo user \n* Create a new sudo user with the password specified\n* Set the public host key with the key given\n* Reset the public host key provided during VM provisioning if host key not provided\n* Open the SSH port(22) and reset the sshd_config if reset_ssh is set to true\n* Remove the existing user\n* Check disks\n* Repair added disk\n* Remove prior public keys when a new public key is provided\n* Restore the original backup sshd_config if restore_backup_ssh is set to true\n\n# Security Notes:\n* VMAccess Extension is designed for regaining access to a VM in the event that access is lost. \n* Based on this principle, it will grant sudo permission to the account specified in the username field.\n* Do not specify a user in the username field if you do not wish that user to gain sudo permissions.\n* Instead, login to the VM and use built-in tools (e.g. usermod, chage, etc) to manage unprivileged users.\n\n# User Guide\n## 1. Configuration schema\n### 1.1. Public configuration\n\nSchema for the public configuration file looks like:\n\n* `check_disk`: (optional, boolean) whether or not to check disk\n* `repair_disk`: (optional, boolean) whether or not to repair disk\n* `disk_name`: (boolean) name of disk to repair (required when repair_disk is true)\n\n```json\n{\n  \"check_disk\": \"true\",\n  \"repair_disk\": \"true\",\n  \"disk_name\": \"<disk-name>\"\n}\n```\n\n### 1.2. Protected configuration\n\nSchema for the protected configuration file looks like this:\n\n* `username`: (required, string) the name of the user\n* `password`: (optional, string) the password of the user\n* `ssh_key`: (optional, string) the public key of the user\n* `reset_ssh`: (optional, boolean) whether or not reset the ssh\n* `remove_user`: (optional, string) the user name to remove\n* `expiration`: (optional, string) expiration of the account, defaults to never, e.g. 
2016-01-01.\n* `remove_prior_keys`: (optional, boolean) whether or not to remove old SSH keys when adding a new one\n* `restore_backup_ssh`: (optional, boolean) whether or not to restore the original backed-up sshd config\n\n```json\n{\n  \"username\": \"<username>\",\n  \"password\": \"<password>\",\n  \"ssh_key\": \"<cert-contents>\",\n  \"reset_ssh\": true,\n  \"remove_user\": \"<username-to-remove>\",\n  \"expiration\": \"<yyyy-mm-dd>\",\n  \"remove_prior_keys\": true,\n  \"restore_backup_ssh\": true\n}\n```\n\n`ssh_key` supports `ssh-rsa`, `ssh-ed25519` and `.pem` formats.\n\n* If your public key is in `ssh-rsa` format, for example, `ssh-rsa XXXXXXXX`, you can use:\n\n  ```\n  \"ssh_key\": \"ssh-rsa XXXXXXXX\"\n  ```\n\n* If your public key is in `ssh-ed25519` format, for example, `ssh-ed25519 XXXXXXXX`, you can use:\n\n  ```\n  \"ssh_key\": \"ssh-ed25519 XXXXXXXX\"\n  ```\n\n* If your public key is in `.pem` format, use the following UNIX command to convert the .pem file to a value that can be passed in a JSON string:\n\n  ```\n  awk 'NF {sub(/\\r/, \"\"); printf \"%s\\\\n\",$0;}' myCert.pem\n  ```\n\n  You can use:\n  ```\n  \"ssh_key\": \"-----BEGIN CERTIFICATE-----\\nXXXXXXXXXXXXXXXXXXXXXXXX\\n-----END CERTIFICATE-----\"\n  ```\n\n## 2. Deploying the Extension to a VM\n\nYou can deploy it using the Azure CLI, Azure PowerShell, or an ARM template.\n\n### 2.1. Using [**Azure CLI**][azure-cli]\n\nCreate a `settings.json` (optional) and a `protected_settings.json` and run:\n```\n$ azure vm extension set \\\n--resource-group <resource-group> \\\n--vm-name <vm-name> \\\n--name VMAccessForLinux \\\n--publisher Microsoft.OSTCExtensions \\\n--version 1.5 \\\n--settings settings.json \\\n--protected-settings protected_settings.json\n```\n\nTo retrieve the deployment state of extensions for a given VM, run:\n```\n$ azure vm extension list \\\n--resource-group <resource-group> \\\n--vm-name <vm-name> -o table\n```\n\n### 2.2. Using [**Azure PowerShell**][azure-powershell]\n\nYou can deploy the VMAccess Extension by running:\n\n```powershell\n$username = \"<username>\"\n$sshKey = \"<cert-contents>\"\n\n$settings = @{\"check_disk\" = $true};\n$protectedSettings = @{\"username\" = $username; \"ssh_key\" = $sshKey};\n\nSet-AzVMExtension -ResourceGroupName \"<resource-group>\" -VMName \"<vm-name>\" -Location \"<location>\" `\n-Publisher \"Microsoft.OSTCExtensions\" -ExtensionType \"VMAccessForLinux\" -Name \"VMAccessForLinux\" `\n-TypeHandlerVersion \"1.5\" -Settings $settings -ProtectedSettings $protectedSettings\n```\n\nYou can provide and modify extension settings by using strings:\n\n```powershell\n$username = \"<username>\"\n$sshKey = \"<cert-contents>\"\n\n$settingsString = '{\"check_disk\":true}';\n$protectedSettingsString = '{\"username\":\"' + $username + '\",\"ssh_key\":\"' + $sshKey + '\"}';\n\nSet-AzVMExtension -ResourceGroupName \"<resource-group>\" -VMName \"<vm-name>\" -Location \"<location>\" `\n-Publisher \"Microsoft.OSTCExtensions\" -ExtensionType \"VMAccessForLinux\" -Name \"VMAccessForLinux\" `\n-TypeHandlerVersion \"1.5\" -SettingString $settingsString -ProtectedSettingString $protectedSettingsString\n```\n\n
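> Note: the `azure` commands in section 2.1 use the legacy xplat Azure CLI. A rough equivalent with the current [`az` CLI](https://learn.microsoft.com/en-us/cli/azure/) is shown below, assuming the same `settings.json`/`protected_settings.json` files; adjust the version to your deployment:\n\n```\n$ az vm extension set \\\n--resource-group <resource-group> \\\n--vm-name <vm-name> \\\n--name VMAccessForLinux \\\n--publisher Microsoft.OSTCExtensions \\\n--version 1.5 \\\n--settings settings.json \\\n--protected-settings protected_settings.json\n```\n\n### 2.3. 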
Using [**ARM Template**][arm-template]\n```json\n{\n  \"type\": \"Microsoft.Compute/virtualMachines/extensions\",\n  \"name\": \"<extension-deployment-name>\",\n  \"apiVersion\": \"<api-version>\",\n  \"location\": \"<location>\",\n  \"dependsOn\": [\n    \"[concat('Microsoft.Compute/virtualMachines/', <vm-name>)]\"\n  ],\n  \"properties\": {\n    \"publisher\": \"Microsoft.OSTCExtensions\",\n    \"type\": \"VMAccessForLinux\",\n    \"typeHandlerVersion\": \"1.5\",\n    \"autoUpgradeMinorVersion\": true,\n    \"settings\": {},\n    \"protectedSettings\": {\n      \"username\": \"<username>\",\n      \"password\": \"<password>\",\n      \"reset_ssh\": true,\n      \"ssh_key\": \"<ssh-key>\",\n      \"remove_user\": \"<username-to-remove>\"\n    }\n  }\n}\n```\n\nRefer to the following sample [ARM template](https://github.com/azure/azure-quickstart-templates/tree/master/demos/vmaccess-on-ubuntu).\n\nFor more details about ARM templates, please visit [Authoring Azure Resource Manager templates](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/).\n\n## 3. Scenarios\n\n### 3.1 Resetting the password\n\nIn the Public Settings:\n```json\n{\n  \"check_disk\": \"false\"\n}\n```\n\n> VMAccessForLinux resets and restarts the SSH server if a password is specified. This is necessary if the VM was deployed with public key authentication because the SSH server is not configured to accept passwords.  For this reason, the SSH server's configuration is reset to allow password authentication, and restarted to accept this new configuration.  This behavior can be disabled by setting the reset_ssh value to false.\n\nIn the Protected Settings:\n```json\n{\n  \"username\": \"currentusername\",\n  \"password\": \"newpassword\",\n  \"reset_ssh\": \"false\"\n}\n```\n\n### 3.2 Resetting the SSH key\n```json\n{\n  \"username\": \"currentusername\",\n  \"ssh_key\": \"contentofsshkey\"\n}\n```\n\n### 3.3 Resetting the password and the SSH key\n```json\n{\n  \"username\": \"currentusername\",\n  \"ssh_key\": \"contentofsshkey\",\n  \"password\": \"newpassword\"\n}\n```\n\n### 3.4 Creating a new sudo user account with the password\n```json\n{\n  \"username\": \"newusername\",\n  \"password\": \"newpassword\"\n}\n```\n\n#### 3.4.1 Creating a new sudo user account with a password and expiration date\n```json\n{\n  \"username\": \"newusername\",\n  \"password\": \"newpassword\",\n  \"expiration\": \"2016-12-31\"\n}\n```\n\n### 3.5 Creating a new sudo user account with the SSH key\n```json\n{\n  \"username\": \"newusername\",\n  \"ssh_key\": \"contentofsshkey\"\n}\n```\n\n#### 3.5.1 Creating a new sudo user account with an SSH key and expiration date\n```json\n{\n  \"username\": \"newusername\",\n  \"ssh_key\": \"contentofsshkey\",\n  \"expiration\": \"2016-12-31\"\n}\n```\n\n### 3.6 Resetting the SSH configuration\n```json\n{\n  \"reset_ssh\": true\n}\n```\n\n### 3.7 Removing an existing user\n```json\n{\n  \"remove_user\": \"usertoberemoved\"\n}\n```\n\n### 3.8 Checking added disks on a VM\n```json\n{\n    \"check_disk\": \"true\"\n}\n```\n\n### 3.9 Repairing added disks on a VM\n```json\n{\n    \"repair_disk\": \"true\",\n    \"disk_name\": \"userdisktofix\"\n}\n```\n\n### 3.10 Removing prior SSH keys (only when a new key is provided)\n```json\n{\n    \"username\": \"newusername\",\n    \"ssh_key\": \"contentofsshkey\",\n    \"remove_prior_keys\": true\n}\n```\n\n### 3.11 Restoring the original SSH configuration\n```json\n{\n    \"restore_backup_ssh\": true\n}\n```\n\n## Supported Linux Distributions\n- Ubuntu 
12.04 and higher\n- CentOS 6.5 and higher\n- Oracle Linux 6.4.0.0.0 and higher\n- openSUSE 13.1 and higher\n- SUSE Linux Enterprise Server 11 SP3 and higher\n\n## Debug\n\n* The status of the extension is reported back to Azure so that the user can\nsee the status in the Azure Portal.\n* The extension writes its operation log to the `/var/log/azure/<extension-name>/<version>/extension.log` file.\n\n[azure-powershell]: https://azure.microsoft.com/en-us/documentation/articles/powershell-install-configure/\n[azure-cli]: https://azure.microsoft.com/en-us/documentation/articles/xplat-cli/\n[arm-template]: http://azure.microsoft.com/en-us/documentation/templates/\n[arm-overview]: https://azure.microsoft.com/en-us/documentation/articles/resource-group-overview/\n"
  },
  {
    "path": "VMAccess/extension_noop.sh",
    "content": "#!/usr/bin/env bash\n\n# There is no need to write a status file for commands other than Enable\nexit 0"
  },
  {
    "path": "VMAccess/extension_shim.sh",
    "content": "#!/usr/bin/env bash\n\n# Keeping the default command\nCOMMAND=\"\"\nPYTHON=\"\"\n\nUSAGE=\"$(basename \"$0\") [-h] [-i|--install] [-u|--uninstall] [-d|--disable] [-e|--enable] [-p|--update]\n\nProgram to find the installed python on the box and invoke a Python extension script.\n\nwhere:\n    -h|--help       show this help text\n    -i|--install    install the extension\n    -u|--uninstall  uninstall the extension\n    -d|--disable    disable the extension\n    -e|--enable     enable the extension\n    -p|--update     update the extension\n    -c|--command    command to run\n\nexample:\n# Install usage\n$ bash extension_shim.sh -i\npython ./vmaccess.py -install\n\n# Custom executable python file\n$ bash extension_shim.sh -c \"\"hello.py\"\" -i\npython hello.py -install\n\n# Custom executable python file with arguments\n$ bash extension_shim.sh -c \"\"hello.py --install\"\"\npython hello.py --install\n\"\n\nfunction find_python(){\n    local python_exec_command=$1\n\n    # Check if there is python defined.\n    if command -v python >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python\"\n    else\n        # Python was not found. Searching for Python3 now.\n        if command -v python3 >/dev/null 2>&1 ; then\n            eval ${python_exec_command}=\"python3\"\n        fi\n    fi\n}\n\n# Transform long options to short ones for getopts support (getopts doesn't support long args)\nfor arg in \"$@\"; do\n  shift\n  case \"$arg\" in\n    \"--help\")       set -- \"$@\" \"-h\" ;;\n    \"--install\")    set -- \"$@\" \"-i\" ;;\n    \"--update\")     set -- \"$@\" \"-p\" ;;\n    \"--enable\")     set -- \"$@\" \"-e\" ;;\n    \"--disable\")    set -- \"$@\" \"-d\" ;;\n    \"--uninstall\")  set -- \"$@\" \"-u\" ;;\n    *)              set -- \"$@\" \"$arg\"\n  esac\ndone\n\nif [ -z \"$arg\" ]\nthen\n   echo \"$USAGE\" >&2\n   exit 1\nfi\n\n# Get the arguments\nwhile getopts \"iudephc:?\" o; do\n    case \"${o}\" in\n        h|\\?)\n            echo \"$USAGE\"\n            exit 0\n            ;;\n        i)\n            operation=\"-install\"\n            ;;\n        u)\n            operation=\"-uninstall\"\n            ;;\n        d)\n            operation=\"-disable\"\n            ;;\n        e)\n            operation=\"-enable\"\n            ;;\n        p)\n            operation=\"-update\"\n            ;;\n        c)\n            COMMAND=\"$OPTARG\"\n            ;;\n        *)\n            echo \"$USAGE\" >&2\n            exit 1\n            ;;\n    esac\ndone\n\nshift $((OPTIND-1))\n\n# If find_python is not able to find a python installed, $PYTHON will be null.\nfind_python PYTHON\n\n\nif [ -z \"$PYTHON\" ]; then\n   echo \"No Python interpreter found on the box\" >&2\n   exit 51 # Not Supported\nelse\n   echo `${PYTHON} --version`\nfi\n\n${PYTHON} ${COMMAND} ${operation}\n# DONE"
  },
  {
    "path": "VMAccess/manifest.xml",
    "content": "<?xml version='1.0' encoding='utf-8' ?>\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n  <ProviderNameSpace>Microsoft.OSTCExtensions</ProviderNameSpace>\n  <Type>VMAccessForLinux</Type>\n  <Version>1.5.23</Version>\n  <Label>Microsoft Azure VM Access Extension for Linux Virtual Machines</Label>\n  <HostingResources>VmRole</HostingResources>\n  <MediaLink></MediaLink>\n  <Description>Microsoft Azure VM Access Extension for Linux Virtual Machines</Description>\n  <IsInternalExtension>true</IsInternalExtension>\n  <Eula>https://github.com/Azure/azure-linux-extensions/blob/master/LICENSE-2_0.txt</Eula>\n  <PrivacyUri>http://www.microsoft.com/privacystatement/en-us/OnlineServices/Default.aspx</PrivacyUri>\n  <HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n  <IsJsonExtension>true</IsJsonExtension>\n  <SupportedOS>Linux</SupportedOS>\n  <CompanyName>Microsoft</CompanyName>\n  <!--%REGIONS%-->\n</ExtensionImage>\n"
  },
  {
    "path": "VMAccess/references",
    "content": "Utils/\nCommon/WALinuxAgent-2.0.16/waagent\nCommon/waagentloader.py\n"
  },
  {
    "path": "VMAccess/resources/SuSE_default",
    "content": "#\t$OpenBSD: sshd_config,v 1.89 2013/02/06 00:20:42 dtucker Exp $\n\n# This is the sshd server system-wide configuration file.  See\n# sshd_config(5) for more information.\n\n# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin\n\n# The strategy used for options in the default sshd_config shipped with\n# OpenSSH is to specify options with their default value where\n# possible, but leave them commented.  Uncommented options override the\n# default value.\n\n#Port 22\n#AddressFamily any\n#ListenAddress 0.0.0.0\n#ListenAddress ::\n\n# The default requires explicit activation of protocol 1\n#Protocol 2\n\n# HostKey for protocol version 1\n#HostKey /etc/ssh/ssh_host_key\n# HostKeys for protocol version 2\n#HostKey /etc/ssh/ssh_host_rsa_key\n#HostKey /etc/ssh/ssh_host_dsa_key\n#HostKey /etc/ssh/ssh_host_ecdsa_key\n\n# Lifetime and size of ephemeral version 1 server key\n#KeyRegenerationInterval 1h\n#ServerKeyBits 1024\n\n# Logging\n# obsoletes QuietMode and FascistLogging\n#SyslogFacility AUTH\n#LogLevel INFO\n\n# Authentication:\n\n#LoginGraceTime 2m\n#PermitRootLogin yes\n#StrictModes yes\n#MaxAuthTries 6\n#MaxSessions 10\n\n#RSAAuthentication yes\n#PubkeyAuthentication yes\n\n# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2\n# but this is overridden so installations will only check .ssh/authorized_keys\nAuthorizedKeysFile\t.ssh/authorized_keys\n\n#AuthorizedPrincipalsFile none\n\n#AuthorizedKeysCommand none\n#AuthorizedKeysCommandUser nobody\n\n# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts\n#RhostsRSAAuthentication no\n# similar for protocol version 2\n#HostbasedAuthentication no\n# Change to yes if you don't trust ~/.ssh/known_hosts for\n# RhostsRSAAuthentication and HostbasedAuthentication\n#IgnoreUserKnownHosts no\n# Don't read the user's ~/.rhosts and ~/.shosts files\n#IgnoreRhosts yes\n\n# To disable tunneled clear text passwords, change to no here!\nPasswordAuthentication no\n#PermitEmptyPasswords no\n\n# Change to no to disable s/key passwords\n#ChallengeResponseAuthentication yes\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n#KerberosGetAFSToken no\n\n# GSSAPI options\n#GSSAPIAuthentication no\n#GSSAPICleanupCredentials yes\n\n# Set this to 'yes' to enable support for the deprecated 'gssapi' authentication\n# mechanism to OpenSSH 3.8p1. The newer 'gssapi-with-mic' mechanism is included\n# in this release. The use of 'gssapi' is deprecated due to the presence of\n# potential man-in-the-middle attacks, which 'gssapi-with-mic' is not susceptible to.\n#GSSAPIEnableMITMAttack no\n\n# Set this to 'yes' to enable PAM authentication, account processing, \n# and session processing. If this is enabled, PAM authentication will \n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  
Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\nUsePAM yes\n\n#AllowAgentForwarding yes\n#AllowTcpForwarding yes\n#GatewayPorts no\nX11Forwarding yes\n#X11DisplayOffset 10\n#X11UseLocalhost yes\n#PrintMotd yes\n#PrintLastLog yes\n#TCPKeepAlive yes\n#UseLogin no\nUsePrivilegeSeparation sandbox\t\t# Default for new installations.\n#PermitUserEnvironment no\n#Compression delayed\n#ClientAliveInterval 0\n#ClientAliveCountMax 3\n#UseDNS yes\n#PidFile /run/sshd.pid\n#MaxStartups 10:30:100\n#PermitTunnel no\n#ChrootDirectory none\n#VersionAddendum none\n\n# no default banner path\n#Banner none\n\n# override default of no subsystems\nSubsystem\tsftp\t/usr/lib/ssh/sftp-server\n\n# This enables accepting locale enviroment variables LC_* LANG, see sshd_config(5).\nAcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES\nAcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT\nAcceptEnv LC_IDENTIFICATION LC_ALL\n\n# Example of overriding settings on a per-user basis\n#Match User anoncvs\n#\tX11Forwarding no\n#\tAllowTcpForwarding no\n#\tForceCommand cvs server\nClientAliveInterval 180\n"
  },
  {
    "path": "VMAccess/resources/Ubuntu_default",
    "content": "# Package generated configuration file\n# See the sshd_config(5) manpage for details\n\n# What ports, IPs and protocols we listen for\nPort 22\n# Use these options to restrict which interfaces/protocols sshd will bind to\n#ListenAddress ::\n#ListenAddress 0.0.0.0\nProtocol 2\n# HostKeys for protocol version 2\nHostKey /etc/ssh/ssh_host_rsa_key\nHostKey /etc/ssh/ssh_host_dsa_key\nHostKey /etc/ssh/ssh_host_ecdsa_key\nHostKey /etc/ssh/ssh_host_ed25519_key\n#Privilege Separation is turned on for security\nUsePrivilegeSeparation yes\n\n# Lifetime and size of ephemeral version 1 server key\nKeyRegenerationInterval 3600\nServerKeyBits 1024\n\n# Logging\nSyslogFacility AUTH\nLogLevel INFO\n\n# Authentication:\nLoginGraceTime 120\nPermitRootLogin without-password\nStrictModes yes\n\nRSAAuthentication yes\nPubkeyAuthentication yes\n#AuthorizedKeysFile\t%h/.ssh/authorized_keys\n\n# Don't read the user's ~/.rhosts and ~/.shosts files\nIgnoreRhosts yes\n# For this to work you will also need host keys in /etc/ssh_known_hosts\nRhostsRSAAuthentication no\n# similar for protocol version 2\nHostbasedAuthentication no\n# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication\n#IgnoreUserKnownHosts yes\n\n# To enable empty passwords, change to yes (NOT RECOMMENDED)\nPermitEmptyPasswords no\n\n# Change to yes to enable challenge-response passwords (beware issues with\n# some PAM modules and threads)\nChallengeResponseAuthentication no\n\n# Change to no to disable tunnelled clear text passwords\nPasswordAuthentication yes\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosGetAFSToken no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\n#GSSAPICleanupCredentials yes\n\nX11Forwarding yes\nX11DisplayOffset 10\nPrintMotd no\nPrintLastLog yes\nTCPKeepAlive yes\n#UseLogin no\n\n#MaxStartups 10:30:60\n#Banner /etc/issue.net\n\n# Allow client to pass locale environment variables\nAcceptEnv LANG LC_*\n\nSubsystem sftp /usr/lib/openssh/sftp-server\n\n# Set this to 'yes' to enable PAM authentication, account processing,\n# and session processing. If this is enabled, PAM authentication will\n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\nUsePAM yes\n\n# CLOUD_IMG: This file was created/modified by the Cloud Image build process\nClientAliveInterval 120"
  },
  {
    "path": "VMAccess/resources/centos_default",
    "content": "#\t$OpenBSD: sshd_config,v 1.80 2008/07/02 02:24:18 djm Exp $\n\n# This is the sshd server system-wide configuration file.  See\n# sshd_config(5) for more information.\n\n# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin\n\n# The strategy used for options in the default sshd_config shipped with\n# OpenSSH is to specify options with their default value where\n# possible, but leave them commented.  Uncommented options change a\n# default value.\n\n#Port 22\n#AddressFamily any\n#ListenAddress 0.0.0.0\n#ListenAddress ::\n\n# Disable legacy (protocol version 1) support in the server for new\n# installations. In future the default will change to require explicit\n# activation of protocol 1\nProtocol 2\n\n# HostKey for protocol version 1\n#HostKey /etc/ssh/ssh_host_key\n# HostKeys for protocol version 2\n#HostKey /etc/ssh/ssh_host_rsa_key\n#HostKey /etc/ssh/ssh_host_dsa_key\n\n# Lifetime and size of ephemeral version 1 server key\n#KeyRegenerationInterval 1h\n#ServerKeyBits 1024\n\n# Logging\n# obsoletes QuietMode and FascistLogging\n#SyslogFacility AUTH\nSyslogFacility AUTHPRIV\n#LogLevel INFO\n\n# Authentication:\n\n#LoginGraceTime 2m\n#PermitRootLogin yes\n#StrictModes yes\n#MaxAuthTries 6\n#MaxSessions 10\n\n#RSAAuthentication yes\n#PubkeyAuthentication yes\n#AuthorizedKeysFile\t.ssh/authorized_keys\n#AuthorizedKeysCommand none\n#AuthorizedKeysCommandRunAs nobody\n\n# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts\n#RhostsRSAAuthentication no\n# similar for protocol version 2\n#HostbasedAuthentication no\n# Change to yes if you don't trust ~/.ssh/known_hosts for\n# RhostsRSAAuthentication and HostbasedAuthentication\n#IgnoreUserKnownHosts no\n# Don't read the user's ~/.rhosts and ~/.shosts files\n#IgnoreRhosts yes\n\n# To disable tunneled clear text passwords, change to no here!\n#PasswordAuthentication yes\n#PermitEmptyPasswords no\nPasswordAuthentication yes\n\n# Change to no to disable s/key passwords\n#ChallengeResponseAuthentication yes\nChallengeResponseAuthentication no\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n#KerberosGetAFSToken no\n#KerberosUseKuserok yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\nGSSAPIAuthentication yes\n#GSSAPICleanupCredentials yes\nGSSAPICleanupCredentials yes\n#GSSAPIStrictAcceptorCheck yes\n#GSSAPIKeyExchange no\n\n# Set this to 'yes' to enable PAM authentication, account processing, \n# and session processing. If this is enabled, PAM authentication will \n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  
Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\n#UsePAM no\nUsePAM yes\n\n# Accept locale-related environment variables\nAcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES\nAcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT\nAcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE\nAcceptEnv XMODIFIERS\n\n#AllowAgentForwarding yes\n#AllowTcpForwarding yes\n#GatewayPorts no\n#X11Forwarding no\nX11Forwarding yes\n#X11DisplayOffset 10\n#X11UseLocalhost yes\n#PrintMotd yes\n#PrintLastLog yes\n#TCPKeepAlive yes\n#UseLogin no\n#UsePrivilegeSeparation yes\n#PermitUserEnvironment no\n#Compression delayed\nClientAliveInterval 180\n#ClientAliveCountMax 3\n#ShowPatchLevel no\n#UseDNS yes\n#PidFile /var/run/sshd.pid\n#MaxStartups 10:30:100\n#PermitTunnel no\n#ChrootDirectory none\n\n# no default banner path\n#Banner none\n\n# override default of no subsystems\nSubsystem\tsftp\t/usr/libexec/openssh/sftp-server\n\n# Example of overriding settings on a per-user basis\n#Match User anoncvs\n#\tX11Forwarding no\n#\tAllowTcpForwarding no\n#\tForceCommand cvs server\n"
  },
  {
    "path": "VMAccess/resources/debian_default",
    "content": "# Package generated configuration file\n# See the sshd_config(5) manpage for details\n\n# What ports, IPs and protocols we listen for\nPort 22\n# Use these options to restrict which interfaces/protocols sshd will bind to\n#ListenAddress ::\n#ListenAddress 0.0.0.0\nProtocol 2\n# HostKeys for protocol version 2\nHostKey /etc/ssh/ssh_host_rsa_key\nHostKey /etc/ssh/ssh_host_dsa_key\nHostKey /etc/ssh/ssh_host_ecdsa_key\nHostKey /etc/ssh/ssh_host_ed25519_key\n#Privilege Separation is turned on for security\nUsePrivilegeSeparation yes\n\n# Lifetime and size of ephemeral version 1 server key\nKeyRegenerationInterval 3600\nServerKeyBits 1024\n\n# Logging\nSyslogFacility AUTH\nLogLevel INFO\n\n# Authentication:\nLoginGraceTime 120\nPermitRootLogin without-password\nStrictModes yes\n\nRSAAuthentication yes\nPubkeyAuthentication yes\n#AuthorizedKeysFile\t%h/.ssh/authorized_keys\n\n# Don't read the user's ~/.rhosts and ~/.shosts files\nIgnoreRhosts yes\n# For this to work you will also need host keys in /etc/ssh_known_hosts\nRhostsRSAAuthentication no\n# similar for protocol version 2\nHostbasedAuthentication no\n# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication\n#IgnoreUserKnownHosts yes\n\n# To enable empty passwords, change to yes (NOT RECOMMENDED)\nPermitEmptyPasswords no\n\n# Change to yes to enable challenge-response passwords (beware issues with\n# some PAM modules and threads)\nChallengeResponseAuthentication no\n\n# Change to no to disable tunnelled clear text passwords\nPasswordAuthentication yes\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosGetAFSToken no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\n#GSSAPICleanupCredentials yes\n\nX11Forwarding yes\nX11DisplayOffset 10\nPrintMotd no\nPrintLastLog yes\nTCPKeepAlive yes\n#UseLogin no\n\n#MaxStartups 10:30:60\n#Banner /etc/issue.net\n\n# Allow client to pass locale environment variables\nAcceptEnv LANG LC_*\n\nSubsystem sftp /usr/lib/openssh/sftp-server\n\n# Set this to 'yes' to enable PAM authentication, account processing,\n# and session processing. If this is enabled, PAM authentication will\n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\nUsePAM yes\n\n# CLOUD_IMG: This file was created/modified by the Cloud Image build process\nClientAliveInterval 120"
  },
  {
    "path": "VMAccess/resources/default",
    "content": "#Default sshd_config\n\n# Package generated configuration file\n# See the sshd_config(5) manpage for details\n\n# What ports, IPs and protocols we listen for\nPort 22\n# Use these options to restrict which interfaces/protocols sshd will bind to\n#ListenAddress ::\n#ListenAddress 0.0.0.0\nProtocol 2\n# HostKeys for protocol version 2\nHostKey /etc/ssh/ssh_host_rsa_key\nHostKey /etc/ssh/ssh_host_dsa_key\nHostKey /etc/ssh/ssh_host_ecdsa_key\nHostKey /etc/ssh/ssh_host_ed25519_key\n#Privilege Separation is turned on for security\nUsePrivilegeSeparation yes\n\n# Lifetime and size of ephemeral version 1 server key\nKeyRegenerationInterval 3600\nServerKeyBits 1024\n\n# Logging\nSyslogFacility AUTH\nLogLevel INFO\n\n# Authentication:\nLoginGraceTime 120\nPermitRootLogin without-password\nStrictModes yes\n\nRSAAuthentication yes\nPubkeyAuthentication yes\n#AuthorizedKeysFile\t%h/.ssh/authorized_keys\n\n# Don't read the user's ~/.rhosts and ~/.shosts files\nIgnoreRhosts yes\n# For this to work you will also need host keys in /etc/ssh_known_hosts\nRhostsRSAAuthentication no\n# similar for protocol version 2\nHostbasedAuthentication no\n# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication\n#IgnoreUserKnownHosts yes\n\n# To enable empty passwords, change to yes (NOT RECOMMENDED)\nPermitEmptyPasswords no\n\n# Change to yes to enable challenge-response passwords (beware issues with\n# some PAM modules and threads)\nChallengeResponseAuthentication no\n\n# Change to no to disable tunnelled clear text passwords\nPasswordAuthentication yes\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosGetAFSToken no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\n#GSSAPICleanupCredentials yes\n\nX11Forwarding yes\nX11DisplayOffset 10\nPrintMotd no\nPrintLastLog yes\nTCPKeepAlive yes\n#UseLogin no\n\n#MaxStartups 10:30:60\n#Banner /etc/issue.net\n\n# Allow client to pass locale environment variables\nAcceptEnv LANG LC_*\n\nSubsystem sftp /usr/lib/openssh/sftp-server\n\n# Set this to 'yes' to enable PAM authentication, account processing,\n# and session processing. If this is enabled, PAM authentication will\n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\nUsePAM yes\n\n# CLOUD_IMG: This file was created/modified by the Cloud Image build process\nClientAliveInterval 120\n"
  },
  {
    "path": "VMAccess/resources/fedora_default",
    "content": "#\t$OpenBSD: sshd_config,v 1.80 2008/07/02 02:24:18 djm Exp $\n\n# This is the sshd server system-wide configuration file.  See\n# sshd_config(5) for more information.\n\n# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin\n\n# The strategy used for options in the default sshd_config shipped with\n# OpenSSH is to specify options with their default value where\n# possible, but leave them commented.  Uncommented options change a\n# default value.\n\n#Port 22\n#AddressFamily any\n#ListenAddress 0.0.0.0\n#ListenAddress ::\n\n# Disable legacy (protocol version 1) support in the server for new\n# installations. In future the default will change to require explicit\n# activation of protocol 1\nProtocol 2\n\n# HostKey for protocol version 1\n#HostKey /etc/ssh/ssh_host_key\n# HostKeys for protocol version 2\n#HostKey /etc/ssh/ssh_host_rsa_key\n#HostKey /etc/ssh/ssh_host_dsa_key\n\n# Lifetime and size of ephemeral version 1 server key\n#KeyRegenerationInterval 1h\n#ServerKeyBits 1024\n\n# Logging\n# obsoletes QuietMode and FascistLogging\n#SyslogFacility AUTH\nSyslogFacility AUTHPRIV\n#LogLevel INFO\n\n# Authentication:\n\n#LoginGraceTime 2m\n#PermitRootLogin yes\n#StrictModes yes\n#MaxAuthTries 6\n#MaxSessions 10\n\n#RSAAuthentication yes\n#PubkeyAuthentication yes\n#AuthorizedKeysFile\t.ssh/authorized_keys\n#AuthorizedKeysCommand none\n#AuthorizedKeysCommandRunAs nobody\n\n# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts\n#RhostsRSAAuthentication no\n# similar for protocol version 2\n#HostbasedAuthentication no\n# Change to yes if you don't trust ~/.ssh/known_hosts for\n# RhostsRSAAuthentication and HostbasedAuthentication\n#IgnoreUserKnownHosts no\n# Don't read the user's ~/.rhosts and ~/.shosts files\n#IgnoreRhosts yes\n\n# To disable tunneled clear text passwords, change to no here!\n#PasswordAuthentication yes\n#PermitEmptyPasswords no\nPasswordAuthentication yes\n\n# Change to no to disable s/key passwords\n#ChallengeResponseAuthentication yes\nChallengeResponseAuthentication no\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n#KerberosGetAFSToken no\n#KerberosUseKuserok yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\nGSSAPIAuthentication yes\n#GSSAPICleanupCredentials yes\nGSSAPICleanupCredentials yes\n#GSSAPIStrictAcceptorCheck yes\n#GSSAPIKeyExchange no\n\n# Set this to 'yes' to enable PAM authentication, account processing, \n# and session processing. If this is enabled, PAM authentication will \n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  
Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\n#UsePAM no\nUsePAM yes\n\n# Accept locale-related environment variables\nAcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES\nAcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT\nAcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE\nAcceptEnv XMODIFIERS\n\n#AllowAgentForwarding yes\n#AllowTcpForwarding yes\n#GatewayPorts no\n#X11Forwarding no\nX11Forwarding yes\n#X11DisplayOffset 10\n#X11UseLocalhost yes\n#PrintMotd yes\n#PrintLastLog yes\n#TCPKeepAlive yes\n#UseLogin no\n#UsePrivilegeSeparation yes\n#PermitUserEnvironment no\n#Compression delayed\nClientAliveInterval 180\n#ClientAliveCountMax 3\n#ShowPatchLevel no\n#UseDNS yes\n#PidFile /var/run/sshd.pid\n#MaxStartups 10:30:100\n#PermitTunnel no\n#ChrootDirectory none\n\n# no default banner path\n#Banner none\n\n# override default of no subsystems\nSubsystem\tsftp\t/usr/libexec/openssh/sftp-server\n\n# Example of overriding settings on a per-user basis\n#Match User anoncvs\n#\tX11Forwarding no\n#\tAllowTcpForwarding no\n#\tForceCommand cvs server\n"
  },
  {
    "path": "VMAccess/resources/redhat_default",
    "content": "#\t$OpenBSD: sshd_config,v 1.80 2008/07/02 02:24:18 djm Exp $\n\n# This is the sshd server system-wide configuration file.  See\n# sshd_config(5) for more information.\n\n# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin\n\n# The strategy used for options in the default sshd_config shipped with\n# OpenSSH is to specify options with their default value where\n# possible, but leave them commented.  Uncommented options change a\n# default value.\n\n#Port 22\n#AddressFamily any\n#ListenAddress 0.0.0.0\n#ListenAddress ::\n\n# Disable legacy (protocol version 1) support in the server for new\n# installations. In future the default will change to require explicit\n# activation of protocol 1\nProtocol 2\n\n# HostKey for protocol version 1\n#HostKey /etc/ssh/ssh_host_key\n# HostKeys for protocol version 2\n#HostKey /etc/ssh/ssh_host_rsa_key\n#HostKey /etc/ssh/ssh_host_dsa_key\n\n# Lifetime and size of ephemeral version 1 server key\n#KeyRegenerationInterval 1h\n#ServerKeyBits 1024\n\n# Logging\n# obsoletes QuietMode and FascistLogging\n#SyslogFacility AUTH\nSyslogFacility AUTHPRIV\n#LogLevel INFO\n\n# Authentication:\n\n#LoginGraceTime 2m\n#PermitRootLogin yes\n#StrictModes yes\n#MaxAuthTries 6\n#MaxSessions 10\n\n#RSAAuthentication yes\n#PubkeyAuthentication yes\n#AuthorizedKeysFile\t.ssh/authorized_keys\n#AuthorizedKeysCommand none\n#AuthorizedKeysCommandRunAs nobody\n\n# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts\n#RhostsRSAAuthentication no\n# similar for protocol version 2\n#HostbasedAuthentication no\n# Change to yes if you don't trust ~/.ssh/known_hosts for\n# RhostsRSAAuthentication and HostbasedAuthentication\n#IgnoreUserKnownHosts no\n# Don't read the user's ~/.rhosts and ~/.shosts files\n#IgnoreRhosts yes\n\n# To disable tunneled clear text passwords, change to no here!\n#PasswordAuthentication yes\n#PermitEmptyPasswords no\nPasswordAuthentication yes\n\n# Change to no to disable s/key passwords\n#ChallengeResponseAuthentication yes\nChallengeResponseAuthentication no\n\n# Kerberos options\n#KerberosAuthentication no\n#KerberosOrLocalPasswd yes\n#KerberosTicketCleanup yes\n#KerberosGetAFSToken no\n#KerberosUseKuserok yes\n\n# GSSAPI options\n#GSSAPIAuthentication no\nGSSAPIAuthentication yes\n#GSSAPICleanupCredentials yes\nGSSAPICleanupCredentials yes\n#GSSAPIStrictAcceptorCheck yes\n#GSSAPIKeyExchange no\n\n# Set this to 'yes' to enable PAM authentication, account processing, \n# and session processing. If this is enabled, PAM authentication will \n# be allowed through the ChallengeResponseAuthentication and\n# PasswordAuthentication.  
Depending on your PAM configuration,\n# PAM authentication via ChallengeResponseAuthentication may bypass\n# the setting of \"PermitRootLogin without-password\".\n# If you just want the PAM account and session checks to run without\n# PAM authentication, then enable this but set PasswordAuthentication\n# and ChallengeResponseAuthentication to 'no'.\n#UsePAM no\nUsePAM yes\n\n# Accept locale-related environment variables\nAcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES\nAcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT\nAcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE\nAcceptEnv XMODIFIERS\n\n#AllowAgentForwarding yes\n#AllowTcpForwarding yes\n#GatewayPorts no\n#X11Forwarding no\nX11Forwarding yes\n#X11DisplayOffset 10\n#X11UseLocalhost yes\n#PrintMotd yes\n#PrintLastLog yes\n#TCPKeepAlive yes\n#UseLogin no\n#UsePrivilegeSeparation yes\n#PermitUserEnvironment no\n#Compression delayed\nClientAliveInterval 180\n#ClientAliveCountMax 3\n#ShowPatchLevel no\n#UseDNS yes\n#PidFile /var/run/sshd.pid\n#MaxStartups 10:30:100\n#PermitTunnel no\n#ChrootDirectory none\n\n# no default banner path\n#Banner none\n\n# override default of no subsystems\nSubsystem\tsftp\t/usr/libexec/openssh/sftp-server\n\n# Example of overriding settings on a per-user basis\n#Match User anoncvs\n#\tX11Forwarding no\n#\tAllowTcpForwarding no\n#\tForceCommand cvs server\n"
  },
  {
    "path": "VMAccess/test/env.py",
    "content": "#!/usr/bin/env python\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\n\n#append installer directory to sys.path\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n\nmanifestFile = os.path.join(root, 'HandlerManifest.json')\nif os.path.exists(manifestFile):\n    import json \n    jsonData = open(manifestFile)\n    manifest = json.load(jsonData)\n    jsonData.close()\n    extName=\"{0}-{1}\".format(\"VMAccess\", manifest[0][\"version\"])\n    print(\"Start test: %s\" % extName)\n\n    extDir=os.path.join(\"/var/lib/waagent\", extName)\n    if(os.path.isdir(extDir)):\n        os.chdir(extDir)\n        print(\"Switching to dir: %s\" % os.getcwd())\n\n"
  },
  {
    "path": "VMAccess/test/test_iptable_rules.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom VMAccess.test import env\nfrom VMAccess import vmaccess\nimport os\nfrom Utils.WAAgentUtil import waagent\n\nwaagent.LoggerInit('/tmp/test.log','/dev/null')\n\nclass TestIPhablesRule(unittest.TestCase):\n    def test_insert_rule_if_not_exists(self):\n        rule = 'INPUT -p tcp -m tcp --dport 9998 -j DROP'\n        vmaccess._insert_rule_if_not_exists(rule)\n        cmd_result = waagent.RunGetOutput(\"iptables-save | grep '%s'\" %rule)\n        self.assertEqual(cmd_result[0], 0)\n        waagent.Run(\"iptables -D %s\" %rule)\n\n    def test_del_rule_if_exists(self):\n        rule = 'INPUT -p tcp -m tcp --dport 9998 -j DROP'\n        waagent.Run(\"iptables -I %s\" %rule)\n        vmaccess._del_rule_if_exists(rule)\n        cmd_result = waagent.RunGetOutput(\"iptables-save | grep '%s'\" %rule)\n        self.assertNotEqual(cmd_result[0], 0)\n        \nif __name__ == '__main__':\n    unittest.main()\n"
  },
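The helpers these tests exercise, `_insert_rule_if_not_exists` and `_del_rule_if_exists`, follow a check-then-act pattern against `iptables-save` output. A minimal standalone sketch of that pattern, using `subprocess` directly rather than the repo's waagent helpers (the function names here are illustrative, not part of the extension):

```python
# Sketch of the idempotent iptables pattern exercised by the tests above.
# Requires root; shells out directly instead of using waagent helpers.
import subprocess

def _rule_present(rule):
    # iptables-save prints rules as "-A CHAIN ...", so the rule body
    # appears verbatim in the output when the rule is installed.
    saved = subprocess.run(["iptables-save"], capture_output=True, text=True)
    return saved.returncode == 0 and rule in saved.stdout

def insert_rule_if_not_exists(rule):
    if not _rule_present(rule):
        subprocess.run(["iptables", "-I"] + rule.split())

def del_rule_if_exists(rule):
    # loop so that duplicate copies of the rule are all removed
    while _rule_present(rule):
        if subprocess.run(["iptables", "-D"] + rule.split()).returncode != 0:
            break  # avoid spinning forever if the delete keeps failing

if __name__ == "__main__":
    rule = "INPUT -p tcp -m tcp --dport 9998 -j DROP"
    insert_rule_if_not_exists(rule)  # no-op when already present
    del_rule_if_exists(rule)         # removes duplicates too
```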
  {
    "path": "VMAccess/test/test_reset_account.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\n\nfrom VMAccess import vmaccess\nfrom Utils.WAAgentUtil import waagent\n\nwaagent.LoggerInit('/tmp/test.log','/dev/stdout')\nwaagent.MyDistro = waagent.GetMyDistro()\n\nclass Dummy(object):\n    pass\n\nhutil = Dummy()\nhutil.log = waagent.Log\n\nclass TestCreateNewAccount(unittest.TestCase):\n    def test_creat_newuser(self):\n        settings={}\n        settings['username'] = 'NewUser'\n        settings['password'] = 'User@123'\n        waagent.Run('userdel %s' %settings['username'])\n        vmaccess._set_user_account_pub_key(settings, hutil)\n        waagent.Run(\"echo 'exit' > /tmp/exit.sh\")\n        cmd_result = waagent.RunGetOutput(\"sshpass -p 'User@123' ssh -o StrictHostKeyChecking=no\" \n                + \" %s@localhost < /tmp/exit.sh\" %settings['username'])\n        self.assertEqual(cmd_result[0], 0)\n        waagent.Run(\"rm exit.sh -f\")\n        waagent.Run('userdel %s' %settings['username'])\n\nexpected_cert_str = \"\"\"\\\n-----BEGIN CERTIFICATE-----\nMIICOTCCAaICCQD7F0nb+GtpcTANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJh\nYjELMAkGA1UECAwCYWIxCzAJBgNVBAcMAmFiMQswCQYDVQQKDAJhYjELMAkGA1UE\nCwwCYWIxCzAJBgNVBAMMAmFiMREwDwYJKoZIhvcNAQkBFgJhYjAeFw0xNDA4MDUw\nODIwNDZaFw0xNTA4MDUwODIwNDZaMGExCzAJBgNVBAYTAmFiMQswCQYDVQQIDAJh\nYjELMAkGA1UEBwwCYWIxCzAJBgNVBAoMAmFiMQswCQYDVQQLDAJhYjELMAkGA1UE\nAwwCYWIxETAPBgkqhkiG9w0BCQEWAmFiMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB\niQKBgQC4Vugyj4uAKGYHW/D1eAg1DmLAv01e+9I0zIi8HzJxP87MXmS8EdG5SEzR\nN6tfQQie76JBSTYI4ngTaVCKx5dVT93LiWxLV193Q3vs/HtwwH1fLq0rAKUhREQ6\n+CsRGNyeVfJkNsxAvNvQkectnYuOtcDxX5n/25eWAofobxVbSQIDAQABMA0GCSqG\nSIb3DQEBCwUAA4GBAF20gkq/DeUSXkZA+jjmmbCPioB3KL63GpoTXfP65d6yU4xZ\nTlMoLkqGKe3WoXmhjaTOssulgDAGA24IeWy/u7luH+oHdZEmEufFhj4M7tQ1pAhN\nCT8JCL2dI3F76HD6ZutTOkwRar3PYk5q7RsSJdAemtnwVpgp+RBMtbmct7MQ\n-----END CERTIFICATE-----\n\"\"\"\nclass TestSaveCertFile(unittest.TestCase):\n    def test_save_cert_Str_as_file(self):\n        cert_str = waagent.GetFileContents(os.path.join(waagent.LibDir, 'TEST.crt'))\n        vmaccess._save_cert_str_as_file(cert_str, '/tmp/tmp.crt')\n        saved_cert_str = waagent.GetFileContents('/tmp/tmp.crt')\n        self.assertEqual(saved_cert_str, expected_cert_str)\n\nclass TestResetSshKey(unittest.TestCase):\n    def test_reset_ssh_key(self):\n        settings={}\n        settings['username'] = 'NewUser'\n        settings['ssh_key'] = waagent.GetFileContents(os.path.join(waagent.LibDir, 'TEST.crt'))\n        vmaccess._set_user_account_pub_key(settings, hutil)\n        waagent.Run(\"echo 'exit' > /tmp/exit.sh\")\n        cmd_result = waagent.RunGetOutput(\"ssh -o StrictHostKeyChecking=no -i %s\" %os.path.join(waagent.LibDir, 'TEST.prv')\n                + \" %s@localhost < /tmp/exit.sh\" %settings['username'])\n        self.assertEqual(cmd_result[0], 0)\n        waagent.Run(\"rm exit.sh -f\")\n        waagent.Run('userdel %s' 
%settings['username'])\n\n\nclass TestResetExistingUser(unittest.TestCase):\n    def test_reset_existing_user(self):\n        settings={}\n        settings['username'] = 'ExistingUser'\n        settings['password'] = 'User@123'\n        waagent.Run('userdel %s' %settings['username'])\n        waagent.Run('useradd %s' %settings['username'])\n        waagent.MyDistro.changePass(settings['username'], \"Quattro!\")\n        vmaccess._set_user_account_pub_key(settings, hutil)\n        waagent.Run(\"echo 'exit' > /tmp/exit.sh\")\n        cmd_result = waagent.RunGetOutput(\"sshpass -p 'User@123' ssh -o StrictHostKeyChecking=no\" \n                + \" %s@localhost < /tmp/exit.sh\" %settings['username'])\n        self.assertEqual(cmd_result[0], 0)\n        waagent.Run(\"rm exit.sh -f\")\n        waagent.Run('userdel %s' %settings['username'])\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "VMAccess/test/test_reset_sshd_config.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom VMAccess.test import env\nfrom VMAccess import vmaccess\nimport os\nfrom Utils.WAAgentUtil import waagent\nimport shutil\n\nwaagent.LoggerInit('/tmp/test.log','/dev/stdout')\nwaagent.MyDistro = waagent.GetMyDistro()\nclass Dummy(object):\n    pass\n\nhutil = Dummy()\nhutil.log = waagent.Log\n\nclass TestResetSshdConfig(unittest.TestCase):\n    def test_reset_sshd_config(self):\n        path = '/tmp/sshd_config'\n        resources=os.path.join(env.root, 'resources')\n        if(os.path.exists(path)):\n            os.remove(path)\n        if(os.path.isdir('resources')):\n            shutil.rmtree('resources')\n        shutil.copytree(resources, 'resources')\n        vmaccess._reset_sshd_config(path)\n        self.assertTrue(os.path.exists(path))\n        config = waagent.GetFileContents(path)\n        self.assertFalse(config.startswith(\"#Default sshd_config\"))\n        os.remove(path)\n\n    def test_backup_sshd_config(self):\n        test_dir = '/tmp/test_vmaccess'\n        path = os.path.join(test_dir, \"old_sshd_config\")\n        if(not os.path.isdir(test_dir)):\n            os.mkdir(test_dir)\n        if(not os.path.exists(path)):\n            waagent.Run(\"echo > %s\" %path)\n        vmaccess._backup_sshd_config(path)\n        os.remove(path)\n        files = os.listdir(test_dir)\n        self.assertNotEqual(len(files), 0)\n        shutil.rmtree(test_dir)\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "VMAccess/vmaccess.py",
    "content": "#!/usr/bin/env python\n#\n# VMAccess extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport time\nimport traceback\n\nimport Utils.handlerutil2 as handler_util\nimport Utils.logger as logger\nimport Utils.extensionutils as ext_utils\nimport Utils.distroutils as dist_utils\nimport Utils.constants as constants\nimport Utils.ovfutils as ovf_utils\n\n# Define global variables\nExtensionShortName = 'VMAccess'\nBeginCertificateTag = '-----BEGIN CERTIFICATE-----'\nEndCertificateTag = '-----END CERTIFICATE-----'\nBeginSSHTag = '---- BEGIN SSH2 PUBLIC KEY ----'\nOutputSplitter = ';'\nSshdConfigPath = '/etc/ssh/sshd_config'\nSshdConfigBackupPath = '/var/cache/vmaccess/backup'\n\n# overwrite the default logger\nlogger.global_shared_context_logger = logger.Logger('/var/log/waagent.log', '/dev/stdout')\n\ndef get_os_name():\n    if os.path.isfile(constants.os_release):\n        return ext_utils.get_line_starting_with(\"NAME\", constants.os_release)\n    elif os.path.isfile(constants.system_release):\n        return ext_utils.get_file_contents(constants.system_release)\n    return None\n\ndef get_linux_agent_conf_filename(os_name):\n    if os_name is not None:\n        if re.search(\"coreos\", os_name, re.IGNORECASE) or re.search(\"flatcar\", os_name, re.IGNORECASE):\n            return \"/usr/share/oem/waagent.conf\"\n    return \"/etc/waagent.conf\"\n\nclass ConfigurationProvider(object):\n    \"\"\"\n    Parse amd store key:values in waagent.conf\n    \"\"\"\n\n    def __init__(self, wala_config_file):\n        self.values = dict()\n        if not os.path.isfile(wala_config_file):\n            logger.warning(\"Missing configuration in {0}, setting default values for PasswordCryptId and PasswordCryptSaltLength\".format(wala_config_file))\n            self.values[\"Provisioning.PasswordCryptId\"] = \"6\"\n            self.values[\"Provisioning.PasswordCryptSaltLength\"] = 10\n            return\n        try:\n            for line in ext_utils.get_file_contents(wala_config_file).split('\\n'):\n                if not line.startswith(\"#\") and \"=\" in line:\n                    parts = line.split()[0].split('=')\n                    value = parts[1].strip(\"\\\" \")\n                    if value != \"None\":\n                        self.values[parts[0]] = value\n                    else:\n                        self.values[parts[0]] = None\n        # when get_file_contents returns none\n        except AttributeError:\n            logger.error(\"Unable to parse {0}\".format(wala_config_file))\n            raise\n        return\n\n    def get(self, key):\n        return self.values.get(key)\n\n    def yes(self, key):\n        config_value = self.get(key)\n        if config_value is not None and config_value.lower().startswith(\"y\"):\n            return True\n        else:\n            return False\n\n    def no(self, key):\n        config_value = self.get(key)\n  
      if config_value is not None and config_value.lower().startswith(\"n\"):\n            return True\n        else:\n            return False\n\n\nOSName = get_os_name()\nConfiguration = ConfigurationProvider(get_linux_agent_conf_filename(OSName))\nMyDistro = dist_utils.get_my_distro(Configuration, OSName)\n\n\ndef main():\n    logger.log(\"%s started to handle.\" % ExtensionShortName)\n\n    try:\n        for a in sys.argv[1:]:\n            if re.match(\"^([-/]*)(enable)\", a):\n                enable()\n    except Exception as e:\n        err_msg = \"Failed with error: {0}, {1}\".format(e, traceback.format_exc())\n        logger.error(err_msg)\n\n\ndef enable():\n    hutil = handler_util.HandlerUtility()\n    hutil.do_parse_context('Enable')\n    try:\n        hutil.exit_if_enabled(remove_protected_settings=True)  # If no new seqNum received, exit.\n\n        reset_ssh = None\n        remove_user = None\n        restore_backup_ssh = None\n        protect_settings = hutil.get_protected_settings()\n        if protect_settings:\n            reset_ssh = protect_settings.get('reset_ssh', False)\n            remove_user = protect_settings.get('remove_user')\n            restore_backup_ssh = protect_settings.get('restore_backup_ssh', False)\n\n        if remove_user and _is_sshd_config_modified(protect_settings):\n            ext_utils.add_extension_event(name=hutil.get_name(),\n                                          op=constants.WALAEventOperation.Enable,\n                                          is_success=False,\n                                          message=\"(03002)Argument error, conflicting operations\")\n            raise Exception(\"Cannot reset sshd_config and remove a user in one operation.\")\n\n        _forcibly_reset_chap(hutil)\n\n        if reset_ssh or restore_backup_ssh:\n            _open_ssh_port()\n            hutil.log(\"Succeeded in check and open ssh port.\")\n            ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True, message=\"reset-ssh\")\n            _reset_sshd_config(hutil, restore_backup_ssh)\n            hutil.log(\"Succeeded in {0} sshd_config.\".format(\"resetting\" if reset_ssh else \"restoring\"))\n\n        if remove_user:\n            ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True, message=\"remove-user\")\n            _remove_user_account(remove_user, hutil)\n\n        _set_user_account_pub_key(protect_settings, hutil)\n\n        if _is_sshd_config_modified(protect_settings):\n            MyDistro.restart_ssh_service()\n\n        check_and_repair_disk(hutil)\n        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable succeeded.')\n    except Exception as e:\n        hutil.error((\"Failed to enable the extension with error: {0}, \"\n                     \"stack trace: {1}\").format(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Enable', 'error', '0', \"Enable failed: {0}\".format(str(e)))\n\n\ndef _forcibly_reset_chap(hutil):\n    name = \"ChallengeResponseAuthentication\"\n    _backup_and_update_sshd_config(hutil, name, \"no\")\n    MyDistro.restart_ssh_service()\n\n\ndef _is_sshd_config_modified(protected_settings):\n    result = protected_settings.get('reset_ssh') or protected_settings.get('restore_backup_ssh') or protected_settings.get('password')\n    return result is not None\n\n\ndef _remove_user_account(user_name, hutil):\n    hutil.log(\"Removing user account\")\n\n    try:\n        sudoers = _get_other_sudoers(user_name)\n        
MyDistro.delete_account(user_name)\n        _save_other_sudoers(sudoers)\n    except Exception as e:\n        ext_utils.add_extension_event(name=hutil.get_name(),\n                                      op=constants.WALAEventOperation.Enable,\n                                      is_success=False,\n                                      message=\"(02102)Failed to remove user.\")\n        raise Exception(\"Failed to remove user {0}\".format(e))\n\n    ext_utils.add_extension_event(name=hutil.get_name(),\n                                  op=constants.WALAEventOperation.Enable,\n                                  is_success=True,\n                                  message=\"Successfully removed user\")\n\n\ndef _set_user_account_pub_key(protect_settings, hutil):\n    ovf_env = None\n    try:\n        ovf_xml = ext_utils.get_file_contents('/var/lib/waagent/ovf-env.xml')\n        if ovf_xml is not None:\n            ovf_env = ovf_utils.OvfEnv.parse(ovf_xml, Configuration, False, False)\n    except (EnvironmentError, ValueError, KeyError, AttributeError, TypeError):\n        pass\n    if ovf_env is None:\n        # default ovf_env with empty data\n        ovf_env = ovf_utils.OvfEnv()\n        logger.log(\"could not load ovf-env.xml\")\n\n    # user name must be provided if set ssh key or password\n    if not protect_settings or 'username' not in protect_settings:\n        return\n\n    user_name = protect_settings['username']\n    user_pass = protect_settings.get('password')\n    cert_txt = protect_settings.get('ssh_key')\n    expiration = protect_settings.get('expiration')\n    remove_prior_keys = protect_settings.get('remove_prior_keys')\n    enable_passwordless_access = protect_settings.get('enable_passwordless_access', False)\n\n    no_convert = False\n    if not user_pass and not cert_txt and not ovf_env.SshPublicKeys:\n        raise Exception(\"No password or ssh_key is specified.\")\n\n    if user_pass is not None and len(user_pass) == 0:\n        user_pass = None\n        hutil.log(\"empty passwords are not allowed, ignoring password reset\")\n\n    # Reset user account and password, password could be empty\n    sudoers = _get_other_sudoers(user_name)\n    error_string = MyDistro.create_account(\n        user_name, user_pass, expiration, None, enable_passwordless_access)\n    _save_other_sudoers(sudoers)\n\n    if error_string is not None:\n        err_msg = \"Failed to create the account or set the password\"\n        ext_utils.add_extension_event(name=hutil.get_name(),\n                                      op=constants.WALAEventOperation.Enable,\n                                      is_success=False,\n                                      message=\"(02101)\" + err_msg)\n        raise Exception(err_msg + \" with \" + error_string)\n    hutil.log(\"Succeeded in creating the account or setting the password.\")\n\n    # Allow password authentication if user_pass is provided\n    if user_pass is not None:\n        ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True,\n                                      message=\"create-user-with-password\")\n        _allow_password_auth(hutil)\n\n    # Reset ssh key with the new public key passed in or reuse old public key.\n    if cert_txt:\n        # support for SSH2-compatible format for public keys in addition to OpenSSH-compatible format\n        if cert_txt.strip().startswith(BeginSSHTag):\n            ext_utils.set_file_contents(\"temp.pub\", cert_txt.strip())\n            retcode, output = 
ext_utils.run_command_get_output(['ssh-keygen', '-i', '-f', 'temp.pub'])\n            if retcode > 0:\n                raise Exception(\"Failed to convert SSH2 key to OpenSSH key.\")\n            hutil.log(\"Succeeded in converting SSH2 key to OpenSSH key.\")\n            cert_txt = output\n            os.remove(\"temp.pub\")\n\n        if cert_txt.strip().lower().startswith(\"ssh-rsa\") or cert_txt.strip().lower().startswith(\"ssh-ed25519\"):\n            no_convert = True\n        try:\n            pub_path = os.path.join('/home/', user_name, '.ssh',\n                                    'authorized_keys')\n            ovf_env.UserName = user_name\n            if no_convert:\n                if cert_txt:\n                    pub_path = ovf_env.prepare_dir(pub_path, MyDistro)\n                    final_cert_txt = cert_txt\n                    if not cert_txt.endswith(\"\\n\"):\n                        final_cert_txt = final_cert_txt + \"\\n\"\n\n                    if remove_prior_keys == True:\n                        ext_utils.set_file_contents(pub_path, final_cert_txt)\n                        hutil.log(\"Removed prior ssh keys and added new key for user %s\" % user_name)\n                    else:\n                        ext_utils.append_file_contents(pub_path, final_cert_txt)\n        \n                    MyDistro.set_se_linux_context(\n                        pub_path, 'unconfined_u:object_r:ssh_home_t:s0')\n                    ext_utils.change_owner(pub_path, user_name)\n                    ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True,\n                                                  message=\"create-user\")\n                    hutil.log(\"Succeeded in resetting ssh_key.\")\n                else:\n                    err_msg = \"Failed to reset ssh key because the cert content is empty.\"\n                    ext_utils.add_extension_event(name=hutil.get_name(),\n                                                  op=constants.WALAEventOperation.Enable,\n                                                  is_success=False,\n                                                  message=\"(02100)\" + err_msg)\n            else:\n                # do the certificate conversion\n                # we support PKCS8 certificates besides ssh-rsa public keys\n                _save_cert_str_as_file(cert_txt, 'temp.crt')\n                pub_path = ovf_env.prepare_dir(pub_path, MyDistro)\n                retcode = ext_utils.run_command_and_write_stdout_to_file(\n                    [constants.Openssl, 'x509', '-in', 'temp.crt', '-noout', '-pubkey'], \"temp.pub\")\n                if retcode > 0:\n                    raise Exception(\"Failed to generate public key file.\")\n\n                MyDistro.ssh_deploy_public_key('temp.pub', pub_path)\n                os.remove('temp.pub')\n                os.remove('temp.crt')\n                ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True,\n                                              message=\"create-user\")\n                hutil.log(\"Succeeded in resetting ssh_key.\")\n        except Exception as e:\n            hutil.log(str(e))\n            ext_utils.add_extension_event(name=hutil.get_name(),\n                                          op=constants.WALAEventOperation.Enable,\n                                          is_success=False,\n                                          message=\"(02100)Failed to reset ssh key.\")\n            raise e\n\n\ndef 
_get_other_sudoers(user_name):\n    sudoers_file = '/etc/sudoers.d/waagent'\n    if not os.path.isfile(sudoers_file):\n        return None\n    sudoers = ext_utils.get_file_contents(sudoers_file).split(\"\\n\")\n    pattern = '^{0}\\s'.format(user_name)\n    sudoers = list(filter(lambda x: re.match(pattern, x) is None, sudoers))\n    return sudoers\n\n\ndef _save_other_sudoers(sudoers):\n    sudoers_file = '/etc/sudoers.d/waagent'\n    if sudoers is None:\n        return\n    ext_utils.append_file_contents(sudoers_file, \"\\n\".join(sudoers))\n    os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n\n\ndef _allow_password_auth(hutil):\n    name = \"PasswordAuthentication\"\n    _backup_and_update_sshd_config(hutil, name, \"yes\")\n\n    cloudInitConfigPath = \"/etc/ssh/sshd_config.d/50-cloud-init.conf\"\n    config = ext_utils.get_file_contents(cloudInitConfigPath)\n    if config is not None:\n        config = config.split(\"\\n\")\n        _set_sshd_config(config, name, \"yes\")\n        ext_utils.replace_file_with_contents_atomic(cloudInitConfigPath, \"\\n\".join(config))\n\n\ndef _backup_and_update_sshd_config(hutil, attr_name, attr_value):\n    config = ext_utils.get_file_contents(SshdConfigPath).split(\"\\n\")\n\n    for i in range(0, len(config)):\n        if config[i].startswith(attr_name) and attr_value in config[i].lower():\n            hutil.log(\"%s already set to %s in sshd_config, skip update.\" % (attr_name, attr_value))\n            return\n\n    hutil.log(\"Setting %s to %s in sshd_config.\" % (attr_name, attr_value))\n    \n    _backup_sshd_config(hutil)\n    _set_sshd_config(config, attr_name, attr_value)\n    ext_utils.replace_file_with_contents_atomic(SshdConfigPath, \"\\n\".join(config))\n\n\ndef _set_sshd_config(config, name, val):\n    notfound = True\n    i = None\n    for i in range(0, len(config)):\n        if config[i].startswith(name):\n            config[i] = \"{0} {1}\".format(name, val)\n            notfound = False\n        elif config[i].startswith(\"Match\"):\n            # Match block must be put in the end of sshd config\n            break\n    if notfound:\n        if i is None:\n            i = 0\n        config.insert(i, \"{0} {1}\".format(name, val))\n    return config\n\n\ndef _get_default_ssh_config_filename():\n    if OSName is not None:\n        # the default ssh config files are present in\n        # /var/lib/waagent/Microsoft.OSTCExtensions.VMAccessForLinux-<version>/resources/\n        if re.search(\"centos\", OSName, re.IGNORECASE):\n            return \"centos_default\"\n        if re.search(\"debian\", OSName, re.IGNORECASE):\n            return \"debian_default\"\n        if re.search(\"fedora\", OSName, re.IGNORECASE):\n            return \"fedora_default\"\n        if re.search(\"red\\s?hat\", OSName, re.IGNORECASE):\n            return \"redhat_default\"\n        if re.search(\"suse\", OSName, re.IGNORECASE):\n            return \"SuSE_default\"\n        if re.search(\"ubuntu\", OSName, re.IGNORECASE):\n            return \"ubuntu_default\"\n    return \"default\"\n\n\ndef _reset_sshd_config(hutil, restore_backup_ssh):\n    ssh_default_config_filename = _get_default_ssh_config_filename()\n    ssh_default_config_file_path = os.path.join(os.getcwd(), 'resources', ssh_default_config_filename)\n\n    if not os.path.exists(ssh_default_config_file_path):\n        ssh_default_config_file_path = os.path.join(os.getcwd(), 'resources', 'default')\n\n    if restore_backup_ssh:\n        if os.path.exists(SshdConfigBackupPath):\n            
ssh_default_config_file_path = SshdConfigBackupPath\n\n    # handle CoreOS differently\n    if isinstance(MyDistro, dist_utils.CoreOSDistro):\n        # Parse sshd port from ssh_default_config_file_path\n        sshd_port = 22\n        regex = re.compile(r\"^Port\\s+(\\d+)\", re.VERBOSE)\n        with open(ssh_default_config_file_path) as f:\n            for line in f:\n                match = regex.match(line)\n                if match:\n                    sshd_port = match.group(1)\n                    break\n\n        # Prepare cloud init config for coreos-cloudinit\n        f = tempfile.NamedTemporaryFile(delete=False)\n        f.close()\n        cfg_tempfile = f.name\n        cfg_content = \"#cloud-config\\n\\n\"\n\n        # Overwrite /etc/ssh/sshd_config\n        cfg_content += \"write_files:\\n\"\n        cfg_content += \"  - path: {0}\\n\".format(SshdConfigPath)\n        cfg_content += \"    permissions: 0600\\n\"\n        cfg_content += \"    owner: root:root\\n\"\n        cfg_content += \"    content: |\\n\"\n        for line in ext_utils.get_file_contents(ssh_default_config_file_path).split('\\n'):\n            cfg_content += \"      {0}\\n\".format(line)\n\n        # Change the sshd port in /etc/systemd/system/sshd.socket\n        cfg_content += \"\\ncoreos:\\n\"\n        cfg_content += \"  units:\\n\"\n        cfg_content += \"  - name: sshd.socket\\n\"\n        cfg_content += \"    command: restart\\n\"\n        cfg_content += \"    content: |\\n\"\n        cfg_content += \"      [Socket]\\n\"\n        cfg_content += \"      ListenStream={0}\\n\".format(sshd_port)\n        cfg_content += \"      Accept=yes\\n\"\n\n        ext_utils.set_file_contents(cfg_tempfile, cfg_content)\n\n        ext_utils.run(['coreos-cloudinit', '-from-file', cfg_tempfile], chk_err=False)\n        os.remove(cfg_tempfile)\n    else:\n        shutil.copyfile(ssh_default_config_file_path, SshdConfigPath)\n        if ssh_default_config_file_path == SshdConfigBackupPath:\n            hutil.log(\"sshd_config restored from backup, remove backup file.\")\n            # Remove backup config once sshd_config restored\n            os.remove(ssh_default_config_file_path)\n        MyDistro.restart_ssh_service()\n\n\ndef _backup_sshd_config(hutil):\n    if os.path.exists(SshdConfigPath) and not os.path.exists(SshdConfigBackupPath):\n        # Create VMAccess cache folder if doesn't exist\n        if not os.path.exists(os.path.dirname(SshdConfigBackupPath)):\n            os.makedirs(os.path.dirname(SshdConfigBackupPath))\n\n        hutil.log(\"Create backup ssh config file\")\n        open(SshdConfigBackupPath, 'a').close()\n        \n        # When copying, make sure to preserve permissions and ownership.\n        ownership = os.stat(SshdConfigPath)\n        shutil.copy2(SshdConfigPath, SshdConfigBackupPath)\n        os.chown(SshdConfigBackupPath, ownership.st_uid, ownership.st_gid)\n\n\ndef _save_cert_str_as_file(cert_txt, file_name):\n    cert_start = cert_txt.find(BeginCertificateTag)\n    if cert_start >= 0:\n        cert_txt = cert_txt[cert_start + len(BeginCertificateTag):]\n    cert_end = cert_txt.find(EndCertificateTag)\n    if cert_end >= 0:\n        cert_txt = cert_txt[:cert_end]\n    cert_txt = cert_txt.strip()\n    cert_txt = \"{0}\\n{1}\\n{2}\\n\".format(BeginCertificateTag, cert_txt, EndCertificateTag)\n    ext_utils.set_file_contents(file_name, cert_txt)\n\n\ndef _open_ssh_port():\n    _del_rule_if_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'DROP'])\n    
_del_rule_if_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'REJECT'])\n    _del_rule_if_exists(['INPUT', '-p', '-j', 'DROP'])\n    _del_rule_if_exists(['INPUT', '-p', '-j', 'REJECT'])\n    _insert_rule_if_not_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'ACCEPT'])\n\n    _del_rule_if_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--sport', '22', '-j', 'DROP'])\n    _del_rule_if_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--sport', '22', '-j', 'REJECT'])\n    _del_rule_if_exists(['OUTPUT', '-p', '-j', 'DROP'])\n    _del_rule_if_exists(['OUTPUT', '-p', '-j', 'REJECT'])\n    _insert_rule_if_not_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'ACCEPT'])\n\n\ndef _del_rule_if_exists(rule_string):\n    rule_string_for_cmp = \" \".join(rule_string)\n    cmd_result = ext_utils.run_command_get_output(['iptables-save'])\n    while cmd_result[0] == 0 and (rule_string_for_cmp in cmd_result[1]):\n        ext_utils.run(['iptables', '-D'] + rule_string)\n        cmd_result = ext_utils.run_command_get_output(['iptables-save'])\n\n\ndef _insert_rule_if_not_exists(rule_string):\n    rule_string_for_cmp = \" \".join(rule_string)\n    cmd_result = ext_utils.run_command_get_output(['iptables-save'])\n    if cmd_result[0] == 0 and (rule_string_for_cmp not in cmd_result[1]):\n        ext_utils.run_command_get_output(['iptables', '-I'] + rule_string)\n\n\ndef check_and_repair_disk(hutil):\n    public_settings = hutil.get_public_settings()\n    if public_settings:\n        check_disk = public_settings.get('check_disk')\n        repair_disk = public_settings.get('repair_disk')\n        disk_name = public_settings.get('disk_name')\n\n        if check_disk and repair_disk:\n            err_msg = (\"check_disk and repair_disk was both specified.\"\n                       \"Only one of them can be specified\")\n            hutil.error(err_msg)\n            hutil.do_exit(1, 'Enable', 'error', '0', 'Enable failed.')\n\n        if check_disk:\n            ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True, message=\"check_disk\")\n            outretcode = _fsck_check(hutil)\n            hutil.log(\"Successfully checked disk\")\n            return outretcode\n\n        if repair_disk:\n            ext_utils.add_extension_event(name=hutil.get_name(), op=\"scenario\", is_success=True, message=\"repair_disk\")\n            outdata = _fsck_repair(hutil, disk_name)\n            hutil.log(\"Repaired and remounted disk\")\n            return outdata\n\n\ndef _fsck_check(hutil):\n    try:\n        retcode = ext_utils.run(['fsck', '-As', '-y'])\n        if retcode > 0:\n            hutil.log(retcode)\n            raise Exception(\"Disk check was not successful\")\n        else:\n            return retcode\n    except Exception as e:\n        hutil.error(\"Failed to run disk check with error: {0}, {1}\".format(\n            str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Check', 'error', '0', 'Check failed.')\n\n\ndef _fsck_repair(hutil, disk_name):\n    # first unmount disks and loop devices lazy + forced\n    try:\n        cmd_result = ext_utils.run(['umount', '-f', '/' + disk_name])\n        if cmd_result != 0:\n            # Fail fast\n            hutil.log(\"Failed to unmount disk: %s\" % disk_name)\n            # run repair\n            retcode = ext_utils.run(['fsck', '-AR', '-y'])\n            hutil.log(\"Ran fsck with return code: %d\" % retcode)\n            if retcode == 0:\n                retcode, output = 
ext_utils.run_command_get_output([\"mount\"])\n                hutil.log(output)\n                return output\n        else:\n            raise Exception(\"Failed to mount disks\")\n    except Exception as e:\n        hutil.error(\"{0}, {1}\".format(str(e), traceback.format_exc()))\n        hutil.do_exit(1, 'Repair', 'error', '0', 'Repair failed.')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
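A note on the `_set_sshd_config` helper above: it rewrites every existing occurrence of a directive in place and, when the directive is absent, inserts it before the first `Match` block (Match blocks must stay at the end of sshd_config). A trimmed, self-contained restatement of that logic with a usage example:

```python
# Trimmed restatement of vmaccess._set_sshd_config: rewrite a directive in
# place wherever it occurs; if it is absent, insert it before the first
# "Match" block, since Match blocks must remain at the end of sshd_config.
def set_sshd_config(config, name, val):
    found = False
    i = 0
    for i in range(len(config)):
        if config[i].startswith(name):
            config[i] = "{0} {1}".format(name, val)
            found = True
        elif config[i].startswith("Match"):
            break
    if not found:
        # Quirk carried over from the original helper: when there is no
        # Match block, this inserts before the last line, not after it.
        config.insert(i, "{0} {1}".format(name, val))
    return config

lines = ["Port 22", "PasswordAuthentication no", "Match User anoncvs"]
set_sshd_config(lines, "PasswordAuthentication", "yes")
# lines is now ['Port 22', 'PasswordAuthentication yes', 'Match User anoncvs']
```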
  {
    "path": "VMBackup/.gitignore",
    "content": "# VMBackup debughelper builds\ndebughelper/msft_snap_monit\n"
  },
  {
    "path": "VMBackup/HandlerManifest.json",
    "content": "[\n    {\n        \"handlerManifest\": {\n            \"disableCommand\": \"main/handle.sh disable\", \n            \"enableCommand\": \"main/handle.sh enable\", \n            \"installCommand\": \"main/handle.sh install\", \n            \"rebootAfterInstall\": false, \n            \"reportHeartbeat\": false, \n            \"uninstallCommand\": \"main/handle.sh uninstall\", \n            \"updateCommand\": \"main/handle.sh update\"\n        }, \n        \"name\": \"MyBackupTestLinuxInt\", \n        \"version\": \"1.0.9120.0\"\n    }\n]"
  },
  {
    "path": "VMBackup/MANIFEST.in",
    "content": "include HandlerManifest.json handler.py\nprune test\n"
  },
  {
    "path": "VMBackup/README.txt",
    "content": "VMBackup extension is used by Azure Backup service to provide application consistent backup for Linux VMs running in Azure. \n\n**Note:** This extension is not recommended to be installed outside Azure Backup service context. \n\n## Deploying the extension to a VM\nThis extension gets deployed as part of first scheduled backup of the VM post you configure VM for backup. You can configure VM to be backed up using [Azure Portal](https://docs.microsoft.com/azure/backup/quick-backup-vm-portal), [Azure PowerShell](https://docs.microsoft.com/azure/backup/quick-backup-vm-powershell) or Azure CLI(https://docs.microsoft.com/azure/backup/quick-backup-vm-cli). \n\n"
  },
  {
    "path": "VMBackup/VMBackup.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>{a09c7cdb-874f-4214-bab2-90f888eac208}</ProjectGuid>\n    <ProjectHome>.</ProjectHome>\n    <StartupFile>test\\handle.py</StartupFile>\n    <SearchPath>\n    </SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>VMBackup</Name>\n    <RootNamespace>VMBackup</RootNamespace>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <PtvsTargetsFile>$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <ItemGroup>\n    <Folder Include=\"main\\\" />\n    <Folder Include=\"main\\patch\\\" />\n    <Folder Include=\"main\\Utils\\\" />\n    <Folder Include=\"test\\\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Compile Include=\"main\\backuplogger.py\" />\n    <Compile Include=\"main\\blobwriter.py\" />\n    <Compile Include=\"main\\common.py\" />\n    <Compile Include=\"main\\DiskUtil.py\" />\n    <Compile Include=\"main\\fsfreezer.py\" />\n    <Compile Include=\"main\\handle.py\" />\n    <Compile Include=\"main\\HttpUtil.py\" />\n    <Compile Include=\"main\\MachineIdentity.py\" />\n    <Compile Include=\"main\\mounts.py\" />\n    <Compile Include=\"main\\parameterparser.py\" />\n    <Compile Include=\"main\\patch\\AbstractPatching.py\" />\n    <Compile Include=\"main\\patch\\centosPatching.py\" />\n    <Compile Include=\"main\\patch\\oraclePatching.py\" />\n    <Compile Include=\"main\\patch\\redhatPatching.py\" />\n    <Compile Include=\"main\\patch\\SuSEPatching.py\" />\n    <Compile Include=\"main\\patch\\debianPatching.py\" />\n    <Compile Include=\"main\\patch\\UbuntuPatching.py\" />\n    <Compile Include=\"main\\patch\\__init__.py\" />\n    <Compile Include=\"main\\snapshotter.py\" />\n    <Compile Include=\"main\\taskidentity.py\" />\n    <Compile Include=\"main\\Utils\\HandlerUtil.py\" />\n    <Compile Include=\"main\\Utils\\WAAgentUtil.py\" />\n    <Compile Include=\"main\\Utils\\__init__.py\" />\n    <Compile Include=\"main\\__init__.py\" />\n    <Compile Include=\"mkstub.py\" />\n    <Compile Include=\"setup.py\" />\n    <Compile Include=\"test\\handle.py\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Content Include=\"HandlerManifest.json\" />\n    <Content Include=\"MANIFEST.in\" />\n    <Content Include=\"README.txt\" />\n    <Content Include=\"references\" />\n    <Content Include=\"test.ps1\" />\n    <Content Include=\"test\\handle.py~\" />\n    <Content Include=\"VMBackupForLinuxExtension-0.1.0.9.xml\" />\n    <Content Include=\"VMBackupForLinuxExtension-0.1.0.91.xml\" />\n  </ItemGroup>\n  <Import Condition=\"Exists($(PtvsTargetsFile))\" Project=\"$(PtvsTargetsFile)\" />\n  <Import Condition=\"!Exists($(PtvsTargetsFile))\" 
Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" />\n  <!-- Uncomment the CoreCompile target to enable the Build command in\n       Visual Studio and specify your pre- and post-build commands in\n       the BeforeBuild and AfterBuild targets below. -->\n  <!--<Target Name=\"CoreCompile\" />-->\n  <Target Name=\"BeforeBuild\">\n  </Target>\n  <Target Name=\"AfterBuild\">\n  </Target>\n</Project>"
  },
  {
    "path": "VMBackup/debughelper/README.md",
    "content": "# Diagnostic app for Snapshot Extensions\n\n## What?\n\nThis is a very experimental stage program to capture system level logs\nand metrics while a snapshot operation is in progress. This kind of data is often\nprivate to each customer and hence we have no plans right now of including this\nas part of the normal workflow. This tool will hopefully help us gather critical\ndata to debug issues that are not solved with the normal logs we collect.\n\n## Build\n- Please install [Go](https://go.dev/doc/install) and then\n- Copy this directory somewhere. Let's say:\n`/var/log/azure/Microsoft.Azure.RecoveryServices.VMSnapshotLinux/debughelper`\n\n- Change directory and build\n\n```sh\nsudo su\ncd /var/log/azure/Microsoft.Azure.RecoveryServices.VMSnapshotLinux/debughelper\n# Make sure location of go binary is in PATH\n# If go was extracted to /usr/local/go - then run\n#\n# export PATH=\"/usr/local/go/bin:$PATH\"\n# \n# put the export statement above in your ~/.profile or ~/.bashrc \n# or whatever`rc` file depending on your shell to persist across shell\n# restarts\n#\n# If you used your package manager to install `go` then\n# you, most likely, don't need to do any of this.\ngo build\nls -la\n```\n\n- On listing files you should see an entry called `msft_snap_monit`\n- That's it!\n\n## Run as part of a snapshot operation\n\n- Create or edit \"/etc/azure/vmbackup.conf\"\n- Add a new section `[Monitor]` to the file and \n- under that add the following options\n- `Run=yes`\n- `Strace=no`\n- `Location=/var/log/azure/Microsoft.Azure.RecoveryServices.VMSnapshotLinux/debughelper`\n- Of course if you want strace to also run while taking a snapshot enable the option\n- Your `/etc/azure/vmback.conf` file should look something like this after these changes\n```text\n[Monitor]\nRun=yes\nStrace=no\nLocation=/var/log/azure/Microsoft.Azure.RecoveryServices.VMSnapshotLinux/debughelper\n```\n- I've not included other sections that the file might already include so donot delete if there are any.\n- That's it! 
I'll add a more detailed section about how to read the data in each file soon.\nFor the curious, it's pretty much the data in the `/proc/*` files.\n\n## Running manually\nWhen run manually, right now, it will keep running until it receives an OS interrupt (Ctrl+C).\n\nThe default behavior (run `./msft_snap_monit --help` for all options) will log everything\nto a shared memory location (`/dev/shm/Microsoft.Azure.Snapshots.Diagnostics/`)\nand, after it has been interrupted, will move the log subdirectory (see section below)\nto the working directory, which by default is the current directory.\n\n```sh\n./msft_snap_monit\n```\n\nYou should see a lot of logs like:\n```\n2023/10/19 12:05:34 [monitorCPU] -> sending new metric\n2023/10/19 12:05:34 [logMem] -> received new metric\n2023/10/19 12:05:34 [logMem] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:34 [logDisk] -> received new metric\n2023/10/19 12:05:34 [logDisk] -> writing to log file\n2023/10/19 12:05:35 [monitorCPU] -> sending new metric\n2023/10/19 12:05:35 [logMem] -> received new metric\n2023/10/19 12:05:35 [logMem] -> writing to log file\n2023/10/19 12:05:35 [logDisk] -> received new metric\n2023/10/19 12:05:35 [logDisk] -> writing to log file\n2023/10/19 12:05:35 [logDisk] -> received new metric\n... and so on\n```\n\nLeave this running and, in another terminal window:\n```sh\n# go to the shared memory location. this directory is in memory so fsfreeze will\n# not affect it\ncd /dev/shm/Microsoft.Azure.Snapshots.Diagnostics\nls -l\n```\n\nYou should see a subdirectory here that looks something like `01H7J4WD653PA49Y2X3J1RVYHS`.\nThis is a ULID (see the ULID section below). `cd` into it and list the files.\n```sh\ncd 01H7J4WD653PA49Y2X3J1RVYHS\nls\n```\n\nNow you should see some `.log` files here. `tail` them to see data as it's written:\n```sh\n# tail the cpu file\ntail -f cpu.log\n\n# or tail all log files\ntail -f *.log\n```\n
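Each log file is written as JSON Lines (one JSON object per line), so standard tools can slice it; for example, assuming `jq` is installed:\n```sh\n# pretty-print the most recent memory sample\ntail -n 1 mem.log | jq .\n```\n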
\n## ULID\nEach run will generate a fresh [ULID](https://github.com/ulid/spec).\nThis ID is unique to this run, and all associated logs will be stored in a\nsubdirectory inside the working directory with the ID as its name. ULID\nhas the nice property of encoding the Unix timestamp in the generated ID,\nso it is easy later to make correlations based on time.\n\n```sh\ngo install github.com/oklog/ulid/v2/cmd/ulid@latest\n# Let's assume we have a ULID: 01H7J38F44J44RZ5CYYJHKMVHB\nulid 01H7J38F44J44RZ5CYYJHKMVHB\n```\n\nThe output should be\n```sh\nFri Aug 11 10:46:15.94 UTC 2023\n```\n\n### NB:\nRunning this with strace enabled hasn't been completely tested yet - give it a whirl if you want. Of course, please make sure strace is installed.\nYou will need the PID of a running process.\n\nIn one terminal, run:\n```sh\nwatch ls -la /tmp\n```\n\nIn another terminal, run:\n```sh\nps -ef | grep watch | grep -v grep | awk '{print $2}'\n```\n\nLet's say the process ID is: `35151`\n\n```sh\n./msft_snap_monit --strace --tracepid 35151\n```\n\n## Plan\nThere are quite a few more resources to log and monitor, such as processes and known applications\nthat conflict with snapshots (antiviruses, network monitors),\nbut the broader structure of the code should not need too many changes. Please test\nit out and open issues for bugs and feature requests that you think would help\nin debugging snapshots.\n\n### Thank You\n"
  },
  {
    "path": "VMBackup/debughelper/checkMounts.go",
    "content": "package main\n\ntype Mount struct{}\n\n// Get all mounts - to check for noatime vs reltime\nfunc mounts() []Mount {\n\treturn nil\n}\n"
  },
  {
    "path": "VMBackup/debughelper/go.mod",
    "content": "module msft_snap_monit\n\ngo 1.21.0\n\nrequire github.com/oklog/ulid/v2 v2.1.0\n"
  },
  {
    "path": "VMBackup/debughelper/go.sum",
    "content": "github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=\ngithub.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=\ngithub.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=\n"
  },
  {
    "path": "VMBackup/debughelper/main.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/signal\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/oklog/ulid/v2\"\n)\n\nvar (\n\tworking_directory = flag.String(\n\t\t\"wd\",\n\t\t\"./\",\n\t\t\"Location which this application will use for all it's processing and persisting data. Please make sure this location does not get frozen during a snapshot operation\",\n\t)\n\textension_command = flag.String(\"extcmd\", \"\", \"The command to execute extensions with\")\n\trun_diagnosis     = flag.Bool(\"diagnose\", false, \"Daignose the system\")\n\twith_strace       = flag.Bool(\"strace\", false, \"The tool will run with strace enabled\")\n\tstrace_pid        = flag.Int64(\"tracepid\", 0, \"The PID to apply strace on\")\n\tlog_to_mem        = flag.Bool(\"logtomem\", true, \"Will temporarily log to memory before moving all log files to working directory\")\n)\n\nfunc wrapErr(err error, msgs ...string) error {\n\tpc := make([]uintptr, 15)\n\tn := runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tframe, _ := frames.Next()\n\tsrc := frame.Function\n\ts := strings.Join(append([]string{src}, msgs...), \" -> \")\n\treturn fmt.Errorf(\"%s -> %s\", s, err.Error())\n}\n\nfunc checkBinExistence(c string) bool {\n\tif len(c) == 0 {\n\t\treturn false\n\t}\n\tcmd := exec.Command(\"which\", c)\n\tbs, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Println(wrapErr(err, \"CombinedOutput failed\"))\n\t\treturn false\n\t}\n\tif cmd.ProcessState.ExitCode() != 0 {\n\t\treturn false\n\t}\n\tif len(bs) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc checkSvcExistence(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tcmd := exec.Command(\"systemctl\", \"list-unit-files\", \"--type\", \"service\")\n\tcmd2 := exec.Command(\"grep\", \"-e\", s)\n\tr, w := io.Pipe()\n\tcmd.Stdout = w\n\tcmd2.Stdin = r\n\n\tvar b2 bytes.Buffer\n\tcmd2.Stdout = &b2\n\n\tcmd.Start()\n\tcmd2.Start()\n\tcmd.Wait()\n\tw.Close()\n\tcmd2.Wait()\n\n\tbs := b2.Bytes()\n\tif len(bs) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc envVarExists(v string) bool {\n\treturn len(os.Getenv(v)) > 0\n}\n\nfunc databaseText(d string) string {\n\treturn fmt.Sprintf(\"Unsupported database detected: \\\"%s\\\". Please make sure the database is not in use during a snapshot operation. The heavy disk IO behavior of databases can conflict with disk freezing\", d)\n}\n\nfunc avText(a string) string {\n\treturn fmt.Sprintf(\"Anitivirus detected: \\\"%s\\\". 
Make sure no files, directories, or mountpoints are being scanned during a snapshot operation\", a)\n}\n\nfunc diagnoseDbs() []string {\n\tdbreport := []string{}\n\tif checkSvcExistence(\"postgresql.service\") {\n\t\tdbreport = append(dbreport, databaseText(\"PostgreSQL\"))\n\t}\n\tif checkSvcExistence(\"mongod\") {\n\t\tdbreport = append(dbreport, databaseText(\"MongoDB\"))\n\t}\n\tif checkBinExistence(\"mysqld\") || checkBinExistence(\"mysql\") {\n\t\tdbreport = append(dbreport, databaseText(\"MySQL\"))\n\t}\n\treturn dbreport\n}\n\nfunc diagnoseAvs() []string {\n\tclamAVExists := checkBinExistence(\"clamscan\")\n\tbitDefenderExists := checkSvcExistence(\"bdsec*\")\n\n\tavreport := []string{}\n\n\tif clamAVExists {\n\t\tavreport = append(avreport, avText(\"ClamAV\"))\n\t}\n\tif bitDefenderExists {\n\t\tavreport = append(avreport, avText(\"Bitdefender\"))\n\t}\n\n\treturn avreport\n}\n\nfunc main() {\n\tflag.Parse()\n\topID := ulid.Make().String()\n\n\tif *with_strace && *strace_pid == 0 {\n\t\tlog.Println(\"Cannot trace PID: 0\")\n\t\treturn\n\t}\n\n\tr, rf, err := NewRun(*working_directory, opID, *with_strace, *strace_pid, *log_to_mem)\n\tif err != nil {\n\t\tlog.Println(wrapErr(err))\n\t\treturn\n\t}\n\tdefer rf.Close()\n\n\tif *run_diagnosis {\n\t\tlf := r.diagnose()\n\t\tlog.Printf(\"Diagnosis has been written to:\\n%s\\n\", lf)\n\t\treturn\n\t}\n\n\twg := sync.WaitGroup{}\n\tctx, cancel := context.WithCancel(context.Background())\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr.monitor(ctx)\n\t}()\n\n\tinter := make(chan os.Signal, 1)\n\t// Auto kill after 20 minutes\n\tgo func(inter chan os.Signal) {\n\t\ttime.Sleep(20 * time.Minute)\n\t\tinter <- syscall.SIGINT\n\t}(inter)\n\tsignal.Notify(inter, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)\n\t<-inter\n\tcancel()\n\twg.Wait()\n}\n"
  },
  {
    "path": "VMBackup/debughelper/run.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst LF_RUN = \"run.log\"\nconst LF_STRACE = \"strace.log\"\nconst LF_CPU = \"cpu.log\"\nconst LF_MEM = \"mem.log\"\nconst LF_DISK = \"disk.log\"\nconst LF_DIAG = \"diagnosis.log\"\n\ntype Run struct {\n\twd       string\n\topID     string\n\tlog      *log.Logger\n\tstrace   bool\n\ttracePID int64\n\tlogToMem bool\n\tinMemDir string\n}\n\nfunc NewRun(workingDir, opID string, with_strace bool, trace_pid int64, logToMem bool) (*Run, *os.File, error) {\n\tr := &Run{\n\t\twd:       workingDir,\n\t\topID:     opID,\n\t\tstrace:   with_strace,\n\t\ttracePID: trace_pid,\n\t\tinMemDir: \"/dev/shm/Microsoft.Azure.Snapshots.Diagnostics\",\n\t\tlogToMem: logToMem,\n\t}\n\tif r.logToMem {\n\t\tif err := os.MkdirAll(path.Join(r.inMemDir, r.opID), 0755); err != nil {\n\t\t\treturn nil, nil, wrapErr(err, \"os.MkdirAll failed\")\n\t\t}\n\t}\n\tf, err := os.OpenFile(path.Join(r.workDir(), LF_RUN), os.O_CREATE, 0644)\n\tif err != nil {\n\t\treturn nil, nil, wrapErr(err, \"os.OpenFile failed\")\n\t}\n\tr.log = log.New(f, \"\", log.Ldate|log.Ltime|log.LUTC)\n\treturn r, f, nil\n}\n\nfunc (r Run) workDir() string {\n\tp := path.Join(r.wd, r.opID)\n\tif r.logToMem {\n\t\tp = path.Join(r.inMemDir, r.opID)\n\t}\n\treturn p\n}\n\nfunc (r Run) startStrace(ctx context.Context) error {\n\tif !r.strace {\n\t\treturn nil\n\t}\n\tif r.tracePID == 0 {\n\t\treturn fmt.Errorf(\"empty process ID\")\n\t}\n\tcommand := exec.CommandContext(\n\t\tctx, \"strace\", \"-t\", \"-p\", fmt.Sprintf(\"%d\", r.tracePID),\n\t\t\"-f\", \"-o\", path.Join(r.workDir(), LF_STRACE),\n\t)\n\t_, err := command.CombinedOutput()\n\tif err != nil {\n\t\tr.log.Println(wrapErr(err, \"CombinedOutput failed\"))\n\t}\n\treturn nil\n}\n\nfunc (r Run) diagnose() string {\n\tavreport := diagnoseAvs()\n\tdbreport := diagnoseDbs()\n\tlogFile := path.Join(r.workDir(), LF_DIAG)\n\tf, err := os.OpenFile(logFile, os.O_CREATE, 0644)\n\tif err != nil {\n\t\tr.log.Println(wrapErr(err, \"os.OpenFile failed\"))\n\t}\n\tdefer f.Close()\n\n\tl := \"\"\n\tif len(avreport) > 0 {\n\t\tl = l + \"========== ANTIVIRUS ============\\n\\n\"\n\t\tl = l + strings.Join(avreport, \"\\n\\n\")\n\t}\n\tf.WriteString(l)\n\tif len(l) > 0 {\n\t\tl = \"\\n\\n\\n\"\n\t}\n\tif len(dbreport) > 0 {\n\t\tl = l + \"========== DATABSES ============\\n\\n\"\n\t\tl = l + strings.Join(dbreport, \"\\n\\n\")\n\t}\n\n\tf.WriteString(l)\n\tf.WriteString(\"\\n\")\n\tr.persistInMemDir()\n\treturn path.Join(r.wd, r.opID, LF_DIAG)\n}\n\ntype LoadAvg struct {\n\tTS         int64  `json:\"timestamp_millis\"`\n\tOne        string `json:\"one\"`\n\tFive       string `json:\"five\"`\n\tFifteen    string `json:\"fifteen\"`\n\tSchedRatio string `json:\"scheduled_ratio\"`\n\tLP         string `json:\"last_pid\"`\n}\n\nfunc (r Run) monitorCPU(ctx context.Context, cpuStream chan *LoadAvg) {\n\t// log.Println(\"[monitorCPU] -> Fired\")\n\tticker := time.NewTicker(time.Second)\n\tctx1, cancel := context.WithCancel(ctx)\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\t\tticker.Stop()\n\t\t\tcpuStream <- nil\n\t\t\tbreak outer\n\t\tcase <-ticker.C:\n\t\t\tgo func() {\n\t\t\t\tcommand := exec.CommandContext(ctx1, \"cat\", \"/proc/loadavg\")\n\t\t\t\tbs, err := command.CombinedOutput()\n\t\t\t\tif err != nil {\n\t\t\t\t\tr.log.Println(wrapErr(err, \"CombinedOutput failed\"))\n\t\t\t\t} else 
{\n\t\t\t\t\tfields := strings.Fields(strings.Trim(string(bs), \" \\n\"))\n\t\t\t\t\tif len(fields) != 5 {\n\t\t\t\t\t\tr.log.Println(wrapErr(fmt.Errorf(\"/proc/loadavg returned an invalid number of fields\")))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tla := LoadAvg{\n\t\t\t\t\t\t\tOne:        fields[0],\n\t\t\t\t\t\t\tFive:       fields[1],\n\t\t\t\t\t\t\tFifteen:    fields[2],\n\t\t\t\t\t\t\tSchedRatio: fields[3],\n\t\t\t\t\t\t\tLP:         fields[4],\n\t\t\t\t\t\t\tTS:         time.Now().UnixMilli(),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Println(\"[monitorCPU] -> sending new metric\")\n\t\t\t\t\t\tcpuStream <- &la\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t}\n\t}\n}\n\nfunc (r Run) logCPU(ctx context.Context, cpuStream chan *LoadAvg) error {\n\tf, err := os.Create(path.Join(r.workDir(), LF_CPU))\n\tif err != nil {\n\t\treturn wrapErr(err, \"os.Create failed\")\n\t}\n\t// logger := log.New(f, \"\", log.Ldate|log.Ltime|log.LUTC)\n\tdefer f.Close()\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak outer\n\t\tcase lav := <-cpuStream:\n\t\t\t// a nil metric signals that the monitor has shut down\n\t\t\tif lav == nil {\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t\t// log.Println(\"[logCPU] -> new metric received\")\n\t\t\tbs, err := json.Marshal(lav)\n\t\t\tif err != nil {\n\t\t\t\tr.log.Println(wrapErr(err, \"json.Marshal failed\"))\n\t\t\t} else {\n\t\t\t\t// log.Println(\"[logCPU] -> writing to log file\")\n\t\t\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", string(bs)))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype Mem struct {\n\tTS           int64 `json:\"timestamp_millis\"`\n\tTotalKb      int64 `json:\"total_kb\"`\n\tAvailKb      int64 `json:\"avail_kb\"`\n\tFreeKb       int64 `json:\"free_kb\"`\n\tCachedKb     int64 `json:\"cached_kb\"`\n\tSwapCachedKb int64 `json:\"swap_cached_kb\"`\n\tSwapTotalKb  int64 `json:\"swap_total_kb\"`\n\tSwapFreeKb   int64 `json:\"swap_free_kb\"`\n}\n\nfunc (r Run) monitorMem(ctx context.Context, memStream chan *Mem) {\n\tticker := time.NewTicker(time.Second)\n\tctx1, cancel := context.WithCancel(ctx)\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\t\tticker.Stop()\n\t\t\tmemStream <- nil\n\t\t\tbreak outer\n\t\tcase <-ticker.C:\n\t\t\tcommand := exec.CommandContext(ctx1, \"cat\", \"/proc/meminfo\")\n\t\t\tbs, err := command.CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(wrapErr(err, \"CombinedOutput failed\"))\n\t\t\t} else {\n\t\t\t\tm := Mem{\n\t\t\t\t\tTS: time.Now().UnixMilli(),\n\t\t\t\t}\n\t\t\t\tflag := false\n\t\t\t\tfor _, line := range strings.Split(string(bs), \"\\n\") {\n\t\t\t\t\tif len(line) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfields := strings.Fields(line)\n\t\t\t\t\tif len(fields) != 3 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tswitch fields[0] {\n\t\t\t\t\tcase \"MemTotal:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"MemTotal conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.TotalKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"MemAvailable:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"MemAvailable conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.AvailKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"MemFree:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"MemFree conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.FreeKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase 
\"Cached:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"Cached Mem conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.CachedKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"SwapCached:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"SwapCached conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.SwapCachedKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"SwapTotal:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"SwapTotal conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.SwapTotalKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"SwapFree:\":\n\t\t\t\t\t\tv, err := strconv.Atoi(fields[1])\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Println(\"SwapFree conversion to int failed: \", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tm.SwapFreeKb = int64(v)\n\t\t\t\t\t\t\tflag = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif flag {\n\t\t\t\t\t// log.Println(\"[monitorMem] -> sending new metric\")\n\t\t\t\t\tmemStream <- &m\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (r Run) logMem(ctx context.Context, memStream chan *Mem) error {\n\t// log.Println(\"[logMem] -> Fired\")\n\tf, err := os.Create(path.Join(r.workDir(), LF_MEM))\n\tif err != nil {\n\t\treturn wrapErr(err, \"OpenFile failed\")\n\t}\n\tdefer f.Close()\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak outer\n\t\tcase lav := <-memStream:\n\t\t\tif lav == nil {\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t\tlog.Println(\"[logMem] -> received new metric\")\n\t\t\tbs, err := json.Marshal(lav)\n\t\t\tif err != nil {\n\t\t\t\tr.log.Println(wrapErr(err, \"json.Marshal failed\"))\n\t\t\t} else {\n\t\t\t\tlog.Println(\"[logMem] -> writing to log file\")\n\t\t\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", string(bs)))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype DiskLog struct {\n\tTS                          int64  `json:\"timestamp_millis\"`\n\tMajorNum                    string `json:\"major_num\"`\n\tMinorNum                    string `json:\"minor_num\"`\n\tDeviceName                  string `json:\"device_name\"`\n\tReadsCompleted              string `json:\"reads_completed_successfully\"`\n\tReadsMerged                 string `json:\"reads_merged\"`\n\tSectorsRead                 string `json:\"sectors_read\"`\n\tTimeSpentReadingMs          string `json:\"time_spent_reading_ms\"`\n\tWritesCompleted             string `json:\"writes_completed\"`\n\tWriteMerged                 string `json:\"writes_merged\"`\n\tSectorsWritten              string `json:\"sectors_written\"`\n\tTimeSpentWritingMs          string `json:\"time_spent_writing\"`\n\tIosInProgress               string `json:\"ios_currently_in_progress\"`\n\tTimeSpentIosMs              string `json:\"time_spent_doing_ios_ms\"`\n\tWeightedTimeSpentDoingIosMs string `json:\"weighted_time_spent_doing_ios_ms\"`\n\tDiscardsCompleted           string `json:\"discards_completed_successfully\"`\n\tDiscardsMerged              string `json:\"discards_merged\"`\n\tSectorsDiscarded            string `json:\"sectors_discarded\"`\n\tTimeSpentDiscardingMs       string `json:\"time_sspent_discarding\"`\n\tFlushRequestsCompleted      string `json:\"flush_requests_completed_successfully\"`\n\tTimeSpentFlushingMs         string `json:\"time_spent_flushing\"`\n}\n\nfunc (r Run) 
monitorDisk(ctx context.Context, diskChan chan *DiskLog) {\n\tticker := time.NewTicker(time.Second)\n\tctx1, cancel := context.WithCancel(ctx)\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\t\tticker.Stop()\n\t\t\tdiskChan <- nil\n\t\t\tbreak outer\n\t\tcase <-ticker.C:\n\t\t\tcommand := exec.CommandContext(ctx1, \"cat\", \"/proc/diskstats\")\n\t\t\tbs, err := command.CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(wrapErr(err, \"CombinedOutput failed\"))\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t\tfor _, line := range strings.Split(string(bs), \"\\n\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\t// Get only sata or nvme disks; a diskstats line has at least 14 fields\n\t\t\t\tif len(fields) < 14 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !strings.Contains(fields[2], \"sd\") && !strings.Contains(fields[2], \"nvme\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdl := DiskLog{\n\t\t\t\t\tTS:                          time.Now().UnixMilli(),\n\t\t\t\t\tMajorNum:                    fields[0],\n\t\t\t\t\tMinorNum:                    fields[1],\n\t\t\t\t\tDeviceName:                  fields[2],\n\t\t\t\t\tReadsCompleted:              fields[3],\n\t\t\t\t\tReadsMerged:                 fields[4],\n\t\t\t\t\tSectorsRead:                 fields[5],\n\t\t\t\t\tTimeSpentReadingMs:          fields[6],\n\t\t\t\t\tWritesCompleted:             fields[7],\n\t\t\t\t\tWriteMerged:                 fields[8],\n\t\t\t\t\tSectorsWritten:              fields[9],\n\t\t\t\t\tTimeSpentWritingMs:          fields[10],\n\t\t\t\t\tIosInProgress:               fields[11],\n\t\t\t\t\tTimeSpentIosMs:              fields[12],\n\t\t\t\t\tWeightedTimeSpentDoingIosMs: fields[13],\n\t\t\t\t}\n\n\t\t\t\tlf := len(fields)\n\t\t\t\t// Kernel 4.18+ will have the following fields\n\t\t\t\tif lf >= 18 {\n\t\t\t\t\tdl.DiscardsCompleted = fields[14]\n\t\t\t\t\tdl.DiscardsMerged = fields[15]\n\t\t\t\t\tdl.SectorsDiscarded = fields[16]\n\t\t\t\t\tdl.TimeSpentDiscardingMs = fields[17]\n\t\t\t\t}\n\n\t\t\t\t// Kernel 5.5+ further have the following fields\n\t\t\t\tif lf >= 20 {\n\t\t\t\t\tdl.FlushRequestsCompleted = fields[18]\n\t\t\t\t\tdl.TimeSpentFlushingMs = fields[19]\n\t\t\t\t}\n\n\t\t\t\tdiskChan <- &dl\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (r Run) logDisk(ctx context.Context, diskChan chan *DiskLog) error {\n\tf, err := os.Create(path.Join(r.workDir(), LF_DISK))\n\tif err != nil {\n\t\treturn wrapErr(err, \"os.Create failed\")\n\t}\n\tdefer f.Close()\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak outer\n\t\tcase lav := <-diskChan:\n\t\t\tif lav == nil {\n\t\t\t\tbreak outer\n\t\t\t}\n\t\t\tlog.Println(\"[logDisk] -> received new metric\")\n\t\t\tbs, err := json.Marshal(lav)\n\t\t\tif err != nil {\n\t\t\t\tr.log.Println(wrapErr(err, \"json.Marshal failed\"))\n\t\t\t} else {\n\t\t\t\tlog.Println(\"[logDisk] -> writing to log file\")\n\t\t\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", string(bs)))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r Run) persistInMemDir() {\n\t// log.Println(\"[persistInMemDir] -> Fired\")\n\tif !r.logToMem {\n\t\treturn\n\t}\n\tlog.Printf(\"moving: \\\"%s\\\" to \\\"%s\\\"\\n\", r.workDir(), r.wd)\n\tcmd := exec.Command(\"mv\", r.workDir(), fmt.Sprintf(\"%s/\", r.wd))\n\tif _, err := cmd.CombinedOutput(); err != nil {\n\t\tr.log.Println(wrapErr(err, fmt.Sprintf(\"moving from shared memory to path: \\\"%s\\\" failed\", r.wd)))\n\t}\n}\n\nfunc (r Run) monitor(ctx context.Context) {\n\t// log.Println(\"[monitor] -> Fired\")\n\twg := sync.WaitGroup{}\n\n\t// save pid file\n\tpf, err := 
os.Create(path.Join(r.workDir(), \"monitor.pid\"))\n\tif err != nil {\n\t\tr.log.Println(\"error creating pid file\")\n\t\treturn\n\t}\n\n\tpf.WriteString(fmt.Sprintf(\"%d\", os.Getpid()))\n\tpf.Close()\n\n\t// strace\n\ttctx, tcancel := context.WithCancel(ctx)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr.startStrace(tctx)\n\t}()\n\n\t// CPU ==============================\n\tcpuStream := make(chan *LoadAvg, 1)\n\tcpuCtx, cpuCancel := context.WithCancel(ctx)\n\tlogCpuCtx, logCpuCancel := context.WithCancel(ctx)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr.monitorCPU(cpuCtx, cpuStream)\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := r.logCPU(logCpuCtx, cpuStream); err != nil {\n\t\t\tr.log.Println(wrapErr(err))\n\t\t}\n\t}()\n\n\t// RAM\n\tmemStream := make(chan *Mem, 1)\n\tmemCtx, memCancel := context.WithCancel(ctx)\n\tlogMemCtx, logMemCancel := context.WithCancel(ctx)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr.monitorMem(memCtx, memStream)\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := r.logMem(logMemCtx, memStream); err != nil {\n\t\t\tr.log.Println(wrapErr(err))\n\t\t}\n\t}()\n\n\t// Disk\n\tdiskChan := make(chan *DiskLog, 20)\n\tdiskCtx, diskCancel := context.WithCancel(ctx)\n\tlogDiskCtx, logDiskCancel := context.WithCancel(ctx)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tr.monitorDisk(diskCtx, diskChan)\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif err := r.logDisk(logDiskCtx, diskChan); err != nil {\n\t\t\tr.log.Println(wrapErr(err))\n\t\t}\n\t}()\n\n\t<-ctx.Done()\n\ttcancel()\n\tcpuCancel()\n\tlogCpuCancel()\n\tmemCancel()\n\tlogMemCancel()\n\tdiskCancel()\n\tlogDiskCancel()\n\n\twg.Wait()\n\n\tr.persistInMemDir()\n}\n"
  },
  {
    "path": "VMBackup/main/ExtensionErrorCodeHelper.py",
    "content": "from Utils import Status\n\nclass ExtensionErrorCodeEnum():\n    success_appconsistent = 0\n    success = 1\n    error = 2\n    SuccessAlreadyProcessedInput = 3\n    ExtensionTempTerminalState = 4\n    error_parameter = 11\n    error_12 = 12\n    error_wrong_time = 13\n    error_same_taskid = 14\n    error_http_failure = 15\n    FailedHandlerGuestAgentCertificateNotFound = 16\n    #error_upload_status_blob = 16\n    FailedInvalidDataDiskLunList = 17\n\n    FailedRetryableSnapshotFailedNoNetwork = 76\n    FailedHostSnapshotRemoteServerError = 556\n    FailedSnapshotLimitReached = 85\n    FailedRetryableSnapshotRateExceeded = 173\n    FailedRetryableSnapshotFailedRestrictedNetwork = 761\n\n    FailedRetryableFsFreezeFailed = 121\n    FailedRetryableFsFreezeTimeout = 122\n    FailedRetryableUnableToOpenMount = 123\n    FailedSafeFreezeBinaryNotFound = 124\n\n    FailedPrepostPreScriptFailed = 300\n    FailedPrepostPostScriptFailed = 301\n    FailedPrepostPreScriptNotFound = 302\n    FailedPrepostPostScriptNotFound = 303\n    FailedPrepostPluginhostConfigParsing = 304\n    FailedPrepostPluginConfigParsing = 305\n    FailedPrepostPreScriptPermissionError = 306\n    FailedPrepostPostScriptPermissionError = 307\n    FailedPrepostPreScriptTimeout = 308\n    FailedPrepostPostScriptTimeout = 309\n    FailedPrepostPluginhostPreTimeout = 310\n    FailedPrepostPluginhostPostTimeout = 311\n    FailedPrepostCheckSumMismatch = 312\n    FailedPrepostPluginhostConfigNotFound = 313\n    FailedPrepostPluginhostConfigPermissionError = 314\n    FailedPrepostPluginhostConfigOwnershipError = 315\n    FailedPrepostPluginConfigNotFound = 316\n    FailedPrepostPluginConfigPermissionError = 317\n    FailedPrepostPluginConfigOwnershipError = 318\n    FailedGuestAgentInvokedCommandTooLate = 402\n\n    FailedWorkloadPreError = 500\n    FailedWorkloadConfParsingError = 501\n    FailedWorkloadInvalidRole = 502\n    FailedWorkloadInvalidWorkloadName = 503\n    FailedWorkloadPostError = 504\n    FailedWorkloadAuthorizationMissing = 505\n    FailedWorkloadConnectionError = 506\n    FailedWorkloadIPCDirectoryMissing = 507\n    FailedWorkloadDatabaseStatusChanged = 508\n    FailedWorkloadQuiescingError = 509\n    FailedWorkloadQuiescingTimeout = 510\n    FailedWorkloadDatabaseInNoArchiveLog = 511\n    FailedWorkloadLogModeChanged = 512\n\nclass ExtensionErrorCodeHelper:\n    ExtensionErrorCodeDict = {\n            ExtensionErrorCodeEnum.success_appconsistent : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.success : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.ExtensionTempTerminalState : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.error : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.error_12 : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.FailedRetryableSnapshotRateExceeded : Status.ExtVmHealthStateEnum.green,\n            ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList : Status.ExtVmHealthStateEnum.green,\n            \n            ExtensionErrorCodeEnum.FailedSafeFreezeBinaryNotFound: Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedRetryableFsFreezeTimeout : Status.ExtVmHealthStateEnum.yellow,\n            
ExtensionErrorCodeEnum.FailedRetryableUnableToOpenMount : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.error_parameter : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound : Status.ExtVmHealthStateEnum.yellow,\n\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptFailed : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptFailed : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptNotFound : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptNotFound : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigParsing : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigParsing : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptPermissionError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptPermissionError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptTimeout : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptTimeout : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostPreTimeout : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostPostTimeout : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostCheckSumMismatch : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigNotFound : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigPermissionError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigOwnershipError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigNotFound : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigPermissionError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigOwnershipError : Status.ExtVmHealthStateEnum.yellow,\n\n            ExtensionErrorCodeEnum.error_http_failure : Status.ExtVmHealthStateEnum.red,\n            ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork : Status.ExtVmHealthStateEnum.red,\n            ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork : Status.ExtVmHealthStateEnum.red,\n            ExtensionErrorCodeEnum.FailedSnapshotLimitReached : Status.ExtVmHealthStateEnum.red,\n            ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate : Status.ExtVmHealthStateEnum.red,\n            \n            ExtensionErrorCodeEnum.FailedWorkloadPreError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadConfParsingError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadInvalidRole : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadInvalidWorkloadName : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadPostError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadAuthorizationMissing : Status.ExtVmHealthStateEnum.yellow,\n            
ExtensionErrorCodeEnum.FailedWorkloadConnectionError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadIPCDirectoryMissing : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadDatabaseStatusChanged : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadQuiescingError : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadQuiescingTimeout : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadDatabaseInNoArchiveLog : Status.ExtVmHealthStateEnum.yellow,\n            ExtensionErrorCodeEnum.FailedWorkloadLogModeChanged : Status.ExtVmHealthStateEnum.yellow\n            }\n\n    ExtensionErrorCodeNameDict = {\n            ExtensionErrorCodeEnum.success : \"success\",\n            ExtensionErrorCodeEnum.success_appconsistent : \"success_appconsistent\",\n            ExtensionErrorCodeEnum.ExtensionTempTerminalState : \"ExtensionTempTerminalState\",\n            ExtensionErrorCodeEnum.error : \"error\",\n            ExtensionErrorCodeEnum.error_12 : \"error_12\",\n            ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput : \"SuccessAlreadyProcessedInput\",\n            ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList : \"FailedInvalidDataDiskLunList\",\n\n            ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed : \"FailedRetryableFsFreezeFailed\",\n            ExtensionErrorCodeEnum.FailedRetryableFsFreezeTimeout : \"FailedRetryableFsFreezeTimeout\",\n            ExtensionErrorCodeEnum.FailedRetryableUnableToOpenMount : \"FailedRetryableUnableToOpenMount\",\n            ExtensionErrorCodeEnum.error_parameter : \"error_parameter\",\n            ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound : \"FailedHandlerGuestAgentCertificateNotFound\",\n            ExtensionErrorCodeEnum.FailedSafeFreezeBinaryNotFound : \"FailedSafeFreezeBinaryNotFound\",\n             \n            ExtensionErrorCodeEnum.FailedPrepostPreScriptFailed : \"FailedPrepostPreScriptFailed\",\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptFailed : \"FailedPrepostPostScriptFailed\",\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptNotFound : \"FailedPrepostPreScriptNotFound\",\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptNotFound : \"FailedPrepostPostScriptNotFound\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigParsing : \"FailedPrepostPluginhostConfigParsing\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigParsing : \"FailedPrepostPluginConfigParsing\",\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptPermissionError : \"FailedPrepostPreScriptPermissionError\",\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptPermissionError : \"FailedPrepostPostScriptPermissionError\",\n            ExtensionErrorCodeEnum.FailedPrepostPreScriptTimeout : \"FailedPrepostPreScriptTimeout\",\n            ExtensionErrorCodeEnum.FailedPrepostPostScriptTimeout : \"FailedPrepostPostScriptTimeout\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostPreTimeout : \"FailedPrepostPluginhostPreTimeout\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostPostTimeout : \"FailedPrepostPluginhostPostTimeout\",\n            ExtensionErrorCodeEnum.FailedPrepostCheckSumMismatch : \"FailedPrepostCheckSumMismatch\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigNotFound : \"FailedPrepostPluginhostConfigNotFound\",\n            
ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigPermissionError : \"FailedPrepostPluginhostConfigPermissionError\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginhostConfigOwnershipError : \"FailedPrepostPluginhostConfigOwnershipError\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigNotFound : \"FailedPrepostPluginConfigNotFound\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigPermissionError : \"FailedPrepostPluginConfigPermissionError\",\n            ExtensionErrorCodeEnum.FailedPrepostPluginConfigOwnershipError : \"FailedPrepostPluginConfigOwnershipError\",\n\n            ExtensionErrorCodeEnum.error_http_failure : \"error_http_failure\",\n            ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork : \"FailedRetryableSnapshotFailedRestrictedNetwork\",\n            ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork : \"FailedRetryableSnapshotFailedNoNetwork\",\n            ExtensionErrorCodeEnum.FailedHostSnapshotRemoteServerError : \"FailedHostSnapshotRemoteServerError\",\n            ExtensionErrorCodeEnum.FailedSnapshotLimitReached : \"FailedSnapshotLimitReached\",\n            ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate : \"FailedGuestAgentInvokedCommandTooLate\",\n            \n            ExtensionErrorCodeEnum.FailedWorkloadPreError : \"FailedWorkloadPreError\",\n            ExtensionErrorCodeEnum.FailedWorkloadConfParsingError : \"FailedWorkloadConfParsingError\",\n            ExtensionErrorCodeEnum.FailedWorkloadInvalidRole : \"FailedWorkloadInvalidRole\",\n            ExtensionErrorCodeEnum.FailedWorkloadInvalidWorkloadName : \"FailedWorkloadInvalidWorkloadName\",\n            ExtensionErrorCodeEnum.FailedWorkloadPostError : \"FailedWorkloadPostError\",\n            ExtensionErrorCodeEnum.FailedWorkloadAuthorizationMissing : \"FailedWorkloadAuthorizationMissing\",\n            ExtensionErrorCodeEnum.FailedWorkloadConnectionError : \"FailedWorkloadConnectionError\",\n            ExtensionErrorCodeEnum.FailedWorkloadIPCDirectoryMissing : \"FailedWorkloadIPCDirectoryMissing\",\n            ExtensionErrorCodeEnum.FailedWorkloadDatabaseStatusChanged : \"FailedWorkloadDatabaseStatusChanged\",\n            ExtensionErrorCodeEnum.FailedWorkloadQuiescingError : \"FailedWorkloadQuiescingError\",\n            ExtensionErrorCodeEnum.FailedWorkloadQuiescingTimeout : \"FailedWorkloadQuiescingTimeout\",\n            ExtensionErrorCodeEnum.FailedWorkloadDatabaseInNoArchiveLog : \"FailedWorkloadDatabaseInNoArchiveLog\",\n            ExtensionErrorCodeEnum.FailedWorkloadLogModeChanged : \"FailedWorkloadLogModeChanged\"\n            }\n    \n    @staticmethod\n    def StatusCodeStringBuilder(ExtErrorCodeEnum):\n        return \" StatusCode.\" + ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[ExtErrorCodeEnum] + \",\"\n\n"
  },
  {
    "path": "VMBackup/main/HttpUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport datetime\nimport traceback\ntry:\n    import httplib as httplibs\nexcept ImportError:\n    import http.client as httplibs\nimport shlex\nimport subprocess\nimport sys\nfrom common import CommonVariables\nfrom subprocess import *\nfrom Utils.WAAgentUtil import waagent\nimport Utils.HandlerUtil\nimport sys\n\nclass HttpUtil(object):\n    \"\"\"description of class\"\"\"\n    __instance = None\n    \"\"\"Singleton class initialization\"\"\"\n    def __new__(cls, hutil):\n        if(cls.__instance is None):\n            hutil.log(\"Creating HttpUtil\")\n            cls.__instance = super(HttpUtil, cls).__new__(cls)\n            Config = None\n            cls.__instance.proxyHost = None\n            cls.__instance.proxyPort = None\n            try:\n                waagent.MyDistro = waagent.GetMyDistro()\n                Config = waagent.ConfigurationProvider(None)\n            except Exception as e:\n                errorMsg = \"Failed to construct ConfigurationProvider, which may be due to the old code with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n                hutil.log(errorMsg)\n                Config = None\n            cls.__instance.logger = hutil\n            if Config != None:\n                cls.__instance.proxyHost = Config.get(\"HttpProxy.Host\")\n                cls.__instance.proxyPort = Config.get(\"HttpProxy.Port\")\n            cls.__instance.tmpFile = './tmp_file_FD76C85E-406F-4CFA-8EB0-CF18B123365C'\n        else:\n            cls.__instance.logger = hutil\n            cls.__instance.logger.log(\"Returning HttpUtil\")\n        return cls.__instance\n\n    \"\"\"\n    snapshot also called this. so we should not write the file/read the file in this method.\n    \"\"\"\n    def CallUsingCurl(self,method,sasuri_obj,data,headers):\n        header_str = \"\"\n        for key, value in headers.iteritems():\n            header_str = header_str + '-H ' + '\"' + str(key) + ':' + str(value) + '\"'\n\n        if(self.proxyHost == None or self.proxyPort == None):\n            commandToExecute = 'curl --request PUT --connect-timeout 10 --data-binary @-' + ' ' + header_str + ' \"' + sasuri_obj.scheme + '://' + sasuri_obj.hostname + sasuri_obj.path + '?' + sasuri_obj.query + '\"' + ' -v'\n        else:\n            commandToExecute = 'curl --request PUT --connect-timeout 10 --data-binary @-' + ' ' + header_str + ' \"' + sasuri_obj.scheme + '://' + sasuri_obj.hostname + sasuri_obj.path + '?' 
+ sasuri_obj.query + '\"'\\\n                + '--proxy ' + self.proxyHost + ':' + self.proxyPort + ' -v'\n        args =Utils.HandlerUtil.HandlerUtility.split(self.logger, commandToExecute.encode('ascii'))\n        proc = Popen(args,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n        proc.stdin.write(data)\n        curlResult,err = proc.communicate()\n        returnCode = proc.wait()\n        self.logger.log(\"curl error is: \" + str(err))\n        self.logger.log(\"curl return code is : \" + str(returnCode))\n        # what if the curl is returned successfully, but the http response is\n        # 403\n        if(returnCode == 0):\n            return CommonVariables.success\n        else:\n            return CommonVariables.error_http_failure\n\n    def Call(self, method, sasuri_obj, data, headers, fallback_to_curl = False):\n        try:\n            result, resp, errorMsg = self.HttpCallGetResponse(method, sasuri_obj, data, headers)\n            self.logger.log(\"HttpUtil Call : result: \" + str(result) + \", errorMsg: \" + str(errorMsg))\n            if(result == CommonVariables.success and resp != None):\n                self.logger.log(\"resp-header: \" + str(resp.getheaders()))\n            else:\n                self.logger.log(\"Http connection response is None\")\n\n            responseBody = resp.read()\n            self.logger.log(\" resp status: \" + str(resp.status))\n            if(responseBody is not None):\n                self.logger.log(\"responseBody: \" + (responseBody).decode('utf-8-sig'))\n\n            if(resp.status == 200 or resp.status == 201):\n                return CommonVariables.success\n            else:\n                return resp.status\n        except Exception as e:\n            errorMsg = \"Failed to call http with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg)\n            if(fallback_to_curl):\n                return self.CallUsingCurl(method,sasuri_obj,data,headers)\n            else:\n                return CommonVariables.error_http_failure\n\n    def HttpCallGetResponse(self, method, sasuri_obj, data, headers , responseBodyRequired = False, isHostCall = False):\n        result = CommonVariables.error_http_failure\n        resp = None\n        responeBody = \"\"\n        errorMsg = None\n        responseBody = None\n        try:\n            resp = None\n            self.logger.log(\"Entered HttpCallGetResponse, isHostCall: \" + str(isHostCall))\n\n            if(isHostCall or self.proxyHost == None or self.proxyPort != None):\n                if(isHostCall):\n                    connection = httplibs.HTTPConnection(sasuri_obj.hostname, timeout = 40) # making call with port 80 to make it http call\n                else:\n                    connection = httplibs.HTTPSConnection(sasuri_obj.hostname, timeout = 10)\n                self.logger.log(\"Details of sas uri object  hostname: \" + str(sasuri_obj.hostname) + \" path: \" + str(sasuri_obj.path))\n                connection.request(method=method, url=(sasuri_obj.path + '?' 
+ sasuri_obj.query), body=data, headers = headers)\n                resp = connection.getresponse()\n                if(responseBodyRequired):\n                    responseBody = resp.read().decode('utf-8-sig')\n                connection.close()\n            else:\n                connection = httplibs.HTTPSConnection(self.proxyHost, self.proxyPort, timeout = 10)\n                connection.set_tunnel(sasuri_obj.hostname, 443)\n                # If proxy is used, full url is needed.\n                path = \"https://{0}:{1}{2}\".format(sasuri_obj.hostname, 443, (sasuri_obj.path + '?' + sasuri_obj.query))\n                connection.request(method=method, url=(path), body=data, headers=headers)\n                resp = connection.getresponse()\n                connection.close()\n            result = CommonVariables.success\n        except Exception as e:\n            errorMsg = str(datetime.datetime.utcnow()) + \" Failed to call http with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg)\n            if sys.version_info[0] == 2 and sys.version_info[1] == 6:\n                self.CallUsingCurl(method,sasuri_obj,data,headers)\n        if(responseBodyRequired):\n            return result, resp, errorMsg, responseBody\n        else:\n            return result, resp, errorMsg\n"
  },
  {
    "path": "VMBackup/main/IaaSExtensionSnapshotService/README.md",
    "content": "The systemd process manages the lifecycle of the service, including starting, stopping, restarting, and keeping track of whether it’s running or not.\n\nA service is registered typically by placing its .service file in /etc/systemd/system/\n\nTo start: \nsudo systemctl start <service_name>\n\nExecStart defines what executable or script is launched when the service starts.\nSystemd spawns a child process to run the command in ExecStart\n\nTypically, daemons write their PID to a PID file themselves (e.g., /var/run/<service>.pid) to track their process."
  },
  {
    "path": "VMBackup/main/IaaSExtensionSnapshotService/SnapshotServiceConstants.py",
    "content": "class SnapshotServiceConstants:\n\n    service_name = \"Microsoft.Azure.RecoveryServices.VMSnapshotLinux.service\"\n    config_section = 'IaaSExtensionSnapshotService'\n    pid_file = \"VMSnapshotLinux.pid\"\n\n    HOST_IP_ADDRESS = \"168.63.129.16\"\n\n    GET_SNAPSHOT_REQUESTS_URI = \"http://{0}/xdisksvc/snapshotrequest\".format(HOST_IP_ADDRESS)\n    START_SNAPSHOT_REQUESTS_URI = \"http://{0}/xdisksvc/startsnapshots\".format(HOST_IP_ADDRESS)\n    END_SNAPSHOT_REQUESTS_URI = \"http://{0}/xdisksvc/endsnapshots\".format(HOST_IP_ADDRESS)\n\n    SERVICE_POLLING_INTERVAL_IN_SECS = 300\n    EXTENSION_TIMEOUT_IN_MINS = 10"
  },
  {
    "path": "VMBackup/main/IaaSExtensionSnapshotService/SnapshotServiceContracts.py",
    "content": "import json\n\nclass GetSnapshotResponseBody:\n    def __init__(self, snapshotId, diskInfo=None, extensionSettings=None):\n        self.snapshotId = snapshotId\n        self.diskInfo = diskInfo\n        self.extensionSettings = extensionSettings\n\n    def convertToDictionary(self):\n        return dict(\n            snapshotId=self.snapshotId,\n            diskInfo=self.diskInfo.convertToDictionary() if self.diskInfo else None,\n            extensionSettings=self.extensionSettings\n        )\n\nclass StartSnapshotHostResponseBody:\n    def __init__(self, snapshotId, error=None):\n        self.snapshotId = snapshotId\n        self.error = error\n\n    def convertToDictionary(self):\n        return dict(\n            snapshotId=self.snapshotId,\n            error=self.error.convertToDictionary() if self.error else None\n        )\n\nclass StartSnapshotHostRequestBody:\n    def __init__(self, snapshotId):\n        self.snapshotId = snapshotId\n\n    def serialize_to_json_string(self):\n        return json.dumps(self.convertToDictionary())\n\n    def convertToDictionary(self):\n        return dict(snapshotId=self.snapshotId)\n    \nclass EndSnapshotHostRequestBody:\n    def __init__(self, snapshotId, error=None, provisioningDetails=None):\n        self.snapshotId = snapshotId\n        self.error = error\n        self.provisioningDetails = provisioningDetails\n\n    def serialize_to_json_string(self):\n        return json.dumps(self.convertToDictionary())\n\n    def convertToDictionary(self):\n        return dict(\n            snapshotId=self.snapshotId,\n            error=self.error.convertToDictionary() if self.error else None,\n            provisioningDetails=self.provisioningDetails\n        )\n\nclass EndSnapshotHostResponseBody:\n    def __init__(self, snapshotId, error=None):\n        self.snapshotId = snapshotId\n        self.error = error\n\n    def convertToDictionary(self):\n        return dict(\n            snapshotId=self.snapshotId,\n            error=self.error.convertToDictionary() if self.error else None\n        )\n\nclass Error:\n    def __init__(self, code, message=None):\n        self.code = code\n        self.message = message\n\n    def convertToDictionary(self):\n        return dict(\n            code=self.code,\n            message=self.message\n        )\n\nclass DiskInfo:\n    def __init__(self, dataDiskInfo=None, isOSDiskIncluded=False):\n        self.dataDiskInfo = dataDiskInfo or []\n        self.isOSDiskIncluded = isOSDiskIncluded\n\n    def convertToDictionary(self):\n        return dict(\n            dataDiskInfo=[disk.convertToDictionary() for disk in self.dataDiskInfo],\n            isOSDiskIncluded=self.isOSDiskIncluded\n        )\n\nclass DataDiskInfo:\n    def __init__(self, controllerType, controllerId, lunId):\n        self.controllerType = controllerType\n        self.controllerId = controllerId\n        self.lunId = lunId\n\n    def convertToDictionary(self):\n        return dict(\n            controllerType=self.controllerType,\n            controllerId=self.controllerId,\n            lunId=self.lunId\n        )\n\nclass XDiskSvcError:\n    def __init__(self, code, message=None):\n        self.code = code\n        self.message = message\n\n    def convertToDictionary(self):\n        return dict(\n            code=self.code,\n            message=self.message\n        )\n\nclass ProvisioningDetails:\n    def __init__(self, code, vmHealthInfo=None, storageDetails=None, message=None):\n        self.code = code\n        self.vmHealthInfo = 
vmHealthInfo\n        self.storageDetails = storageDetails\n        self.message = message\n\n    def convertToDictionary(self):\n        return dict(\n            code=self.code,\n            vmHealthInfo=self.vmHealthInfo.convertToDictionary() if self.vmHealthInfo else None,\n            storageDetails=self.storageDetails.convertToDictionary() if self.storageDetails else None,\n            message=self.message\n        )\n"
  },
  {
    "path": "VMBackup/main/IaaSExtensionSnapshotService/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "VMBackup/main/IaaSExtensionSnapshotService/service_metadata.json",
    "content": "{\n    \"Unit\": {\n      \"Description\": \"Long running Snapshot service for Microsoft Azure Restore Points\",\n      \"After\": \"multi-user.target\"\n    },\n    \"Service\": {\n      \"Type\": \"simple\",\n      \"Restart\": \"always\",\n      \"WorkingDirectory\": \"../..\",\n      \"ExecStart\": [\"/usr/bin/env\", \"python\", \"main/IaaSExtensionSnapshotService/PollingService.py\"]\n    },\n    \"Install\": {\n      \"WantedBy\": \"multi-user.target\"\n    }\n}"
  },
  {
    "path": "VMBackup/main/LogSeverity.json",
    "content": "{\n    \"EventLogLevel\": 2\n}"
  },
  {
    "path": "VMBackup/main/MachineIdentity.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport xml\nimport xml.dom.minidom\n\nclass MachineIdentity:\n    def __init__(self):\n        self.store_identity_file = './machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123365C'\n\n    def current_identity(self):\n        identity = None\n        file = None\n        try:\n            if os.path.exists(\"/var/lib/waagent/HostingEnvironmentConfig.xml\"):\n                file = open(\"/var/lib/waagent/HostingEnvironmentConfig.xml\",'r')\n                xmlText = file.read()\n                dom = xml.dom.minidom.parseString(xmlText)\n                deployment = dom.getElementsByTagName(\"Role\")\n                identity=deployment[0].getAttribute(\"guid\")\n        finally:\n            if file != None:\n                if file.closed == False:\n                    file.close()\n        return identity\n\n    def save_identity(self):\n        file = None\n        try:\n            file = open(self.store_identity_file,'w')\n            machine_identity = self.current_identity()\n            if( machine_identity != None ):\n                file.write(machine_identity)\n        finally:\n            if file != None:\n                if file.closed == False:\n                    file.close()\n\n    def stored_identity(self):\n        identity_stored = None\n        file = None\n        try:\n            if(os.path.exists(self.store_identity_file)):\n                file = open(self.store_identity_file,'r')\n                identity_stored = file.read()\n        finally:\n            if file != None:\n                if file.closed == False:\n                    file.close()\n        return identity_stored\n\n"
  },
  {
    "path": "VMBackup/main/PluginHost.py",
    "content": "import time\nimport sys\nimport os\nimport threading\nimport platform\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nfrom common import CommonVariables\nfrom pwd import getpwuid\nfrom stat import *\nimport traceback\n\n\n    # [pre_post]\n    # \"timeout\" : (in seconds),\n    #\n    # .... other params ...\n    #\n    # \"pluginName0\" : \"oracle_plugin\",      the python plugin file will have same name\n    # \"pluginPath0\" : \"/abc/xyz/\"\n    # \"pluginConfigPath0\" : \"sdf/sdf/abcd.json\"\n    #\n    #\n    # errorcode policy\n    # errorcode = 0 (CommonVariables.PrePost_PluginStatus_Successs), means success, script runs without error, warnings maybe possible\n    # errorcode = 5 (CommonVariables.PrePost_PluginStatus_Timeout), means timeout\n    # errorcode = 10 (CommonVariables.PrePost_PluginStatus_ConfigNotFound), config file not found\n    # errorcode = process return code, means bash script encountered some other error, like 127 for script not found\n\n\nclass PluginHostError(object):\n    def __init__(self, errorCode, pluginName):\n        self.errorCode = errorCode\n        self.pluginName = pluginName\n\n    def __str__(self):\n        return 'Plugin :- ', self.pluginName , ' ErrorCode :- ' + str(self.errorCode)\n\n\nclass PluginHostResult(object):\n    def __init__(self):\n        self.errors = []\n        self.anyScriptFailed = False\n        self.continueBackup = True\n        self.errorCode = 0\n        self.fileCode = []\n        self.filePath = []\n\n    def __str__(self):\n        errorStr = ''\n        for error in self.errors:\n            errorStr += (str(error)) + '\\n'\n        errorStr += 'Final Error Code :- ' + str(self.errorCode) + '\\n'\n        errorStr += 'Any script Failed :- ' + str(self.anyScriptFailed) + '\\n'\n        errorStr += 'Continue Backup :- ' + str(self.continueBackup) + '\\n'\n        return errorStr\n\n\nclass PluginHost(object):\n    \"\"\" description of class \"\"\"\n    def __init__(self, logger):\n        self.logger = logger\n        self.modulesLoaded = False\n        self.configLocation = '/etc/azure/VMSnapshotPluginHost.conf'\n        self.timeoutInSeconds = 1800\n        self.plugins = []\n        self.pluginName = []\n        self.noOfPlugins = 0\n        self.preScriptCompleted = []\n        self.preScriptResult = []\n        self.postScriptCompleted = []\n        self.postScriptResult = []\n        self.pollTime = 3\n\n    def pre_check(self):\n        self.logger.log('Loading script modules now...',True,'Info')\n        errorCode = CommonVariables.PrePost_PluginStatus_Success\n        dobackup = True\n        fsFreeze_on = True\n\n        # NS-BSD is already hardened, no checks and no freeze\n        if 'NS-BSD' in platform.system():\n            return errorCode, dobackup, False\n\n        if not os.path.isfile(self.configLocation):\n            self.logger.log('Plugin host Config file does not exist in the location ' + self.configLocation, True)\n            self.configLocation = './main/VMSnapshotPluginHost.conf'\n        \n        permissions = self.get_permissions(self.configLocation)\n        if not os.path.isfile(self.configLocation):\n            self.logger.log('Plugin host Config file does not exist in the location ' + self.configLocation, True)\n            errorCode =CommonVariables.FailedPrepostPluginhostConfigNotFound\n        elif  not (int(permissions[1]) == 0 or int(permissions[1]) == 4) or not (int(permissions[2]) == 0 or 
\n            self.logger.log('Plugin host Config file does not have desired permissions', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPluginhostConfigPermissionError\n        elif not self.find_owner(self.configLocation) == 'root':\n            self.logger.log('The owner of the Plugin host Config file ' + self.configLocation + ' is ' + self.find_owner(self.configLocation) + ' but not root', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPluginhostConfigPermissionError\n        else:\n            errorCode,dobackup,fsFreeze_on = self.load_modules()\n\n        return errorCode,dobackup,fsFreeze_on\n\n    def load_modules(self):\n        # Imports all plugin modules using the information in the plugin host config file\n        # and initializes basic class variables associated with each plugin\n        pluginCount = 0\n        errorCode = CommonVariables.PrePost_PluginStatus_Success\n        dobackup = True\n        fsFreeze_on = True\n\n        try:\n            self.logger.log('config file: '+str(self.configLocation),True,'Info')\n            config = ConfigParsers.ConfigParser()\n            config.read(self.configLocation)\n            if (config.has_option('pre_post', 'timeoutInSeconds')):\n                self.timeoutInSeconds = min(int(config.get('pre_post','timeoutInSeconds')),self.timeoutInSeconds)\n            if (config.has_option('pre_post', 'numberOfPlugins')):\n                pluginCount = int(config.get('pre_post','numberOfPlugins'))\n\n            self.logger.log('timeoutInSeconds: '+str(self.timeoutInSeconds),True,'Info')\n            self.logger.log('numberOfPlugins: '+str(pluginCount),True,'Info')\n\n            while pluginCount > 0:\n                pname = config.get('pre_post','pluginName'+str(self.noOfPlugins))\n                ppath = config.get('pre_post','pluginPath'+str(self.noOfPlugins))\n                pcpath = config.get('pre_post','pluginConfigPath'+str(self.noOfPlugins))\n                self.logger.log('Name of the Plugin is ' + pname, True)\n                self.logger.log('Plugin config path is ' + pcpath, True)\n                errorCode = CommonVariables.PrePost_PluginStatus_Success\n                dobackup = True\n\n                if os.path.isfile(pcpath):\n                    permissions = self.get_permissions(pcpath)\n                    if (int(permissions[0]) % 2 == 1) or int(permissions[1]) > 0 or int(permissions[2]) > 0:\n                        self.logger.log('Plugin Config file does not have desired permissions', True, 'Error')\n                        errorCode = CommonVariables.FailedPrepostPluginConfigPermissionError\n                    if not self.find_owner(pcpath) == 'root':\n                        self.logger.log('The owner of the Plugin Config file ' + pcpath + ' is ' + self.find_owner(pcpath) + ' but not root', True, 'Error')\n                        errorCode = CommonVariables.FailedPrepostPluginConfigPermissionError\n                else:\n                    self.logger.log('Plugin config file does not exist in the location ' + pcpath, True, 'Error')\n                    errorCode = CommonVariables.FailedPrepostPluginConfigNotFound\n\n                if(errorCode == CommonVariables.PrePost_PluginStatus_Success):\n                    sys.path.append(ppath)\n                    plugin = __import__(pname)\n\n                    self.plugins.append(plugin.ScriptRunner(logger=self.logger,name=pname,configPath=pcpath,maxTimeOut=self.timeoutInSeconds))
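\n                    # validate_scripts() verifies the plugin's config file, script locations, permissions and ownership,\n                    # and returns (errorCode, dobackup, fsFreeze_on, pollTime) for this plugin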
\n                    errorCode,dobackup,fsFreeze_on, self.pollTime = self.plugins[self.noOfPlugins].validate_scripts()\n                    self.logger.log('Validate Scripts output: errorCode - {0} dobackup - {1} fsFreeze_on - {2} pollTime - {3}'.format(errorCode, dobackup, fsFreeze_on, self.pollTime), True)\n                    self.noOfPlugins = self.noOfPlugins + 1\n                    self.pluginName.append(pname)\n                    self.preScriptCompleted.append(False)\n                    self.preScriptResult.append(None)\n                    self.postScriptCompleted.append(False)\n                    self.postScriptResult.append(None)\n\n                pluginCount = pluginCount - 1\n            if self.noOfPlugins != 0:\n                self.modulesLoaded = True\n\n        except Exception as err:\n            errMsg = 'Error in reading PluginHost config file : %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPluginhostConfigParsing\n\n        return errorCode,dobackup,fsFreeze_on\n\n    def find_owner(self, filename):\n        file_owner = ''\n        try:\n            file_owner = getpwuid(os.stat(filename).st_uid).pw_name\n        except Exception as err:\n            errMsg = 'Error in fetching owner of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n\n        return file_owner\n\n\n    def get_permissions(self, filename):\n        permissions = '777'\n        try:\n            permissions = oct(os.stat(filename)[ST_MODE])[-3:]\n            self.logger.log('Permissions of the file ' + filename + ' are ' + permissions,True)\n        except Exception as err:\n            errMsg = 'Error in fetching permissions of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n\n        return permissions\n\n\n    def pre_script(self):\n        # Runs pre_script() for all plugins and maintains a timer\n\n        result = PluginHostResult()\n        curr = 0\n        for plugin in self.plugins:\n            t1 = threading.Thread(target=plugin.pre_script, args=(curr, self.preScriptCompleted, self.preScriptResult))\n            t1.start()\n            curr = curr + 1\n\n        flag = True\n        for i in range(0, int(self.timeoutInSeconds/self.pollTime) + 2): # allow two extra poll intervals to avoid a race between the host timing out and the scripts timing out\n            time.sleep(self.pollTime)\n            flag = True\n            for j in range(0,self.noOfPlugins):\n                flag = flag & self.preScriptCompleted[j]\n            if flag:\n                break\n\n\n        continueBackup = True\n        # Plugin timed out\n        if not flag:\n            ecode = CommonVariables.FailedPrepostPluginhostPreTimeout\n            result.anyScriptFailed = True\n            presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])\n            result.errors.append(presult)\n        else:\n            for j in range(0,self.noOfPlugins):\n                ecode = CommonVariables.FailedPrepostPluginhostPreTimeout\n                continueBackup = continueBackup & self.preScriptResult[j].continueBackup\n                if self.preScriptCompleted[j]:\n                    ecode = self.preScriptResult[j].errorCode\n                if ecode != CommonVariables.PrePost_PluginStatus_Success:
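\n                    # any plugin reporting a non-success code marks the whole prescript phase as failed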
\n                    result.anyScriptFailed = True\n                presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])\n                result.errors.append(presult)\n        result.continueBackup = continueBackup\n        self.logger.log('Finished prescript execution from PluginHost side. Continue Backup: '+str(continueBackup),True,'Info')\n        return result\n\n    def post_script(self):\n        # Runs post_script() for all plugins and maintains a timer\n\n        result = PluginHostResult()\n        if not self.modulesLoaded:\n            return result\n\n        self.logger.log('Starting postscript for all modules.',True,'Info')\n        curr = 0\n        for plugin in self.plugins:\n            t1 = threading.Thread(target=plugin.post_script, args=(curr, self.postScriptCompleted, self.postScriptResult))\n            t1.start()\n            curr = curr + 1\n\n        flag = True\n        for i in range(0, int(self.timeoutInSeconds/self.pollTime) + 2): # allow two extra poll intervals to avoid a race between the host timing out and the scripts timing out\n            time.sleep(self.pollTime)\n            flag = True\n            for j in range(0,self.noOfPlugins):\n                flag = flag & self.postScriptCompleted[j]\n            if flag:\n                break\n\n        continueBackup = True\n\n        # Plugin timed out\n        if not flag:\n            ecode = CommonVariables.FailedPrepostPluginhostPostTimeout\n            result.anyScriptFailed = True\n            presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])\n            result.errors.append(presult)\n        else:\n            for j in range(0,self.noOfPlugins):\n                ecode = CommonVariables.FailedPrepostPluginhostPostTimeout\n                continueBackup = continueBackup & self.postScriptResult[j].continueBackup\n                if self.postScriptCompleted[j]:\n                    ecode = self.postScriptResult[j].errorCode\n                if ecode != CommonVariables.PrePost_PluginStatus_Success:\n                    result.anyScriptFailed = True\n                presult = PluginHostError(errorCode = ecode, pluginName = self.pluginName[j])\n                result.errors.append(presult)\n        result.continueBackup = continueBackup\n        self.logger.log('Finished postscript execution from PluginHost side. Continue Backup: '+str(continueBackup),True,'Info')\n        return result\n\n\n"
  },
  {
    "path": "VMBackup/main/ScriptRunner.py",
    "content": "import json\nimport subprocess\nimport time\nimport os\nfrom pwd import getpwuid\nfrom stat import *\nfrom common import CommonVariables\nimport traceback\nfrom Utils import HandlerUtil\n\n    # config.json --------structure---------\n    # {\n    #     \"pluginName\" : \"oracleLinux\",\n    #     \"timeoutInSeconds\" : (in seconds),\n    #     \"continueBackupOnFailure\" : true/false,\n    #\n    #     ... other config params ...\n    #\n    #     \"preScriptLocation\" : \"/abc/xyz.sh\"\n    #     \"postScriptLocation\" : \"/abc/def.sh\"\n    #     \"preScriptNoOfRetries\" : 3,\n    #     \"postScriptNoOfRetries\" : 2,\n    #     \"preScriptParams\" : [\n    #         ... all params to be passed to prescript ...\n    #     ],\n    #     \"postScriptParams\" : [\n    #         ... all params to be passed to postscript ...\n    #     ]\n    # }\n    #\n    #\n    # errorcode policy\n    # errorcode = 0 (CommonVariables.PrePost_PluginStatus_Successs), means success, script runs without error, warnings maybe possible\n    # errorcode = 5 (CommonVariables.PrePost_PluginStatus_Timeout), means timeout\n    # errorcode = 10 (CommonVariables.PrePost_PluginStatus_ConfigNotFound), config file not found\n    # errorcode = process return code, means bash script encountered some other error, like 127 for script not found\n\n\nclass ScriptRunnerResult(object):\n    def __init__(self):\n        self.errorCode = None\n        self.continueBackup = True\n        self.noOfRetries = 0\n        self.requiredNoOfRetries = 0\n        self.fileCode = []\n        self.filePath = []\n\n    def __str__(self):\n        errorStr =  'ErrorCode :- ' + str(self.errorCode) + '\\n'\n        errorStr += 'Continue Backup :- ' + str(self.continueBackup) + '\\n'\n        errorStr += 'Number of Retries done :- ' + str(self.noOfRetries) + '\\n'\n        return errorStr\n\n\nclass ScriptRunner(object):\n    \"\"\" description of class \"\"\"\n    def __init__(self, logger, name, configPath, maxTimeOut):\n        self.logger = logger\n        self.timeoutInSeconds = 10\n        self.pollSleepTime = 3\n        self.pollTotalCount = (self.timeoutInSeconds / self.pollSleepTime)\n        self.configLocation = configPath\n        self.pluginName = name\n        self.continueBackupOnFailure = True\n        self.preScriptParams = []\n        self.postScriptParams = []\n        self.preScriptLocation = None\n        self.postScriptLocation = None\n        self.preScriptNoOfRetries = 0\n        self.postScriptNoOfRetries = 0\n        self.fsFreeze_on = True\n        self.configLoaded = False\n        self.PreScriptCompletedSuccessfully = False\n        self.maxTimeOut = maxTimeOut\n\n    def get_config(self):\n        \"\"\"\n            Get configuration information from config.json\n\n        \"\"\"\n        try:\n            with open(self.configLocation, 'r') as configFile:\n                configData = json.load(configFile)\n            configDataKeys = configData.keys()\n            if 'timeoutInSeconds' in configDataKeys:\n                self.timeoutInSeconds = min(configData['timeoutInSeconds'],self.maxTimeOut)\n            if 'pluginName' in configDataKeys:\n                self.pluginName = configData['pluginName']\n            if 'appName' in configDataKeys:\n                HandlerUtil.HandlerUtility.add_to_telemetery_data('appName',configData['appName'])\n            self.preScriptLocation = configData['preScriptLocation']\n            self.postScriptLocation = configData['postScriptLocation']\n            if 
\n            if 'preScriptParams' in configDataKeys:\n                self.preScriptParams = configData['preScriptParams']\n            if 'postScriptParams' in configDataKeys:\n                self.postScriptParams = configData['postScriptParams']\n            if 'continueBackupOnFailure' in configDataKeys:\n                self.continueBackupOnFailure = configData['continueBackupOnFailure']\n            if 'preScriptNoOfRetries' in configDataKeys:\n                self.preScriptNoOfRetries = configData['preScriptNoOfRetries']\n            if 'postScriptNoOfRetries' in configDataKeys:\n                self.postScriptNoOfRetries = configData['postScriptNoOfRetries']\n            if 'fsFreezeEnabled' in configDataKeys:\n                self.fsFreeze_on = configData['fsFreezeEnabled']\n            if 'ScriptsExecutionPollTimeSeconds' in configDataKeys and int(configData['ScriptsExecutionPollTimeSeconds']) >= 1 and int(configData['ScriptsExecutionPollTimeSeconds']) <= 5:\n                self.pollSleepTime = int(configData['ScriptsExecutionPollTimeSeconds'])\n            self.pollTotalCount = (self.timeoutInSeconds / self.pollSleepTime)\n            self.configLoaded = True\n        except IOError as err:\n            errMsg = 'Error in opening ' + self.pluginName + ' config file: %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n        except ValueError as err:\n            errMsg = 'Error in decoding ' + self.pluginName + ' config file: %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n        except KeyError as err:\n            errMsg = 'Error in fetching value for the key '+str(err) + ' in ' +self.pluginName+' config file: %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n\n    def find_owner(self, filename):\n        file_owner = ''\n        try:\n            file_owner = getpwuid(os.stat(filename).st_uid).pw_name\n        except Exception as err:\n            errMsg = 'Error in fetching owner of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n\n        return file_owner\n\n    def validate_permissions(self, filename):\n        valid_permissions = True\n        try:\n            permissions = oct(os.stat(filename)[ST_MODE])[-3:]\n            self.logger.log('Permissions of the file ' + filename + ' are ' + permissions,True)\n            if int(permissions[1]) > 0 : # validating permissions for group\n                valid_permissions = False\n            if int(permissions[2]) > 0 : # validating permissions for others\n                valid_permissions = False\n        except Exception as err:\n            errMsg = 'Error in fetching permissions of the file : ' + filename + ': %s, stack trace: %s' % (str(err), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            valid_permissions = False\n\n        return valid_permissions\n\n    def validate_scripts(self):\n\n        errorCode = CommonVariables.PrePost_PluginStatus_Success\n        dobackup = True\n\n        self.get_config()\n        self.logger.log('Plugin:'+str(self.pluginName)+' timeout:'+str(self.timeoutInSeconds)+' pollTotalCount:'+str(self.pollTotalCount) +' preScriptParams:'+str(self.preScriptParams)+' postScriptParams:' + str(self.postScriptParams)+ ' continueBackupOnFailure:' + str(self.continueBackupOnFailure) + ' preScriptNoOfRetries:' + str(self.preScriptNoOfRetries) + ' postScriptNoOfRetries:' + str(self.postScriptNoOfRetries) + ' Global FS Freeze on :' + str(self.fsFreeze_on), True, 'Info')
\n\n        if not self.configLoaded:\n            errorCode = CommonVariables.FailedPrepostPluginConfigParsing\n            self.logger.log('Cannot run prescript for '+self.pluginName+'. Config file error.', True, 'Error')\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        dobackup = self.continueBackupOnFailure\n\n        if not os.path.isfile(self.preScriptLocation):\n            self.logger.log('Prescript file does not exist in the location '+self.preScriptLocation, True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPreScriptNotFound\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        if not self.validate_permissions(self.preScriptLocation):\n            self.logger.log('Prescript file does not have desired permissions', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPreScriptPermissionError\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        if not self.find_owner(self.preScriptLocation) == 'root':\n            self.logger.log('The owner of the PreScript file ' + self.preScriptLocation + ' is ' + self.find_owner(self.preScriptLocation) + ' but not root', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPreScriptPermissionError\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        if not os.path.isfile(self.postScriptLocation):\n            self.logger.log('Postscript file does not exist in the location ' + self.postScriptLocation, True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPostScriptNotFound\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        if not self.validate_permissions(self.postScriptLocation):\n            self.logger.log('Postscript file does not have desired permissions', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPostScriptPermissionError\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        if not self.find_owner(self.postScriptLocation) == 'root':\n            self.logger.log('The owner of the PostScript file ' + self.postScriptLocation + ' is ' + self.find_owner(self.postScriptLocation) + ' but not root', True, 'Error')\n            errorCode = CommonVariables.FailedPrepostPostScriptPermissionError\n            return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n        return errorCode,dobackup,self.fsFreeze_on, self.pollSleepTime\n\n    def pre_script(self, pluginIndex, preScriptCompleted, preScriptResult):\n        # Generates a system call to run the prescript\n        # -- pluginIndex is the index for the current plugin assigned by pluginHost\n        # -- preScriptCompleted is a bool array; upon completion of the script, True is assigned at pluginIndex\n        # -- preScriptResult is an array and it stores the result at pluginIndex\n\n        result = ScriptRunnerResult()\n        result.requiredNoOfRetries = self.preScriptNoOfRetries\n\n        paramsStr = ['sh',str(self.preScriptLocation)]\n        for param in self.preScriptParams:\n            paramsStr.append(str(param))\n\n        self.logger.log('Running prescript for '+self.pluginName+' module...',True,'Info')\n        process = subprocess.Popen(paramsStr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
\n\n        flag_timeout = False\n        curr = 0\n        cnt = 0\n        while True:\n            while process.poll() is None:\n                if curr >= self.pollTotalCount:\n                    self.logger.log('Prescript for '+self.pluginName+' timed out.',True,'Error')\n                    flag_timeout = True\n                    break\n                curr = curr + 1\n                time.sleep(self.pollSleepTime)\n            # compare return codes with == ('is' identity checks on integers are unreliable)\n            if process.returncode == CommonVariables.PrePost_ScriptStatus_Success:\n                break\n            if flag_timeout:\n                break\n            if cnt >= self.preScriptNoOfRetries:\n                break\n            self.logger.log('Prescript for '+self.pluginName+' failed. Retrying...',True,'Info')\n            cnt = cnt + 1\n\n\n        result.noOfRetries = cnt\n        if not flag_timeout:\n            result.errorCode = process.returncode\n            if result.errorCode != CommonVariables.PrePost_ScriptStatus_Success:\n                self.logger.log('Prescript for '+self.pluginName+' failed with error code: '+str(result.errorCode)+'.',True,'Error')\n                result.continueBackup = self.continueBackupOnFailure\n                result.errorCode = CommonVariables.FailedPrepostPreScriptFailed\n            else:\n                self.PreScriptCompletedSuccessfully = True\n                self.logger.log('Prescript for '+self.pluginName+' successfully executed.',True,'Info')\n        else:\n            result.errorCode = CommonVariables.FailedPrepostPreScriptTimeout\n            result.continueBackup = self.continueBackupOnFailure\n        preScriptCompleted[pluginIndex] = True\n        preScriptResult[pluginIndex] = result\n\n    def post_script(self, pluginIndex, postScriptCompleted, postScriptResult):\n        # Generates a system call to run the postscript\n        # -- pluginIndex is the index for the current plugin assigned by pluginHost\n        # -- postScriptCompleted is a bool array; upon completion of the script, True is assigned at pluginIndex\n        # -- postScriptResult is an array and it stores the result at pluginIndex\n\n        result = ScriptRunnerResult()\n\n        result.requiredNoOfRetries = self.postScriptNoOfRetries\n\n        paramsStr = ['sh',str(self.postScriptLocation)]\n        for param in self.postScriptParams:\n            paramsStr.append(str(param))\n\n        self.logger.log('Running postscript for '+self.pluginName+' module...',True,'Info')\n        process = subprocess.Popen(paramsStr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n        flag_timeout = False\n        curr = 0\n        cnt = 0\n        while True:\n            while process.poll() is None:\n                if curr >= self.pollTotalCount:\n                    self.logger.log('Postscript for '+self.pluginName+' timed out.',True,'Error')\n                    flag_timeout = True\n                    break\n                curr = curr + 1\n                time.sleep(self.pollSleepTime)\n            if process.returncode == CommonVariables.PrePost_ScriptStatus_Success:\n                break\n            if flag_timeout:\n                break\n            if cnt >= self.postScriptNoOfRetries:\n                break\n            self.logger.log('Postscript for '+self.pluginName+' failed. Retrying...',True,'Info')
\n            cnt = cnt + 1\n\n        result.noOfRetries = cnt\n        if not flag_timeout:\n            result.errorCode = process.returncode\n            if result.errorCode != CommonVariables.PrePost_ScriptStatus_Success:\n                self.logger.log('Postscript for '+self.pluginName+' failed with error code: '+str(result.errorCode)+'.',True,'Error')\n                result.errorCode = CommonVariables.FailedPrepostPostScriptFailed\n                result.continueBackup = self.continueBackupOnFailure\n            else:\n                self.logger.log('Postscript for '+self.pluginName+' successfully executed.',True,'Info')\n        else:\n            result.errorCode = CommonVariables.FailedPrepostPostScriptTimeout\n            result.continueBackup = self.continueBackupOnFailure\n        postScriptCompleted[pluginIndex] = True\n        postScriptResult[pluginIndex] = result\n"
  },
  {
    "path": "VMBackup/main/Utils/DiskUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport os\nimport os.path\nimport shlex\nimport sys\nfrom subprocess import *\nimport shutil\nimport uuid\nimport glob\nfrom common import DeviceItem\nimport Utils.HandlerUtil\nimport traceback\ntry:\n        import ConfigParser as ConfigParsers\nexcept ImportError:\n        import configparser as ConfigParsers\n\nclass DiskUtil(object):\n    __instance__ = None\n    patching = None\n    logger = None\n    mount_output = None\n\n\n    def __init__(self, patching, logger):\n\n        if DiskUtil.__instance__ is None:\n            self.patching = patching\n            self.logger = logger\n            self.mount_output = None\n            DiskUtil.__instance__ = self\n        else:\n            return DiskUtil.__instance__\n\n    @staticmethod\n    def get_instance(patching, logger):\n        if not DiskUtil.__instance__:\n            DiskUtil(patching, logger)\n\n        return DiskUtil.__instance__\n\n    def get_device_items_property(self, lsblk_path, dev_name, property_name):\n        get_property_cmd = lsblk_path + \" /dev/\" + dev_name + \" -b -nl -o NAME,\" + property_name\n        get_property_cmd_args =Utils.HandlerUtil.HandlerUtility.split(self.logger, get_property_cmd)\n        get_property_cmd_p = Popen(get_property_cmd_args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n        output,err = get_property_cmd_p.communicate()\n        output= str(output)\n        lines = output.splitlines()\n        for i in range(0,len(lines)):\n            item_value_str = lines[i].strip()\n            if(item_value_str != \"\"):\n                disk_info_item_array =Utils.HandlerUtil.HandlerUtility.split(self.logger, item_value_str)\n                if(dev_name == disk_info_item_array[0]):\n                    if(len(disk_info_item_array) > 1):\n                        return disk_info_item_array[1]\n        return None\n\n    def get_device_items_sles(self,dev_path):\n        self.logger.log(\"get_device_items_sles : getting the blk info from \" + str(dev_path), True)\n        device_items = []\n        #first get all the device names\n        if(dev_path is None):\n            get_device_cmd = self.patching.lsblk_path + \" -b -nl -o NAME\"\n        else:\n            get_device_cmd = self.patching.lsblk_path + \" -b -nl -o NAME \" + dev_path\n        get_device_cmd_args =Utils.HandlerUtil.HandlerUtility.split(self.logger, get_device_cmd)\n        p = Popen(get_device_cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out_lsblk_output, err = p.communicate()\n        out_lsblk_output = str(out_lsblk_output)\n        lines = out_lsblk_output.splitlines()\n        for i in range(0,len(lines)):\n            item_value_str = lines[i].strip()\n            if(item_value_str != \"\"):\n                disk_info_item_array =Utils.HandlerUtil.HandlerUtility.split(self.logger, item_value_str)\n              
\n                device_item = DeviceItem()\n                device_item.name = disk_info_item_array[0]\n                device_items.append(device_item)\n\n        for i in range(0,len(device_items)):\n            device_item = device_items[i]\n            device_item.file_system = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='FSTYPE')\n            device_item.mount_point = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='MOUNTPOINT')\n            device_item.label = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='LABEL')\n            device_item.uuid = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='UUID')\n            # get the type of device\n            model_file_path = '/sys/block/' + device_item.name + '/device/model'\n            if(os.path.exists(model_file_path)):\n                with open(model_file_path,'r') as f:\n                    device_item.model = f.read().strip()\n            if(device_item.model == 'Virtual Disk'):\n                self.logger.log(\"model is virtual disk\", True)\n                device_item.type = 'disk'\n            if(device_item.type != 'disk'):\n                partition_files = glob.glob('/sys/block/*/' + device_item.name + '/partition')\n                if(partition_files is not None and len(partition_files) > 0):\n                    self.logger.log(\"partition files exist\", True)\n                    device_item.type = 'part'\n        return device_items\n\n    def get_device_items_from_lsblk_list(self, lsblk_path, dev_path):\n        self.logger.log(\"get_device_items_from_lsblk_list : getting the blk info from \" + str(dev_path), True)\n        device_items = []\n        # first get all the device names\n        if(dev_path is None):\n            get_device_cmd = lsblk_path + \" -b -nl -o NAME\"\n        else:\n            get_device_cmd = lsblk_path + \" -b -nl -o NAME \" + dev_path\n        get_device_cmd_args = Utils.HandlerUtil.HandlerUtility.split(self.logger, get_device_cmd)\n        p = Popen(get_device_cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out_lsblk_output, err = p.communicate()\n        if sys.version_info > (3,):\n            out_lsblk_output = str(out_lsblk_output, encoding='utf-8', errors=\"backslashreplace\")\n        else:\n            out_lsblk_output = str(out_lsblk_output)\n        lines = out_lsblk_output.splitlines()\n        device_items_temp = []\n        for i in range(0,len(lines)):\n            item_value_str = lines[i].strip()\n            if(item_value_str != \"\"):\n                disk_info_item_array = Utils.HandlerUtil.HandlerUtility.split(self.logger, item_value_str)\n                device_item = DeviceItem()\n                device_item.name = disk_info_item_array[0]\n                device_items_temp.append(device_item)\n\n        for i in range(0,len(device_items_temp)):\n            device_item = device_items_temp[i]\n            device_item.mount_point = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='MOUNTPOINT')\n            if (device_item.mount_point is not None):\n                device_item.file_system = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='FSTYPE')\n                device_item.label = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='LABEL')
\n                device_item.uuid = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='UUID')\n                device_item.type = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='TYPE')\n                device_items.append(device_item)\n                self.logger.log(\"lsblk MOUNTPOINT=\" + str(device_item.mount_point) + \", NAME=\" + str(device_item.name) + \", TYPE=\" + str(device_item.type) + \", FSTYPE=\" + str(device_item.file_system) + \", LABEL=\" + str(device_item.label) + \", UUID=\" + str(device_item.uuid) + \", MODEL=\" + str(device_item.model), True)\n        return device_items\n\n    def get_lsblk_pairs_output(self, lsblk_path, dev_path):\n        self.logger.log(\"get_lsblk_pairs_output : getting the blk info from \" + str(dev_path) + \" using lsblk_path \" + str(lsblk_path), True)\n\n        # If an alternate user is specified in vmbackup.conf, run the lsblk command through that user, not with root access.\n        # Fixes issues found in some SUSE-related distros where the lsblk command gets stuck with root access.\n        # Sample vmbackup.conf file with such an alternate user setting:\n        # [lsblkUser]\n        # username: vmadmin\n\n        configfile = '/etc/azure/vmbackup.conf'\n        command_user = ''\n        alternate_user = False\n\n        try:\n            if os.path.exists(configfile):\n                config = ConfigParsers.ConfigParser()\n                config.read(configfile)\n                if config.has_option('lsblkUser','username'):\n                    lsblk_user = config.get('lsblkUser','username')\n                    command_user = \"su - \" + lsblk_user + \" -c\"\n                    if (dev_path is None):\n                        command_user = command_user + ' \\'' + 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE' + '\\''\n                    else:\n                        command_user = command_user + ' \\'' + 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE' + ' ' + dev_path + '\\''\n                    alternate_user = True\n        except Exception as e:\n            pass\n\n        out_lsblk_output = None\n        error_msg = None\n        is_lsblk_path_wrong = False\n        try:\n            if (alternate_user):\n                self.logger.log(\"Switching to alternate user to run this lsblk command: \" + str(command_user), True)\n                p = Popen(command_user, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n            elif(dev_path is None):\n                p = Popen([str(lsblk_path), '-b', '-n','-P','-o','NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            else:\n                p = Popen([str(lsblk_path), '-b', '-n','-P','-o','NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE',dev_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        except Exception as e:\n            errMsg = 'Exception in lsblk command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            is_lsblk_path_wrong = True\n        if not is_lsblk_path_wrong:\n            out_lsblk_output, err = p.communicate()\n            if sys.version_info > (3,):\n                out_lsblk_output = str(out_lsblk_output, encoding='utf-8', errors=\"backslashreplace\")
\n            else:\n                out_lsblk_output = str(out_lsblk_output)\n            error_msg = str(err)\n            if(error_msg is not None and error_msg.strip() != \"\"):\n                self.logger.log(str(err), True)\n        return is_lsblk_path_wrong, out_lsblk_output, error_msg\n\n    def get_which_command_result(self, program_to_locate):\n        self.logger.log(\"getting the which info for \" + str(program_to_locate), True)\n        out_which_output = None\n        error_msg = None\n        try:\n            p = Popen(['which', str(program_to_locate)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            out_which_output, err = p.communicate()\n            if sys.version_info > (3,):\n                out_which_output = str(out_which_output, encoding='utf-8', errors=\"backslashreplace\")\n            else:\n                out_which_output = str(out_which_output)\n            error_msg = str(err)\n            if(error_msg is not None and error_msg.strip() != \"\"):\n                self.logger.log(str(err), True)\n            self.logger.log(\"which command result :\" + str(out_which_output), True)\n            if (out_which_output is not None):\n                out_which_output = out_which_output.splitlines()[0]\n        except Exception as e:\n            errMsg = 'Exception in which command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n        return out_which_output, error_msg\n\n    def get_device_items(self, dev_path):\n        if(self.patching.distro_info[0].lower() == 'suse' and self.patching.distro_info[1] == '11'):\n            return self.get_device_items_sles(dev_path)\n        else:\n            self.logger.log(\"getting the blk info from \" + str(dev_path), True)\n            device_items = []\n            lsblk_path = self.patching.lsblk_path\n            # Get lsblk command output using lsblk_path as self.patching.lsblk_path\n            is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)\n            # if lsblk_path was wrong, use /bin/lsblk or /usr/bin/lsblk based on self.patching.usr_flag to get lsblk command output again for centos/redhat distros\n            if is_lsblk_path_wrong and (self.patching.distro_info[0].lower() == 'centos' or self.patching.distro_info[0].lower() == 'redhat'):\n                if self.patching.usr_flag == 1:\n                    self.logger.log(\"lsblk path is wrong, removing /usr prefix\", True, 'Warning')\n                    lsblk_path = \"/bin/lsblk\"\n                else:\n                    self.logger.log(\"lsblk path is wrong, adding /usr prefix\", True, 'Warning')\n                    lsblk_path = \"/usr/bin/lsblk\"\n                is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)\n            # if lsblk_path was still wrong, find lsblk_path using the \"which\" command\n            if is_lsblk_path_wrong:\n                self.logger.log(\"lsblk path is wrong. Finding path using which command\", True, 'Warning')
\n                out_which_output, which_error_msg = self.get_which_command_result('lsblk')\n                # get lsblk command output\n                if (out_which_output is not None):\n                    lsblk_path = str(out_which_output)\n                    is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)\n            # if error_msg contains \"invalid option\" or \"P\" (rely only on the \"-P\" option in the error to handle non-English locales), then get device_items using method get_device_items_from_lsblk_list\n            if (error_msg is not None and error_msg.strip() != \"\" and ('invalid option' in error_msg or 'P' in error_msg)):\n                device_items = self.get_device_items_from_lsblk_list(lsblk_path, dev_path)\n            # else get device_items from parsing the lsblk command output\n            elif (out_lsblk_output is not None):\n                lines = out_lsblk_output.splitlines()\n                for i in range(0,len(lines)):\n                    item_value_str = lines[i].strip()\n                    if(item_value_str != \"\"):\n                        disk_info_item_array = Utils.HandlerUtil.HandlerUtility.split(self.logger, item_value_str)\n                        device_item = DeviceItem()\n                        disk_info_item_array_length = len(disk_info_item_array)\n                        for j in range(0, disk_info_item_array_length):\n                            disk_info_property = disk_info_item_array[j]\n                            property_item_pair = disk_info_property.split('=')\n\n                            if(property_item_pair[0] == 'NAME'):\n                                device_item.name = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'TYPE'):\n                                device_item.type = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'FSTYPE'):\n                                device_item.file_system = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'MOUNTPOINT'):\n                                device_item.mount_point = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'LABEL'):\n                                device_item.label = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'UUID'):\n                                device_item.uuid = property_item_pair[1].strip('\"')\n\n                            if(property_item_pair[0] == 'MODEL'):\n                                device_item.model = property_item_pair[1].strip('\"')\n\n                        self.logger.log(\"lsblk MOUNTPOINT=\" + str(device_item.mount_point) + \", NAME=\" + str(device_item.name) + \", TYPE=\" + str(device_item.type) + \", FSTYPE=\" + str(device_item.file_system) + \", LABEL=\" + str(device_item.label) + \", UUID=\" + str(device_item.uuid) + \", MODEL=\" + str(device_item.model), True)\n\n                        if(device_item.mount_point is not None and device_item.mount_point != \"\" and device_item.mount_point != \" \"):\n                            device_items.append(device_item)\n            return device_items\n\n    def get_mount_command_output(self, mount_path):\n        self.logger.log(\"getting the mount info using mount_path \" + str(mount_path), True)
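\n        # returns (is_mount_path_wrong, out_mount_output, error_msg); the caller inspects\n        # the first element to decide whether to retry with a different mount path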
\n        out_mount_output = None\n        error_msg = None\n        is_mount_path_wrong = False\n        try:\n            p = Popen([str(mount_path)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        except Exception as e:\n            errMsg = 'Exception in mount command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            is_mount_path_wrong = True\n        if not is_mount_path_wrong:\n            out_mount_output, err = p.communicate()\n            if sys.version_info > (3,):\n                out_mount_output = str(out_mount_output, encoding='utf-8', errors=\"backslashreplace\")\n            else:\n                out_mount_output = str(out_mount_output)\n            self.logger.log(\"getting the mount info using mount_path \" + out_mount_output, True)\n            error_msg = str(err)\n            if(error_msg is not None and error_msg.strip() != \"\"):\n                self.logger.log(str(err), True)\n        return is_mount_path_wrong, out_mount_output, error_msg\n\n    def get_mount_points(self):\n        mount_points_info = []\n        mount_points = []\n        fs_types = []\n        out_mount_output = self.get_mount_output()\n        if (out_mount_output is not None):\n            # Extract the list of mnt_point in order\n            lines = out_mount_output.splitlines()\n            # Reverse the mount command output to go through from last-to-first mounts in the output\n            lines.reverse()\n            for line in lines:\n                line = line.strip()\n                if(line != \"\"):\n                    deviceName = Utils.HandlerUtil.HandlerUtility.split(self.logger, line)[0]\n                    mountPrefixStr = \" on /\"\n                    prefixIndex = line.find(mountPrefixStr)\n                    if(prefixIndex >= 0):\n                        mountpointStart = prefixIndex + len(mountPrefixStr) - 1\n                        fstypePrefixStr = \" type \"\n                        mountpointEnd = line.find(fstypePrefixStr, mountpointStart)\n                        if(mountpointEnd >= 0):\n                            mount_point = line[mountpointStart:mountpointEnd]\n                            fs_type = \"\"\n                            fstypeStart = line.find(fstypePrefixStr) + len(fstypePrefixStr) - 1\n                            if(line.find(fstypePrefixStr) >= 0):\n                                fstypeEnd = line.find(\" \", fstypeStart+1)\n                                if(fstypeEnd >= 0):\n                                    fs_type = line[fstypeStart+1:fstypeEnd]\n                            # If there is a duplicate, keep only the first instance\n                            if (mount_point not in mount_points):\n                                self.logger.log(\"mount command, adding mount :\" + str(mount_point) + \":  device :\" + str(deviceName) + \": fstype :\"+ str(fs_type) + \":\", True)\n                                fs_types.append(fs_type)\n                                mount_points.append(mount_point)\n                                mount_points_info.append((mount_point,deviceName,fs_type))\n                            else:\n                                self.logger.log(\"####### mount command, not adding duplicate mount :\" + str(mount_point) + \":  device :\" + str(deviceName) + \": fstype :\"+ str(fs_type) + \":\", True)\n        # Now reverse the mount_points & fs_types lists to restore the mount command output order
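\n        # each mount_points_info entry is a (mount_point, deviceName, fs_type) tuple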
\n        mount_points_info.reverse()\n        mount_points.reverse()\n        for fstype in fs_types:\n            if (\"fuse\" in fstype.lower() or \"nfs\" in fstype.lower() or \"cifs\" in fstype.lower()):\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"networkFSTypePresentInMount\",\"True\")\n                break\n        return mount_points,mount_points_info\n\n    def get_mount_file_systems(self):\n        out_mount_output = self.get_mount_output()\n        file_systems_info = []\n        mount_points = []\n        if (out_mount_output is not None):\n            lines = out_mount_output.splitlines()\n            # Reverse the mount command output to go through from last-to-first mounts in the output\n            lines.reverse()\n            for line in lines:\n                self.logger.log(\"print line by line :\" + line , True)\n                line = line.strip()\n                if(line != \"\"):\n                    file_system = Utils.HandlerUtil.HandlerUtility.split(self.logger, line)[0]\n                    mountPrefixStr = \" on /\"\n                    prefixIndex = line.find(mountPrefixStr)\n                    if(prefixIndex >= 0):\n                        mountpointStart = prefixIndex + len(mountPrefixStr) - 1\n                        fstypePrefixStr = \" type \"\n                        mountpointEnd = line.find(fstypePrefixStr, mountpointStart)\n                        if(mountpointEnd >= 0):\n                            mount_point = line[mountpointStart:mountpointEnd]\n                            fs_type = \"\"\n                            fstypeStart = line.find(fstypePrefixStr) + len(fstypePrefixStr) - 1\n                            if(line.find(fstypePrefixStr) >= 0):\n                                fstypeEnd = line.find(\" \", fstypeStart+1)\n                                if(fstypeEnd >= 0):\n                                    fs_type = line[fstypeStart+1:fstypeEnd]\n                            # If there is a duplicate, keep only the first instance\n                            # (deduplicate inside the parse branch so mount_point always refers to the line just parsed)\n                            if (mount_point not in mount_points):\n                                file_systems_info.append((file_system,fs_type,mount_point))\n                                mount_points.append(mount_point)\n        # Now reverse the file_systems_info list to restore the mount command output order\n        file_systems_info.reverse()\n        return file_systems_info\n\n    def get_mount_output(self):\n        if self.mount_output is not None:\n            return self.mount_output\n        else:\n            # Get the output of the mount command\n            self.logger.log(\"getting the mount-points info using mount command \", True)\n            mount_path = self.patching.mount_path\n            is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)\n            if is_mount_path_wrong:\n                if self.patching.usr_flag == 1:\n                    self.logger.log(\"mount path is wrong. Removing /usr prefix\", True, 'Warning')\n                    mount_path = \"/bin/mount\"\n                else:\n                    self.logger.log(\"mount path is wrong. Adding /usr prefix\", True, 'Warning')\n                    mount_path = \"/usr/bin/mount\"\n                is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)\n            # if mount_path was still wrong, find mount_path using the \"which\" command\n            if is_mount_path_wrong:\n                self.logger.log(\"mount path is wrong. Finding path using which command\", True, 'Warning')
\n                out_which_output, which_error_msg = self.get_which_command_result('mount')\n                # get mount command output\n                if (out_which_output is not None):\n                    mount_path = str(out_which_output)\n                    is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)\n            self.mount_output = out_mount_output\n            return out_mount_output\n\n"
  },
  {
    "path": "VMBackup/main/Utils/Event.py",
    "content": "from datetime import datetime\nimport os\nimport sys\nif sys.version_info[0] == 3:\n    import threading\nelse:\n    # to make it compatible with python version less than 3\n    import thread as threading\n\nclass Event:\n    '''\n         The agent will only pick the first 3K - 3072 characters.\n         Rest of the characters would be discarded from the messages.\n         To ensure this we Check the message length and divide them accordingly\n         into chunks of characters less than 3K.\n    '''\n\n    def __init__(self, level, message, task_name, operation_id, version):\n        self.version = version\n        self.timestamp = datetime.utcnow().isoformat()\n        self.task_name = task_name\n        self.event_level = level\n        self.message = message\n        self.event_pid = str(os.getpid())\n        self.event_tid = str(threading.get_ident()).zfill(8)\n        self.operation_id = operation_id\n    \n    def convertToDictionary(self):\n        return dict(Version = self.version, Timestamp = self.timestamp, TaskName = self.task_name, EventLevel = self.event_level, Message = self.message, EventPid = self.event_pid, EventTid = self.event_tid, OperationId = str(self.operation_id))"
  },
  {
    "path": "VMBackup/main/Utils/EventLoggerUtil.py",
    "content": "import os\nimport threading\nimport json\nimport sys\nimport datetime\nimport time\nimport uuid\nif sys.version_info[0] == 2:\n    import Queue as queue\nelse:\n    # if python version is > 3\n    import queue\nimport shutil\nfrom Utils.LogHelper import FileHelpers,LoggingConstants\nfrom Utils.StringHelper import StringHelper\nfrom Utils.Event import Event\n\nclass EventLogger:\n    _instance = None\n    _lock = threading.Lock()\n    \n    \n    def __init__(self, event_directory, severity_level, use_async_event_logging = 0):\n        global logger\n        self.temporary_directory = os.path.join(event_directory, 'Temp')\n        self.space_available_in_event_directory = 0\n        self.event_processing_interval = 0\n        self.disposed = False\n        self.event_processing_task = None  \n        self.current_message_len = 0\n        self.event_logging_enabled = False\n        self.event_logging_error_count = 0\n        self.events_folder = event_directory\n        self.event_logging_enabled = bool(self.events_folder)\n        self.async_event_logging = use_async_event_logging\n        self.filehelper = FileHelpers()\n\n        if self.event_logging_enabled:\n            self.extension_version = os.path.basename(os.getcwd())\n            self.operation_id = uuid.UUID(int=0)\n            self.log_severity_level = severity_level\n            logger.log(\"Information: EventLogging severity level setting is {0}\".format(self.log_severity_level))\n            # creating a temp directory\n            if not os.path.exists(self.temporary_directory):\n                os.makedirs(self.temporary_directory)\n            FileHelpers.clearOldJsonFilesInDirectory(self.temporary_directory)\n\n            FileHelpers.clearOldJsonFilesInDirectory(self.events_folder)\n            \n            self.current_message = ''\n            self.event_queue = queue.Queue()\n            \t\t\t\n            space_available = LoggingConstants.MaxEventDirectorySize - FileHelpers.getSizeOfDir(self.events_folder)\n            self.space_available_in_event_directory = max(0, space_available)\n            print(\"Information: Space available in event directory : %sB\" %(self.space_available_in_event_directory))\n            \n            if( self.async_event_logging == 1):\n                self.event_processing_signal = threading.Event() # an event object that runs continuously until signal is set\n                self.event_processing_interval = LoggingConstants.MinEventProcesingInterval\n                print(\"Information: Setting event reporting interval to %ss\" %(self.event_processing_interval))\n                self.begin_event_queue_polling()\n            self._event_processing_loop\n        else:\n            print(\"Warning: EventsFolder parameter is empty. 
Guest Agent does not support event logging.\")\n            \n    @staticmethod\n    def GetInstance(backup_logger, event_directory, severity_level, use_async_event_logging = 0):\n        global logger\n        try:\n            logger = backup_logger\n            if EventLogger._instance is None:\n                with EventLogger._lock:\n                    if EventLogger._instance is None:\n                        EventLogger._instance = EventLogger(event_directory, severity_level, use_async_event_logging)\n        except Exception as e:\n            print(\"Exception has occurred {0}\".format(str(e)))\n        return EventLogger._instance\n        \n    def update_properties(self, task_id):\n        self.operation_id = task_id\n\n    def severity(self, severity_level):\n        level = 0\n        if(severity_level == \"Verbose\"):\n            level = 0\n        elif(severity_level == \"Info\"):\n            level = 1\n        elif(severity_level == \"Warning\"):\n            level = 2\n        else:\n            level = 3\n        return level\n\n    def trace_message(self, severity_level, message):\n        global logger\n        level = self.severity(severity_level)\n        if self.event_logging_enabled and level >= self.log_severity_level:\n            stringhelper = StringHelper()\n            message = stringhelper.resolve_string(severity_level, message)\n            try:\n                message_len = len(message)\n                message_max_len = LoggingConstants.MaxMessageLenLimit\n                \n                if message_len > message_max_len:\n                    num_chunks = (message_len + message_max_len - 1) // message_max_len\n                    msg_date_time = datetime.datetime.utcnow().strftime(u'%Y-%m-%dT%H:%M:%S.%fZ')\n                    \n                    for string_part in range(num_chunks):\n                        start_index = string_part * message_max_len\n                        length = min(message_max_len, message_len - start_index)\n                        message_part = '%s [%d/%d] %s' % (msg_date_time, string_part + 1, num_chunks, message[start_index:start_index+length])\n                        self.log_event(message_part)\n                else:\n                    self.log_event(message)\n            except Exception as ex:\n                self.event_logging_error_count += 1\n                if self.event_logging_error_count > 10:\n                    self.event_logging_enabled = False\n                    print(\"Warning: Count(EventLoggingErrors) > 10. Disabling eventLogging. Continue with execution\")\n                    print(\"Exception: {0}\" .format(str(ex)))\n\n    def log_event(self, message):\n        global logger\n        try:\n            if self.current_message_len + len(message) > LoggingConstants.MaxMessageLengthPerEvent:\n                self.event_queue.put(Event(\"Info\",\n                                           self.current_message, LoggingConstants.DefaultEventTaskName,\n                                           self.operation_id, self.extension_version).convertToDictionary())\n                # Reset the current message\n                self.current_message = message\n                self.current_message_len = len(message)\n            else:\n                self.current_message += message\n                self.current_message_len += len(message)\n        except Exception as ex:\n            print(\"Warning: Error adding extension event to queue. 
Exception: {0}\" .format(str(ex)))\n\n    def begin_event_queue_polling(self):\n        global logger\n        print(\"Event logging via polling is starting...\") #using threads\n        try:\n            self.event_processing_task = threading.Thread(target=self._event_processing_loop)\n            self.event_processing_task.start()\n        except Exception as e:\n            print(\"Exception in begin_event_queue_polling {0}\".format(str(e)))\n\n    def _event_processing_loop(self):\n        global logger\n        if(self.async_event_logging == 1):\n            while not self.event_processing_signal.wait(self.event_processing_interval):\n                try:\n                    self._process_events()\n                except Exception as ex:\n                    print(\"Warning: Event processing has failed. Exception: {0}\" .format(str(ex)))\n        else:\n            try:\n                self._process_events()\n            except Exception as ex:\n                print(\"Warning: Event processing has failed. Exception: {0}\" .format(str(ex)))\n        print(\"Information: Exiting function polling...\")\n\n    def _process_events(self):\n        global logger\n        try:\n            if self.space_available_in_event_directory == 0:\n                # There is no space available in the events directory then a check is made to see if space has been\n                # created (no files). If there is space available we reset our flags and proceed with processing.\n                if not os.listdir(self.events_folder):\n                    self.space_available_in_event_directory = LoggingConstants.MaxEventDirectorySize\n                    logger.log(\"Event directory has space for new event files. Resuming event reporting.\")\n                else:\n                    self.event_queue = queue.Queue()\n                    return\n            if not self.event_queue.empty():\n                if sys.version_info[0] == 2:\n                    event_file_path = os.path.join(self.temporary_directory, \"{}.json\".format(int(time.time() * 1000000000)))\n                else:\n                    event_file_path = os.path.join(self.temporary_directory, \"{}.json\".format(int(datetime.datetime.utcnow().timestamp() * 1000000000)))\n                with self._create_event_file(event_file_path) as file:\n                    if file is None:\n                        logger.log(\"Warning: Could not create the event file in the path mentioned.\")\n                        return\n                    print(\"Clearing out event queue for processing...\")\n                    old_queue = self.event_queue\n                    self.event_queue = queue.Queue()\n                    self._write_events_to_event_file(file, old_queue, event_file_path)\n                self._send_event_file_to_event_directory(event_file_path, self.events_folder)\n        except Exception as e:\n            print(\"Exception occurred in _process_events {0}\".format(str(e)))\n\n    def _create_event_file(self, event_file_path):\n        print(\"Information: Attempting to create a new event file...\")\n        success_msg = \"Successfully created new event file: %s\" % event_file_path\n        retry_msg = \"Failed to write events to file: %s. Retrying...\" % event_file_path\n        err_msg = \"Failed to write events to file %s after %d attempts. No longer retrying. 
Events for this iteration will not be reported.\" % (event_file_path, LoggingConstants.MaxAttemptsForEventFileCreationWriteMove)\n\n        stream_writer = self.filehelper.execute_with_retries(\n            LoggingConstants.MaxAttemptsForEventFileCreationWriteMove,\n            LoggingConstants.ThreadSleepDuration,\n            success_msg,\n            retry_msg,\n            err_msg,\n            lambda: open(event_file_path, \"w\")\n        )\n        \n        return stream_writer\n\n    def _write_events_to_event_file(self, file, events, event_file_path):\n        data_list = []\n        while not events.empty():\n            data = events.get()\n            data_list.append(data)\n        json_data = json.dumps(data_list)\n        if not json_data:\n            print(\"Warning: Unable to serialize events. Events for this iteration will not be reported.\")\n            return\n\n        success_msg = \"Successfully wrote events to file: %s\" % event_file_path\n        retry_msg = \"Failed to write events to file: %s. Retrying...\" % event_file_path\n        err_msg = \"Failed to write events to file %s after %d attempts. No longer retrying. Events for this iteration will not be reported.\" % (event_file_path, LoggingConstants.MaxAttemptsForEventFileCreationWriteMove)\n\n        self.filehelper.execute_with_retries(\n            LoggingConstants.MaxAttemptsForEventFileCreationWriteMove,\n            LoggingConstants.ThreadSleepDuration,\n            success_msg,\n            retry_msg,\n            err_msg,\n            lambda: file.write(json_data)\n        )\n\n    def _send_event_file_to_event_directory(self, file_path, events_folder):\n        file_info = os.stat(file_path)\n        file_size = file_info.st_size\n\n        if self.space_available_in_event_directory - file_size >= 0:\n            new_path_for_event_file = os.path.join(events_folder, os.path.basename(file_path))\n            success_msg = \"Successfully moved event file to event directory: %s\" % new_path_for_event_file\n            retry_msg = \"Unable to move event file to event directory: %s. Retrying...\" % file_path\n            err_msg = \"Unable to move event file to event directory: %s . No longer retrying. Events for this iteration will not be reported.\" % file_path\n\n            self.filehelper.execute_with_retries(\n                LoggingConstants.MaxAttemptsForEventFileCreationWriteMove,\n                LoggingConstants.ThreadSleepDuration,\n                success_msg,\n                retry_msg,\n                err_msg,\n                lambda: shutil.move(file_path, new_path_for_event_file)\n            )\n\n            self.space_available_in_event_directory -= file_size\n        else:\n            self.space_available_in_event_directory = 0\n            FileHelpers.deleteFile(file_path)\n            print(\"Information: Event reporting has paused due to reaching maximum capacity in the Event directory. Reporting will resume once space is available. Events for this iteration will not be reported.\")\n\n    def clear_temp_directory(self, directory_path):\n        try:\n            if os.path.exists(directory_path):\n                if len(os.listdir(directory_path)) == 0:\n                    os.rmdir(directory_path)\n                else:\n                    shutil.rmtree(directory_path)\n        except Exception as ex:\n            print(\"Warning: Error clearing the temp directory. 
Exception: {0}\".format(str(ex)))\n    \n    def dispose(self):\n        print(\"Information: Dispose(), called on EventLogger. Event processing is terminating...\")\n        self._dispose(True)\n\n    def _dispose(self, disposing):\n        global logger\n        try:\n            if not self.disposed:\n                if disposing and self.event_logging_enabled:\n                    if self.async_event_logging == 1:\n                        self.event_processing_signal.set()\n                        self.event_processing_task.join()\n                        self.event_processing_signal.clear()\n                    if (self.current_message != ''):\n                        self.event_queue.put(Event(\"Info\", self.current_message, LoggingConstants.DefaultEventTaskName, self.operation_id, self.extension_version).convertToDictionary())\n                    if not self.event_queue.empty():\n                        try:\n                            self._process_events()\n                            self.current_message = ''\n                            self.dispose()\n                        except Exception as ex:\n                            logger.log(\"Warning: Unable to process events before termination of extension. Exception: {0}\" .format(str(ex)))\n                self.disposed = True\n                print(\"Information: Event Logger has terminated\")\n                print(\"Clearing the temp directory\")\n                self.clear_temp_directory(self.temporary_directory)\n                self.event_logging_enabled = False\n        except Exception as ex:\n            print(\"Warning: Processing Dispose() of EventLogger resulted in Exception: {0}\" .format(str(ex)))"
  },
  {
    "path": "VMBackup/main/Utils/HandlerUtil.py",
    "content": "#\n# Handler library for Linux IaaS\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"eventFolder\": \"<your event folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n    \n  }\n}]\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. 
Waiting for a new configuration from user.\"\n    }\n}\nExample Status Report:\n[{\"version\":\"1.0\",\"timestampUTC\":\"2014-05-29T04:20:13Z\",\"status\":{\"name\":\"Chef Extension Handler\",\"operation\":\"chef-client-run\",\"status\":\"success\",\"code\":0,\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"Chef-client run success\"}}}]\n\n\"\"\"\n\n\nimport os\nimport os.path\nimport shlex\nimport sys\nimport re\ntry:\n    import imp as imp\nexcept ImportError:\n    import importlib as imp\nimport base64\nimport json\nimport tempfile\nimport time\nfrom os.path import join\nimport Utils.WAAgentUtil\nfrom Utils.WAAgentUtil import waagent\nimport logging\nimport logging.handlers\ntry:\n        import ConfigParser as ConfigParsers\nexcept ImportError:\n        import configparser as ConfigParsers\nfrom common import CommonVariables\nimport platform\nimport subprocess\nimport datetime\nimport Utils.Status\nfrom Utils.EventLoggerUtil import EventLogger\nfrom Utils.LogHelper import LoggingLevel, LoggingConstants, FileHelpers\n\n# Handle the deprecation of platform.dist() in Python 3.8+\ntry:\n    import distro\n    HAS_DISTRO = True\nexcept ImportError:\n    HAS_DISTRO = False\nfrom MachineIdentity import MachineIdentity\nimport ExtensionErrorCodeHelper\nimport traceback\n\nDateTimeFormat = \"%Y-%m-%dT%H:%M:%SZ\"\n\nclass HandlerContext:\n    def __init__(self,name):\n        self._name = name\n        self._version = '0.0'\n        return\n\nclass HandlerUtility:\n    telemetry_data = {} \n    serializable_telemetry_data = []\n    ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success\n    SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none\n    HealthStatusCode = -1\n    def __init__(self, log, error, short_name):\n        self._log = log\n        self._error = error\n        self.log_message = \"\"\n        self._short_name = short_name\n        self.patching = None\n        self.storageDetailsObj = None\n        self.partitioncount = 0\n        self.logging_file = None\n        self.pre_post_enabled = False\n        self.severity_level = self.get_severity_level()\n        self.event_dir = None\n        self.eventlogger = None\n        self.operation = None\n\n    def _get_log_prefix(self):\n        return '[%s-%s]' % (self._context._name, self._context._version)\n\n    # Look through all .settings files in the config folder and,\n    # Retrieve the most recent modified file's seq#\n    def _get_current_seq_no(self, config_folder):\n        seq_no = -1\n        cur_seq_no = -1\n        freshest_time = None\n        for subdir, dirs, files in os.walk(config_folder):\n            for file in files:\n                try:\n                    if(file.endswith('.settings')):\n                        cur_seq_no = int(os.path.basename(file).split('.')[0])\n                        if(freshest_time == None):\n                            freshest_time = os.path.getmtime(join(config_folder,file))\n                            seq_no = cur_seq_no\n                        else:\n                            current_file_m_time = os.path.getmtime(join(config_folder,file))\n                            if(current_file_m_time > freshest_time):\n                                freshest_time = current_file_m_time\n                                seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return seq_no\n\n    def get_last_seq(self):\n        if(os.path.isfile('mrseq')):\n            seq = waagent.GetFileContents('mrseq')\n        
    if(seq):\n                return int(seq)\n        return -1\n\n    def exit_if_same_seq(self):\n        current_seq = int(self._context._seq_no)\n        last_seq = self.get_last_seq()\n        if(current_seq == last_seq):\n            self.log(\"the sequence number are same, so skip, current:\" + str(current_seq) + \"== last:\" + str(last_seq))\n            self.update_settings_file()\n            if(self.eventlogger is not None):\n                self.eventlogger.dispose()\n            sys.exit(0)\n\n    def set_event_logger(self, eventlogger):\n        self.eventlogger = eventlogger\n\n    def log(self, message,level='Info'):\n        try:\n            self.log_with_no_try_except(message, level)\n        except IOError:\n            pass\n        except Exception as e:\n            try:\n                errMsg = str(e) + 'Exception in hutil.log'\n                self.log_with_no_try_except(errMsg, 'Warning')\n            except Exception as e:\n                pass\n\n    def log_with_no_try_except(self, message, level='Info'):\n        WriteLog = self.get_strvalue_from_configfile('WriteLog','True')\n        if (WriteLog == None or WriteLog == 'True'):\n            if sys.version_info > (3,):\n                if self.logging_file is not None:\n                    self.log_py3(message)\n                    if self.eventlogger != None:\n                        self.eventlogger.trace_message(level, message)\n                else:\n                    pass\n            else:\n                self._log(self._get_log_prefix() + message)\n                if self.eventlogger != None:\n                    self.eventlogger.trace_message(level, message)\n            message = \"{0}  {1}  {2} \\n\".format(str(datetime.datetime.utcnow()) , level , message)\n        self.log_message = self.log_message + message\n\n    def log_py3(self, msg):\n        if type(msg) is not str:\n            msg = str(msg, errors=\"backslashreplace\")\n        msg = str(datetime.datetime.utcnow()) + \" \" + str(self._get_log_prefix()) + msg + \"\\n\"\n        try:\n            with open(self.logging_file, \"a+\") as C :\n                C.write(msg)\n        except IOError:\n            pass\n\n    def error(self, message):\n        self._error(self._get_log_prefix() + message)\n\n    def fetch_log_message(self):\n        return self.log_message\n\n    def _decrypt_protected_settings(self, encrypted_file, cert_path, pkey_path):\n        \"\"\"\n        Decrypt protected settings with FIPS 140-3 AES256 support and defensive fallback.\n        \n        For FIPS 140-3 compliance, CRP is upgrading encryption to AES256. Opted-in VMs receive\n        protected settings encrypted with AES256, while other VMs continue using DES_EDE3_CBC.\n        \n        The 'cms' command supports both AES256 and DES_EDE3_CBC encryption, while 'smime' only\n        supports DES_EDE3_CBC. 
We try 'cms' first and fallback to 'smime' for compatibility.\n        \n        Args:\n            encrypted_file: Path to temporary file containing encrypted settings\n            cert_path: Path to certificate file (.crt)\n            pkey_path: Path to private key file (.prv)\n            \n        Returns:\n            Decrypted cleartext string\n        \"\"\"\n        cleartxt = None\n        \n        # Determine base64 decode command based on platform\n        if 'NS-BSD' in platform.system():\n            # base64 tool is not available with NSBSD, use openssl\n            base64_cmd = self.patching.openssl_path + \" base64 -d -A -in \" + encrypted_file\n        else:\n            base64_cmd = self.patching.base64_path + \" -d \" + encrypted_file\n        \n        # Try OpenSSL CMS command first (supports both AES256 and DES_EDE3_CBC)\n        try:\n            cms_cmd = base64_cmd + \" | \" + self.patching.openssl_path + \" cms -inform DER -decrypt -recip \" + cert_path + \" -inkey \" + pkey_path\n            self.log(\"Attempting decryption using OpenSSL CMS command (supports AES256 and DES_EDE3_CBC)\")\n            result = waagent.RunGetOutput(cms_cmd, chk_err=False, log_cmd=False)\n            if result[0] == 0 and result[1]:  # Success (return code 0) and non-empty output\n                cleartxt = result[1]\n                self.log(\"Successfully decrypted protected settings using CMS command\")\n                return cleartxt\n            else:\n                self.log(\"CMS decryption failed with return code: \" + str(result[0]) + \", attempting fallback to SMIME\")\n        except Exception as e:\n            self.log(\"CMS decryption failed with exception: \" + type(e).__name__ + \", attempting fallback to SMIME\")\n        \n        # Fallback to OpenSSL SMIME command (supports DES_EDE3_CBC only)\n        try:\n            smime_cmd = base64_cmd + \" | \" + self.patching.openssl_path + \" smime -inform DER -decrypt -recip \" + cert_path + \" -inkey \" + pkey_path\n            self.log(\"Attempting decryption using OpenSSL SMIME command (fallback - supports DES_EDE3_CBC only)\")\n            result = waagent.RunGetOutput(smime_cmd, chk_err=False, log_cmd=False)\n            if result[0] == 0 and result[1]:  # Success (return code 0) and non-empty output\n                cleartxt = result[1]\n                self.log(\"Successfully decrypted protected settings using SMIME command (fallback)\")\n                return cleartxt\n            else:\n                self.error(\"SMIME decryption also failed with return code: \" + str(result[0]))\n        except Exception as e:\n            self.error(\"SMIME decryption failed with exception: \" + type(e).__name__)\n        \n        # If both methods fail, raise an error\n        if not cleartxt:\n            self.error(\"Failed to decrypt protected settings using both CMS and SMIME commands\")\n            \n        return cleartxt\n\n    def _parse_config(self, ctxt):\n        config = None\n        try:\n            config = json.loads(ctxt)\n        except:\n            self.error('JSON exception decoding settings file')\n        if config == None:\n            self.error('JSON error processing settings file')\n        else:\n            handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n            if 'protectedSettings' in handlerSettings and \\\n                    \"protectedSettingsCertThumbprint\" in handlerSettings and \\\n                    handlerSettings['protectedSettings'] is not None and 
\\\n                    handlerSettings[\"protectedSettingsCertThumbprint\"] is not None:\n                protectedSettings = handlerSettings['protectedSettings']\n                thumb = handlerSettings['protectedSettingsCertThumbprint']\n                cert = waagent.LibDir + '/' + thumb + '.crt'\n                pkey = waagent.LibDir + '/' + thumb + '.prv'\n                f = tempfile.NamedTemporaryFile(delete=False)\n                f.close()\n                waagent.SetFileContents(f.name,config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])\n                cleartxt = None\n                # Decrypt protected settings with FIPS 140-3 AES256 support and defensive fallback\n                # Try cms command first (supports both AES256 and DES_EDE3_CBC), fallback to smime if needed\n                cleartxt = self._decrypt_protected_settings(f.name, cert, pkey)\n                jctxt = {}\n                try:\n                    jctxt = json.loads(cleartxt)\n                    self.log('Config decoded correctly.')\n                except:\n                    self.error('JSON exception decoding decrypted protected settings')\n                handlerSettings['protectedSettings'] = jctxt\n                # cleaning/removing the temp files created\n                try:\n                    if os.path.isfile(f.name):\n                        os.remove(f.name)\n                except Exception as e:\n                    self.log('Failed to remove the temporary file ' + str(e))\n        return config\n\n    def do_parse_context(self, operation, seqNo):\n        self.operation = operation\n        _context = self.try_parse_context(seqNo)\n        getWaagentPathUsed = Utils.WAAgentUtil.GetPathUsed()\n        if(getWaagentPathUsed == 0):\n            self.log(\"waagent old path is used\")\n        else:\n            self.log(\"waagent new path is used\")\n        if not _context:\n            self.log(\"maybe no new settings file found\")\n            if(self.eventlogger is not None):\n                self.eventlogger.dispose()\n            sys.exit(0)\n        return _context\n\n    def try_parse_context(self, seqNo):\n        self._context = HandlerContext(self._short_name)\n        handler_env = None\n        config = None\n        ctxt = None\n        code = 0\n        try:\n            self.log('try_parse_context : Sequence Number received ' + str(seqNo))\n            # get the HandlerEnvironment.json.  
According to the extension handler\n            # spec, it is always in the ./ directory\n            self.log('cwd is ' + os.path.realpath(os.path.curdir))\n            handler_env_file = './HandlerEnvironment.json'\n            if not os.path.isfile(handler_env_file):\n                self.error(\"Unable to locate \" + handler_env_file)\n                return None\n            ctxt = waagent.GetFileContents(handler_env_file)\n            if ctxt == None :\n                self.error(\"Unable to read \" + handler_env_file)\n            try:\n                handler_env = json.loads(ctxt)\n            except:\n                pass\n            if handler_env == None :\n                self.log(\"JSON error processing \" + handler_env_file)\n                return None\n            if type(handler_env) == list:\n                handler_env = handler_env[0]\n            self._context._name = handler_env['name']\n            self._context._version = str(handler_env['version'])\n            self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n            self._context._log_dir = handler_env['handlerEnvironment']['logFolder']\n            self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')\n            self.logging_file=self._context._log_file\n            self._context._shell_log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'shell.log')\n            self._change_log_file()\n            try:\n                if(self.get_intvalue_from_configfile(\"disable_logging\", 0) == 0):\n                    self._context._event_dir = handler_env['handlerEnvironment']['eventsFolder']\n                    self.event_dir = self._context._event_dir\n            except Exception as e:\n                self._context._event_dir = None\n                self.event_dir = None\n                errorMsg = 'The eventsFolder field is missing in handlerEnvironment.json file. Hence skipping event logging!'\n                self.log(errorMsg, 'Error')\n                self.log(repr(e), 'Error')\n            self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']\n            self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']\n            if seqNo != -1:\n                self._context._seq_no = seqNo\n            else:\n                self._context._seq_no = self._get_current_seq_no(self._context._config_dir)\n            if self._context._seq_no < 0:\n                self.error(\"Unable to locate a .settings file!\")\n                return None\n            self._context._seq_no = str(self._context._seq_no)\n            if seqNo != -1:\n                self.log('sequence number from environment variable is ' + self._context._seq_no)\n            else:\n                self.log('sequence number based on config file-names is ' + self._context._seq_no)\n            self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')\n            self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')\n            self.log(\"setting file path is\" + self._context._settings_file)\n            ctxt = None\n            ctxt = waagent.GetFileContents(self._context._settings_file)\n            if ctxt == None :\n                error_msg = 'Unable to read ' + self._context._settings_file + '. 
'\n                self.error(error_msg)\n                return None\n            else:\n                if(self.operation is not None and self.operation.lower() == \"enable\"):\n                    # we should keep the current status file\n                    self.backup_settings_status_file(self._context._seq_no)\n            self._context._config = self._parse_config(ctxt)\n        except Exception as e:\n            errorMsg = \"Unable to parse context, error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.log(errorMsg, 'Error')\n            raise\n        return self._context\n\n    def _change_log_file(self):\n        self.log(\"Change log file to \" + self._context._log_file)\n        waagent.LoggerInit(self._context._log_file,'/dev/stdout')\n        self._log = waagent.Log\n        self._error = waagent.Error\n\n    def save_seq(self):\n        self.set_last_seq(self._context._seq_no)\n        self.log(\"set most recent sequence number to \" + self._context._seq_no)\n\n    def set_last_seq(self,seq):\n        waagent.SetFileContents('mrseq', str(seq))\n\n\n    '''\n    Sample /etc/azure/vmbackup.conf\n \n    [SnapshotThread]\n    seqsnapshot = 1\n    isanysnapshotfailed = False\n    UploadStatusAndLog = True\n    WriteLog = True\n    onlyLocalFilesystems = True\n\n    seqsnapshot valid values(0-> parallel snapshot, 1-> programatically set sequential snapshot , 2-> customer set it for sequential snapshot)\n    '''\n\n    def get_value_from_configfile(self, key):\n        global backup_logger\n        value = None\n        configfile = '/etc/azure/vmbackup.conf'\n        try :\n            if os.path.exists(configfile):\n                config = ConfigParsers.ConfigParser()\n                config.read(configfile)\n                if config.has_option('SnapshotThread',key):\n                    value = config.get('SnapshotThread',key)  \n        except Exception as e:\n            pass\n\n        return value\n\n    def get_strvalue_from_configfile(self, key, default):\n        value = self.get_value_from_configfile(key)\n        \n        if value == None or value == '':\n            value = default\n\n        try :\n            value_str = str(value)\n        except ValueError :\n            self.log('Not able to parse the read value as string, falling back to default value', 'Warning')\n            value = default\n\n        return value\n\n    def get_intvalue_from_configfile(self, key, default):\n        value = default\n        value = self.get_value_from_configfile(key)\n        \n        if value == None or value == '':\n            value = default\n\n        try :\n            value_int = int(value)\n        except ValueError :\n            self.log('Not able to parse the read value as int, falling back to default value', 'Warning')\n            value = default\n\n        return int(value)\n \n    def set_value_to_configfile(self, key, value):\n        configfile = '/etc/azure/vmbackup.conf'\n        try :\n            self.log('setting ' + str(key)  + 'in config file to ' + str(value) , 'Info')\n            if not os.path.exists(os.path.dirname(configfile)):\n                os.makedirs(os.path.dirname(configfile))\n            config = ConfigParsers.RawConfigParser()\n            if os.path.exists(configfile):\n                config.read(configfile)\n                if config.has_section('SnapshotThread'):\n                    if config.has_option('SnapshotThread', key):\n                        config.remove_option('SnapshotThread', key)\n    
            else:\n                    config.add_section('SnapshotThread')\n            else:\n                config.add_section('SnapshotThread')\n            config.set('SnapshotThread', key, value)\n            with open(configfile, 'w') as config_file:\n                config.write(config_file)\n        except Exception as e:\n            errorMsg = \" Unable to set config file.key is \"+ key +\"with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.log(errorMsg, 'Warning')\n        return value\n\n    def get_machine_id(self):\n        machine_id_file = \"/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B\"\n        machine_id = \"\"\n        file_pointer = None\n        try:\n            if not os.path.exists(os.path.dirname(machine_id_file)):\n                os.makedirs(os.path.dirname(machine_id_file))\n\n            if os.path.exists(machine_id_file):\n                file_pointer = open(machine_id_file, \"r\")\n                machine_id = file_pointer.readline()\n                file_pointer.close()\n             \n            else:\n                mi = MachineIdentity()\n                if(mi.stored_identity() != None):\n                    machine_id = mi.stored_identity()[1:-1]\n                    file_pointer = open(machine_id_file, \"w\")\n                    file_pointer.write(machine_id)\n                    file_pointer.close()\n        except Exception as e:\n            errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg, 'Error')\n        finally :\n            if file_pointer != None :\n                if file_pointer.closed == False :\n                    file_pointer.close()\n \n        self.log(\"Unique Machine Id  : {0}\".format(machine_id))\n        return machine_id\n\n\n    def get_storage_details(self,total_size,failure_flag):\n        self.storageDetailsObj = Utils.Status.StorageDetails(self.partitioncount, total_size, False, failure_flag)\n\n        self.log(\"partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}\".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed))\n        return self.storageDetailsObj\n\n    def SetExtErrorCode(self, extErrorCode):\n        if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success : \n            self.ExtErrorCode = extErrorCode\n\n    def SetSnapshotConsistencyType(self, snapshotConsistency):\n        self.SnapshotConsistency = snapshotConsistency\n\n    def SetHealthStatusCode(self, healthStatusCode):\n        self.HealthStatusCode = healthStatusCode\n\n    def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj,total_size,failure_flag):\n        tstamp = time.strftime(DateTimeFormat, time.gmtime())\n        formattedMessage = Utils.Status.FormattedMessage(\"en-US\",message)\n        stat_obj = Utils.Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(total_size,failure_flag), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj)\n        top_stat_obj = Utils.Status.TopLevelStatus(self._context._version, tstamp, stat_obj)\n\n        return 
top_stat_obj\n\n    def get_extension_version(self):\n        try:\n            cur_dir = os.getcwd()\n            cur_extension = cur_dir.split(\"/\")[-1]\n            extension_version = cur_extension.split(\"-\")[-1]\n            return extension_version\n        except Exception as e:\n            errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg)\n            extension_version=\"Unknown\"\n            return extension_version\n\n    def get_wala_version(self):\n        try:\n            file_pointer = open('/var/log/waagent.log','r')\n            waagent_version = ''\n            for line in file_pointer:\n                if 'Azure Linux Agent Version' in line:\n                    waagent_version = line.split(':')[-1]\n            if waagent_version[:-1]==\"\": #for removing the trailing '\\n' character\n                waagent_version = self.get_wala_version_from_command()\n                return waagent_version\n            else:\n                waagent_version = waagent_version[:-1].split(\"-\")[-1] #getting only version number\n                return waagent_version\n        except Exception as e:\n            errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg)\n            waagent_version=\"Unknown\"\n            return waagent_version\n\n    def get_wala_version_from_command(self):\n        try:\n            cur_dir = os.getcwd()\n            os.chdir(\"..\")\n            out = self.command_output_from_subprocess(['/usr/sbin/waagent', '-version'],30)\n            if \"Goal state agent: \" in out:\n                 waagent_version = out.split(\"Goal state agent: \")[1].strip()\n            else:\n                out =  out.split(\" \")\n                waagent = out[0]\n                waagent_version = waagent.split(\"-\")[-1] #getting only version number\n\n            os.chdir(cur_dir)\n            return waagent_version\n        except Exception as e:\n            errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg)\n            os.chdir(cur_dir)\n            waagent_version=\"Unknown\"\n            return waagent_version\n\n    def get_dist_info(self):\n        try:\n            if 'FreeBSD' in platform.system():\n                release = re.sub('\\\\-.*$', '', str(platform.release()))\n                return \"FreeBSD\",release\n            if 'NS-BSD' in platform.system():\n                release = re.sub('\\\\-.*$', '', str(platform.release()))\n                return \"NS-BSD\", release\n            \n            # Try modern approach first (Python 3.8+ compatible)\n            if HAS_DISTRO:\n                try:\n                    distro_name = distro.name()\n                    distro_version = distro.version()\n                    if distro_name and distro_version:\n                        return distro_name + \"-\" + distro_version, platform.release()\n                except Exception as e:\n                    self.log('Warning: distro package failed with error: %s' % str(e))\n            \n            # Fallback to linux_distribution (deprecated in Python 3.5, removed in Python 3.8)\n            if hasattr(platform, 'linux_distribution'):\n                try:\n                    distinfo = list(platform.linux_distribution(full_distribution_name=0))\n                    # remove 
trailing whitespace in distro name\n                    if(distinfo[0] == ''):\n                        osfile= open(\"/etc/os-release\", \"r\")\n                        for line in osfile:\n                            lists=str(line).split(\"=\")\n                            if(lists[0]== \"NAME\"):\n                                distroname = lists[1].split(\"\\\"\")\n                            if(lists[0]==\"VERSION\"):\n                                distroversion = lists[1].split(\"\\\"\")\n                        osfile.close()\n                        return distroname[1]+\"-\"+distroversion[1],platform.release()\n                    distinfo[0] = distinfo[0].strip()\n                    return  distinfo[0]+\"-\"+distinfo[1],platform.release()\n                except Exception as e:\n                    self.log('Warning: platform.linux_distribution failed with error: %s' % str(e))\n            \n            # Fallback to platform.dist() (deprecated in Python 3.5, removed in Python 3.8+)\n            if hasattr(platform, 'dist'):\n                try:\n                    distinfo = platform.dist()\n                    return  distinfo[0]+\"-\"+distinfo[1],platform.release()\n                except Exception as e:\n                    self.log('Warning: platform.dist failed with error: %s' % str(e))\n            \n            # Final fallback: try to parse /etc/os-release manually\n            try:\n                distroname = None\n                distroversion = None\n                with open(\"/etc/os-release\", \"r\") as osfile:\n                    for line in osfile:\n                        lists = str(line.strip()).split(\"=\", 1)\n                        if len(lists) >= 2:\n                            key = lists[0].strip()\n                            value = lists[1].strip().strip('\"')\n                            if key == \"NAME\":\n                                distroname = value\n                            elif key == \"VERSION\" or key == \"VERSION_ID\":\n                                distroversion = value\n                \n                if distroname and distroversion:\n                    return distroname + \"-\" + distroversion, platform.release()\n                elif distroname:\n                    return distroname + \"-Unknown\", platform.release()\n            except Exception as e:\n                self.log('Warning: Failed to parse /etc/os-release with error: %s' % str(e))\n            \n            # If all else fails, return unknown\n            return \"Unknown\", \"Unknown\"\n            \n        except Exception as e:\n            errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg)\n            return \"Unknown\",\"Unknown\"\n\n    def substat_new_entry(self,sub_status,code,name,status,formattedmessage):\n        sub_status_obj = Utils.Status.SubstatusObj(code,name,status,formattedmessage)\n        sub_status.append(sub_status_obj)\n        return sub_status\n\n    def timedelta_total_seconds(self, delta):\n        if not hasattr(datetime.timedelta, 'total_seconds'):\n            return delta.days * 86400 + delta.seconds\n        else:\n            return delta.total_seconds()\n\n    @staticmethod\n    def add_to_telemetery_data(key,value):\n        HandlerUtility.telemetry_data[key]=value\n\n    @staticmethod\n    def get_telemetry_data(key):\n        return HandlerUtility.telemetry_data[key]\n    \n    def add_telemetry_data(self):\n        
os_version,kernel_version = self.get_dist_info()\n        workloads = self.get_workload_running()\n        HandlerUtility.add_to_telemetery_data(\"guestAgentVersion\",self.get_wala_version_from_command())\n        HandlerUtility.add_to_telemetery_data(\"extensionVersion\",self.get_extension_version())\n        HandlerUtility.add_to_telemetery_data(\"osVersion\",os_version)\n        HandlerUtility.add_to_telemetery_data(\"kernelVersion\",kernel_version)\n        HandlerUtility.add_to_telemetery_data(\"workloads\",str(workloads))\n        HandlerUtility.add_to_telemetery_data(\"prePostEnabled\", str(self.pre_post_enabled))\n    \n    def convert_telemetery_data_to_bcm_serializable_format(self):\n        HandlerUtility.serializable_telemetry_data = []\n        for k,v in HandlerUtility.telemetry_data.items():\n            each_telemetry_data = {}\n            each_telemetry_data[\"Value\"] = v\n            each_telemetry_data[\"Key\"] = k\n            HandlerUtility.serializable_telemetry_data.append(each_telemetry_data)\n \n    def do_status_report(self, operation, status, status_code, message, taskId = None, commandStartTimeUTCTicks = None, snapshot_info = None,total_size = 0,failure_flag = True ):\n        self.log(\"{0},{1},{2},{3},{4}\".format(operation, status, status_code, message, failure_flag ))\n        sub_stat = []\n        stat_rept = []\n        self.add_telemetry_data()\n        snapshotTelemetry = \"\"\n\n        if CommonVariables.snapshotCreator in HandlerUtility.telemetry_data.keys():\n            snapshotTelemetry = \"{0}{1}={2}, \".format(snapshotTelemetry , CommonVariables.snapshotCreator , HandlerUtility.telemetry_data[CommonVariables.snapshotCreator])\n        if CommonVariables.hostStatusCodePreSnapshot in HandlerUtility.telemetry_data.keys():\n            snapshotTelemetry = \"{0}{1}={2}, \".format(snapshotTelemetry , CommonVariables.hostStatusCodePreSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodePreSnapshot])\n        if CommonVariables.hostStatusCodeDoSnapshot in HandlerUtility.telemetry_data.keys():\n            snapshotTelemetry = \"{0}{1}={2}, \".format(snapshotTelemetry , CommonVariables.hostStatusCodeDoSnapshot , HandlerUtility.telemetry_data[CommonVariables.hostStatusCodeDoSnapshot])\n\n        if CommonVariables.statusBlobUploadError in HandlerUtility.telemetry_data.keys():\n            message = \"{0} {1}={2}, \".format(message , CommonVariables.statusBlobUploadError , HandlerUtility.telemetry_data[CommonVariables.statusBlobUploadError])\n        message = message + snapshotTelemetry\n\n        vm_health_obj = Utils.Status.VmHealthInfoObj(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode], int(self.ExtErrorCode))\n\n        consistencyTypeStr = CommonVariables.consistency_crashConsistent\n        if (self.SnapshotConsistency != Utils.Status.SnapshotConsistencyType.crashConsistent):\n            if (status_code == CommonVariables.success_appconsistent):\n                self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.applicationConsistent\n                consistencyTypeStr = CommonVariables.consistency_applicationConsistent\n            elif (status_code == CommonVariables.success):\n                self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.fileSystemConsistent\n                consistencyTypeStr = CommonVariables.consistency_fileSystemConsistent\n            else:\n                self.SnapshotConsistency = Utils.Status.SnapshotConsistencyType.none\n      
          consistencyTypeStr = CommonVariables.consistency_none\n        HandlerUtility.add_to_telemetery_data(\"consistencyType\", consistencyTypeStr)\n\n        extensionResponseObj = Utils.Status.ExtensionResponse(message, self.SnapshotConsistency, \"\", failure_flag)\n        message = str(json.dumps(extensionResponseObj, cls = ComplexEncoder))\n\n        self.convert_telemetery_data_to_bcm_serializable_format()\n        stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.serializable_telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size,failure_flag)\n        time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n        time_span = self.timedelta_total_seconds(time_delta) * 1000\n        date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'\n        stat_rept.timestampUTC = date_place_holder\n        date_string = r'\\/Date(' + str((int)(time_span)) + r')\\/'\n        # Convert TopLevelStatus object to JSON array string\n        # Before: stat_rept is TopLevelStatus object with timestampUTC=\"e2794170-c93d-4178-a8da-9bc7fd91ecc0\"\n        # After: stat_rept = '[{\"version\":\"1.0\",\"timestampUTC\":\"e2794170-c93d-4178-a8da-9bc7fd91ecc0\",\"status\":{\"name\":\"VMSnapshotLinux\",...}}]'\n        stat_rept = \"[\" + json.dumps(stat_rept, cls = ComplexEncoder) + \"]\"\n        # Replace placeholder GUID with actual Microsoft JSON date format first\n        # Before: \"timestampUTC\":\"e2794170-c93d-4178-a8da-9bc7fd91ecc0\"\n        # After: \"timestampUTC\":\"\\/Date(time_span)\\/\"\n        stat_rept = stat_rept.replace(date_place_holder,date_string)\n        # Now remove JSON-escaped forward slashes to get clean date format for C# DateTimeOffset\n        # Before: \"timestampUTC\":\"\\/Date(time_span)\\/\"\n        # After: \"timestampUTC\":\"/Date(time_span)/\"\n        stat_rept = stat_rept.replace(r'\\/', '/') # To fix the datetime format of CreationTime to be consumed by C# DateTimeOffset\n\n        # Add Status as sub-status for Status to be written on Status-File\n        sub_stat = self.substat_new_entry(sub_stat,'0',stat_rept,'success',None)\n        if self.get_public_settings()[CommonVariables.vmType].lower() == CommonVariables.VmTypeV2.lower() and CommonVariables.isTerminalStatus(status) :\n            status = CommonVariables.status_success\n        stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None,total_size,failure_flag)\n        stat_rept_file =  \"[\" + json.dumps(stat_rept_file, cls = ComplexEncoder) + \"]\"\n\n        # rename all other status files, or the WALA would report the wrong\n        # status file.\n        # because the wala choose the status file with the highest sequence\n        # number to report.\n        return stat_rept, stat_rept_file\n\n    def write_to_status_file(self, stat_rept_file):\n        try:\n            tempStatusFile =  os.path.join(self._context._status_dir, CommonVariables.TempStatusFileName)\n            if self._context._status_file:\n                with open(tempStatusFile,'w+') as f:\n                    f.write(stat_rept_file)\n                os.rename(tempStatusFile, self._context._status_file)\n        except Exception as e:\n            errMsg = 'Status file creation failed with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.log(errMsg)\n\n    def is_status_file_exists(self):\n        try:\n       
     if os.path.exists(self._context._status_file):\n                return True\n            else:\n                return False\n        except Exception as e:\n            self.log(\"exception is getting status file\" + traceback.format_exc())\n            return False\n\n    # Rename all .settings and .status files that do not belong to current seq# with '_' suffix\n    # Clear older files in the respective directories\n    def backup_settings_status_file(self, _seq_no):\n        self.log(\"current seq no is \" + _seq_no)\n        file_extn_settings = '.settings'\n        file_extn_status = '.status'\n        maxLimitOfFiles = 60\n        for subdir, dirs, files in os.walk(self._context._config_dir):\n            for file in files:\n                try:\n                    if(file.endswith(file_extn_settings) and file != (_seq_no + file_extn_settings)):\n                        new_file_name = file.replace(\".\",\"_\")\n                        os.rename(join(self._context._config_dir,file), join(self._context._config_dir,new_file_name))\n                except Exception as e:\n                    self.log(\"failed to rename the status file.\")\n                try:\n                    FileHelpers.clearOldFilesInDirectory(self._context._config_dir, '_settings', maxLimitOfFiles)\n                except Exception as e:\n                    pass  # Ignore the exception in clearing old files and continue\n\n        for subdir, dirs, files in os.walk(self._context._status_dir):\n            for file in files:\n                try:\n                    if(file.endswith(file_extn_status) and file != (_seq_no + file_extn_status)):\n                        new_file_name = file.replace(\".\",\"_\")\n                        os.rename(join(self._context._status_dir,file), join(self._context._status_dir, new_file_name))\n                except Exception as e:\n                    self.log(\"failed to rename the status file.\")\n                try:\n                    FileHelpers.clearOldFilesInDirectory(self._context._status_dir, '_status', maxLimitOfFiles)\n                except Exception as e:\n                    pass  # Ignore the exception in clearing old files and continue\n\n    def do_exit(self, exit_code, operation,status,code,message):\n        try:\n            HandlerUtility.add_to_telemetery_data(\"extErrorCode\", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]))\n            self.do_status_report(operation, status,code,message)\n        except Exception as e:\n            self.log(\"Can't update status: \" + str(e))\n        if(self.eventlogger is not None):\n            self.eventlogger.dispose()\n        sys.exit(exit_code)\n\n    def get_handler_settings(self):\n        return self._context._config['runtimeSettings'][0]['handlerSettings']\n\n    def get_protected_settings(self):\n        return self.get_handler_settings().get('protectedSettings')\n\n    def get_public_settings(self):\n        return self.get_handler_settings().get('publicSettings')\n\n    def is_prev_in_transition(self):\n        curr_seq = self.get_last_seq()\n        last_seq = curr_seq - 1\n        if last_seq >= 0:\n            self.log(\"previous status and path: \" + str(last_seq) + \"  \" + str(self._context._status_dir))\n            status_file_prev = os.path.join(self._context._status_dir, str(last_seq) + '_status')\n            if os.path.isfile(status_file_prev) and os.access(status_file_prev, os.R_OK):\n                searchfile = 
open(status_file_prev, \"r\")\n                for line in searchfile:\n                    if \"Transition\" in line: \n                        self.log(\"transitioning found in the previous status file\")\n                        searchfile.close()\n                        return True\n                searchfile.close()\n        return False\n\n    def get_prev_log(self):\n        with open(self._context._log_file, \"r\") as f:\n            lines = f.readlines()\n        if(len(lines) > 300):\n            lines = lines[-300:]\n            return ''.join(str(x) for x in lines)\n        else:\n            return ''.join(str(x) for x in lines)\n    \n    def get_shell_script_log(self):\n        lines = \"\" \n        try:\n            with open(self._context._shell_log_file, \"r\") as f:\n                lines = f.readlines()\n            if(len(lines) > 10):\n                lines = lines[-10:]\n            return ''.join(str(x) for x in lines)\n        except Exception as e:\n            self.log(\"Can't receive shell log file: \" + str(e))\n            return lines\n\n    def update_settings_file(self):\n        if(self._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings') != None):\n            del self._context._config['runtimeSettings'][0]['handlerSettings']['protectedSettings']\n            self.log(\"removing the protected settings\")\n            waagent.SetFileContents(self._context._settings_file,json.dumps(self._context._config))\n\n    def UriHasSpecialCharacters(self, blobs):\n        uriHasSpecialCharacters = False\n\n        if blobs is not None:\n            for blob in blobs:\n                blobUri = str(blob.split(\"?\")[0])\n                if '%' in blobUri:\n                    self.log(blobUri + \" URI has special characters\")\n                    uriHasSpecialCharacters = True\n\n        return uriHasSpecialCharacters\n\n    def get_workload_running(self):\n        workloads = []\n        try:\n            dblist= [\"mysqld\",\"postgresql\",\"oracle\",\"cassandra\",\",mongo\"] ## add all workload process name in lower case\n            if os.path.isdir(\"/proc\"):\n                pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]\n                for pid in pids:\n                    pname = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()\n                    for db in dblist :\n                        if db in str(pname).lower() and db not in workloads :\n                            self.log(\"workload running found with name : \" + str(db))\n                            workloads.append(db)\n            return workloads\n        except Exception as e:\n            self.log(\"Unable to fetch running workloads\" + str(e))\n            return workloads\n        \n    def set_pre_post_enabled(self):\n        self.pre_post_enabled = True\n\n    def command_output_from_subprocess(self , args, process_wait_time):\n        process_out = subprocess.Popen(args, stdout=subprocess.PIPE)\n        while(process_wait_time > 0 and process_out.poll() is None):\n            time.sleep(1)\n            process_wait_time -= 1\n        out = process_out.stdout.read().decode()\n        out = str(out)\n        return out\n\n    def get_severity_level(self):\n        logging_level = LoggingLevel(LoggingConstants.DefaultEventLogLevel)\n        try:\n            log_setting_file_path = os.path.join(os.getcwd(), \"main\", LoggingConstants.LogLevelSettingFile)\n            if os.path.exists(log_setting_file_path):\n                with 
open(log_setting_file_path, 'r') as file:\n                    logging_level_input = json.load(file)\n                    logging_level.__dict__.update(logging_level_input)\n            else:\n                self.log(\"Logging level setting file is not present.\")\n        except Exception as e:\n            self.log(\"error in fetching the severity of logs \" + str(e))\n        return logging_level.EventLogLevel\n\n    @staticmethod\n    def split(logger,txt):\n        result = None\n        try:\n            result = shlex.split(txt)\n        except Exception as e:\n            logger.log('Shlex.Split threw exception error: %s, stack trace: %s' % (str(e), traceback.format_exc()))\n            result = txt.split()\n        return result\n\n        \n    @staticmethod\n    def convert_to_string(txt):\n        if sys.version_info > (3,):\n            txt = str(txt, encoding='utf-8', errors=\"backslashreplace\")\n        else:\n            txt = str(txt)\n        return txt\n    \n    def redact_sensitive_encryption_details(self, request_body):\n        try:\n            meta_list = getattr(request_body, \"snapshotMetadata\", None)\n            for meta in meta_list:\n                if meta.get(\"Key\") == \"DiskEncryptionSettings\":\n                    # Redact the entire value of DiskEncryptionSettings\n                    meta[\"Value\"] = \"REDACTED\"\n            return request_body\n        except Exception as e:\n            self.log(\"Error while redacting: {0}\".format(str(e)), 'Error')\n            return request_body\n\nclass ComplexEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if hasattr(obj,'convertToDictionary'):\n            return obj.convertToDictionary()\n        else:\n            return obj.__dict__\n"
  },
  {
    "path": "VMBackup/main/Utils/HostSnapshotObjects.py",
    "content": "import json\n\nclass HostDoSnapshotRequestBody:\n    def __init__(self, taskId, diskIds, settings, snapshotTaskToken, snapshotMetadata, instantAccessDurationMinutes = None):\n        self.taskId = taskId\n        self.diskIds = diskIds\n        self.snapshotMetadata = snapshotMetadata\n        self.snapshotTaskToken = snapshotTaskToken\n        self.settings = settings\n        self.instantAccessDurationMinutes = instantAccessDurationMinutes\n\n    def convertToDictionary(self):\n        result = dict(taskId = self.taskId, diskIds = self.diskIds, settings = self.settings, snapshotTaskToken = self.snapshotTaskToken, snapshotMetadata = self.snapshotMetadata)\n        if self.instantAccessDurationMinutes is not None:\n            result['instantAccessDurationMinutes'] = self.instantAccessDurationMinutes\n        return result\n\nclass HostPreSnapshotRequestBody:\n    def __init__(self, taskId, snapshotTaskToken, preSnapshotSettings = None):\n        self.taskId = taskId\n        self.snapshotTaskToken = snapshotTaskToken\n        if (preSnapshotSettings != None):\n            self.preSnapshotSettings = preSnapshotSettings\n\n    def convertToDictionary(self):\n        result = dict(taskId=self.taskId, snapshotTaskToken=self.snapshotTaskToken)\n        if hasattr(self, 'preSnapshotSettings'):\n            result['preSnapshotSettings'] = self.preSnapshotSettings\n        return result\n\nclass BlobSnapshotInfo:\n    def __init__(self, isSuccessful, snapshotUri, errorMessage, statusCode, ddSnapshotIdentifier = None):\n        self.isSuccessful = isSuccessful\n        self.snapshotUri = snapshotUri\n        self.errorMessage = errorMessage\n        self.statusCode = statusCode\n        self.ddSnapshotIdentifier = ddSnapshotIdentifier\n\n    def convertToDictionary(self):\n        return dict(isSuccessful = self.isSuccessful, snapshotUri = self.snapshotUri, errorMessage = self.errorMessage, statusCode = self.statusCode, ddSnapshotIdentifier = self.ddSnapshotIdentifier)\n\nclass DDSnapshotIdentifier:\n    def __init__(self, creationTime, id, token, instantAccessDurationMinutes = None):\n        self.creationTime = creationTime\n        self.id = id\n        self.token = token\n        self.instantAccessDurationMinutes = instantAccessDurationMinutes\n\n    def convertToDictionary(self):\n        return dict(creationTime = self.creationTime, id = self.id, token = self.token, instantAccessDurationMinutes = self.instantAccessDurationMinutes)\n\n"
  },
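  {
    "path": "VMBackup/main/Utils/examples/host_snapshot_request_example.py",
    "content": "# Illustrative sketch (an added example file, not part of the original extension source).\n# Shows how the request-body classes above serialize: convertToDictionary() omits\n# instantAccessDurationMinutes when it is None, and in the extension itself these\n# objects are passed to json.dumps via the ComplexEncoder defined in\n# Utils/HandlerUtil.py. All field values below are made-up sample data.\nimport json\nfrom Utils.HostSnapshotObjects import HostDoSnapshotRequestBody\n\nbody = HostDoSnapshotRequestBody(\n    taskId='00000000-0000-0000-0000-000000000000',\n    diskIds=['disk0', 'disk1'],\n    settings={'backupType': 'full'},\n    snapshotTaskToken='sample-token',\n    snapshotMetadata=[{'Key': 'DiskEncryptionSettings', 'Value': 'REDACTED'}])\n\n# instantAccessDurationMinutes was left as None, so it is absent from the output.\nprint(json.dumps(body.convertToDictionary()))\n"
  },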
  {
    "path": "VMBackup/main/Utils/LogHelper.py",
    "content": "import os\nimport datetime\nimport shutil\nimport time\n\nclass LoggingConstants:\n    MaxDayAgeOfStaleFiles = -1  # We don't store unprocessed files beyond 1 day from current processing time\n    LogFileWriteRetryAttempts = 3\n    LogFileWriteRetryTime = 500  # milliseconds\n    MaxAttemptsForEventFileCreationWriteMove = 3\n    MinEventProcesingInterval = 10  # 10 seconds\n    ThreadSleepDuration = 10  # 10 seconds\n    MaxEventDirectorySize = 39981250  # ~= 39Mb\n    MaxEventsPerRun = 300\n    MaxMessageLenLimit = 2900  # 3072 to be precise\n    MaxMessageLengthPerEvent = 3000  # 3072 to be precise\n    DefaultEventTaskName = \"Enable\"\n    # ToDo: The third param-TaskName is by default set to \"Enable\". We can add a mechanism to send the program file name\n    LogLevelSettingFile = \"LogSeverity.json\"\n    DefaultEventLogLevel = 2\n    AllLogEnabledLevel = 0\n\nclass LoggingLevel:\n    def __init__(self, event_log_level):\n        self.EventLogLevel = event_log_level\n        \nclass FileHelpers:\n    @staticmethod\n    def getSizeOfDir(path):\n        total_size = 0\n        for root, dirs, files in os.walk(path):\n            for file in files:\n                file_path = os.path.join(root, file)\n                total_size += os.path.getsize(file_path)\n        return total_size\n\n    @staticmethod\n    def deleteFile(file_path):\n        if os.path.exists(file_path):\n            try:\n                os.remove(file_path)\n                print(\"Information: Successfully deleted file: {0}\".format(file_path))\n            except Exception as ex:\n                print(\"Warning: Failed to delete file {0}. Exception: {1}\".format(file_path, str(ex)))\n        else:\n            print(\"Error: Attempted to delete non-existent file: {0}\".format(file_path))\n\n    @staticmethod\n    def deleteDirectory(directory_path):\n        if os.path.exists(directory_path):\n            try:\n                shutil.rmtree(directory_path)\n                print(\"Information: Successfully deleted directory: {0}\".format(directory_path))\n            except Exception as ex:\n                print(\"Warning: Failed to delete directory {0}. Exception: {1}\".format(directory_path, str(ex)))\n        else:\n            print(\"Error: Attempted to delete non-existent directory: {0}\".format(directory_path))\n\n    @staticmethod\n    def clearOldJsonFilesInDirectory(file_path):\n        try:\n            current_time = datetime.datetime.now()\n            max_day_age = datetime.timedelta(days=LoggingConstants.MaxDayAgeOfStaleFiles)\n            files_deleted = 0\n            for root, dirs, files in os.walk(file_path):\n                for file in files:\n                    file_path = os.path.join(root, file)\n                    last_write_time = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))\n                    if last_write_time < current_time + max_day_age:\n                        try:\n                            os.remove(file_path)\n                            files_deleted += 1\n                        except Exception as ex:\n                            print(\"Warning: Failed to delete old JSON file {0}. 
Exception: {1}\".format(file_path))\n            print(\"Information: Cleared {0} day old JSON files in directory at path {1}, NumberOfFilesRemoved/NumberOfJSONFilesPresent = {2}/{3}\".format(LoggingConstants.MaxDayAgeOfStaleFiles, file_path, files_deleted, len(files)))\n        except Exception as ex:\n            print(\"Warning: Failed to delete old JSON files at path {0}. Exception: {1}\".format(file_path, str(ex)))\n\n    @staticmethod\n    def clearOldFilesInDirectory(directory, extension, file_limit):\n        \"\"\"\n        Deletes older files if the number of files with the given extension exceeds the file_limit.\n        \n        Parameters:\n        directory (str): The directory to clean up.\n        extension (str): The file extension to filter (e.g., \".status\", \".settings\").\n        file_limit (int): Maximum allowed number of files with the given extension.\n        \"\"\"\n        try:\n            # Ensure the directory exists\n            if not os.path.isdir(directory):\n                print(\"Directory '{0}' does not exist.\".format(directory))\n                return\n\n            # Collect all files with the specified extension\n            files_with_ext = [\n                os.path.join(directory, f)\n                for f in os.listdir(directory)\n                if f.endswith(extension) and os.path.isfile(os.path.join(directory, f))\n            ]\n\n            # Sort the files by modification time (oldest first)\n            files_with_ext.sort(key=lambda f: os.path.getmtime(f))\n\n            # Check if the number of files exceeds the limit\n            if len(files_with_ext) > file_limit:\n                files_to_delete = files_with_ext[:len(files_with_ext) - file_limit]\n\n                # Delete the excess files\n                for file in files_to_delete:\n                    try:\n                        os.remove(file)\n                        print(\"Deleted: {0}\".format(file))\n                    except Exception as e:\n                        print(\"Error deleting {0}: {1}\".format(file, str(e)))\n            else:\n                print(\"No files need to be deleted. Total files ({0}) are within the limit.\".format(len(files_with_ext)))\n        \n        except Exception as e:\n            print(\"An unexpected error occurred while clearing old files: {0}\".format(str(e)))\n\n    def execute_with_retries(self, max_attempts, delay, success_msg, retry_msg, err_msg, operation):\n        attempts = 0\n        while attempts < max_attempts:\n            try:\n                result = operation()\n                print(\"Information: \" + success_msg)\n                return result\n            except Exception as ex:\n                attempts += 1\n                print(\"Warning: {0}, Exception: {1}\".format(retry_msg, str(ex)))\n                if attempts < max_attempts:\n                    time.sleep(delay)\n    \n        print(\"Warning: \" + err_msg)\n        return None"
  },
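  {
    "path": "VMBackup/main/Utils/examples/loghelper_usage_example.py",
    "content": "# Illustrative sketch (an added example file, not part of the original extension source).\n# Exercises the FileHelpers maintenance helpers above against a scratch directory;\n# the directory name and limits below are made-up sample values.\nimport os\nfrom Utils.LogHelper import FileHelpers, LoggingConstants\n\nscratch = '/tmp/loghelper_demo'  # hypothetical scratch directory\nif not os.path.isdir(scratch):\n    os.makedirs(scratch)\nfor i in range(5):\n    with open(os.path.join(scratch, 'event{0}.status'.format(i)), 'w') as f:\n        f.write('{}')\n\n# Keep at most 3 '.status' files; the two oldest are removed.\nFileHelpers.clearOldFilesInDirectory(scratch, '.status', 3)\nprint('directory size in bytes: {0}'.format(FileHelpers.getSizeOfDir(scratch)))\n\n# execute_with_retries is an instance method, so it needs a FileHelpers object.\nresult = FileHelpers().execute_with_retries(\n    LoggingConstants.MaxAttemptsForEventFileCreationWriteMove,  # 3 attempts\n    1,  # seconds between attempts\n    'listed scratch directory', 'retrying directory listing',\n    'giving up on directory listing', lambda: os.listdir(scratch))\nprint(result)\n"
  },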
  {
    "path": "VMBackup/main/Utils/ResourceDiskUtil.py",
    "content": "import os\nimport sys\nimport re\nimport subprocess\nimport shlex\nfrom subprocess import *\nimport traceback\nfrom Utils.DiskUtil import DiskUtil\nimport Utils.HandlerUtil\n\n\nSTORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/'\nGEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb'\n\n\ndef read_file(filepath):\n\t\"\"\"\n\tRead and return contents of 'filepath'.\n\t\"\"\"\n\tmode = 'rb'\n\twith open(filepath, mode) as in_file:\n\t\tdata = in_file.read().decode('utf-8')\n\t\treturn data\n\nclass ResourceDiskUtil(object):\n\n\tdef __init__(self,patching,logger):\n\t\tself.logger = logger\n\t\tself.disk_util = DiskUtil.get_instance(patching,logger)\n\n\t\n\t@staticmethod\n\tdef _enumerate_device_id():\n\t\t\"\"\"\n\t\tEnumerate all storage device IDs.\n\t\tArgs:\n\t\tNone\n\t\tReturns:\n\t\tIterator[Tuple[str, str]]: VmBus and storage devices.\n\t\t\"\"\"\n\n\t\tif os.path.exists(STORAGE_DEVICE_PATH):\n\t\t\tfor vmbus in os.listdir(STORAGE_DEVICE_PATH):\n\t\t\t\tdeviceid = read_file(filepath=os.path.join(STORAGE_DEVICE_PATH, vmbus, \"device_id\"))\n\t\t\t\tguid = deviceid.strip('{}\\n')\n\t\t\t\tyield vmbus, guid\n\n\n\t@staticmethod\n\tdef search_for_resource_disk(gen1_device_prefix, gen2_device_id):\n\t\t\"\"\"\n\t\tSearch the filesystem for a device by ID or prefix.\n\t\tArgs:\n\t\tgen1_device_prefix (str): Gen1 resource disk prefix.\n\t\tgen2_device_id (str): Gen2 resource device ID.\n\t\tReturns:\n\t\tstr: The found device.\n\t\t\"\"\"\n\n\t\tdevice = None\n\t\t# We have to try device IDs for both Gen1 and Gen2 VMs.\n\t\t#ResourceDiskUtil.logger.log('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id),True)\n\t\ttry: # pylint: disable=R1702\n\t\t\tfor vmbus, guid in ResourceDiskUtil._enumerate_device_id():\n\t\t\t\tif guid.startswith(gen1_device_prefix) or guid == gen2_device_id:\n\t\t\t\t\tfor root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): # pylint: disable=W0612\n\t\t\t\t\t\troot_path_parts = root.split('/')\n\t\t\t\t\t\t# For Gen1 VMs we only have to check for the block dir in the\n\t\t\t\t\t\t# current device. But for Gen2 VMs all of the disks (sda, sdb,\n\t\t\t\t\t\t# sr0) are presented in this device on the same SCSI controller.\n\t\t\t\t\t\t# Because of that we need to also read the LUN. 
It will be:\n\t\t\t\t\t\t#   0 - OS disk\n\t\t\t\t\t\t#   1 - Resource disk\n\t\t\t\t\t\t#   2 - CDROM\n\t\t\t\t\t\tif root_path_parts[-1] == 'block' and ( # pylint: disable=R1705\n\t\t\t\t\t\t\t\tguid != gen2_device_id or\n\t\t\t\t\t\t\t\troot_path_parts[-2].split(':')[-1] == '1'):\n\t\t\t\t\t\t\tdevice = dirs[0]\n\t\t\t\t\t\t\treturn device\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# older distros\n\t\t\t\t\t\t\tfor d in dirs: # pylint: disable=C0103\n\t\t\t\t\t\t\t\tif ':' in d and \"block\" == d.split(':')[0]:\n\t\t\t\t\t\t\t\t\tdevice = d.split(':')[1]\n\t\t\t\t\t\t\t\t\treturn device\n\t\texcept (OSError, IOError) as exc:\n\t\t\terr_msg='Error getting device for %s or %s: %s , Stack Trace: %s' % (gen1_device_prefix, gen2_device_id, str(exc),traceback.format_exc())\n\t\treturn None\n\n\tdef device_for_ide_port(self):\n\t\t\"\"\"\n\t\tReturn device name attached to ide port 'n'.\n\t\tgen1 device prefix is the prefix of the file name in which the resource disk partition is stored eg sdb\n\t\tgen1 is for new distros\n\t\tIn old distros the directory name which contains resource disk partition is assigned to gen2 device id\n\t\t\"\"\"\n\t\tg0 = \"00000000\"\n\t\tgen1_device_prefix = '{0}-0001'.format(g0)\n\t\tself.logger.log('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, GEN2_DEVICE_ID),True)\n\t\tdevice = self.search_for_resource_disk(\n\t\t\tgen1_device_prefix=gen1_device_prefix,\n\t\t\tgen2_device_id=GEN2_DEVICE_ID\n\t\t)\n\t\tself.logger.log('Found device: {0}'.format(device),True)\n\t\treturn device\n\n\tdef get_mount_point(self, mountlist, device):\n\t\t\"\"\"\n\t\tExample of mountlist:\n\t\t\t/dev/sda1 on / type ext4 (rw)\n\t\t\tproc on /proc type proc (rw)\n\t\t\tsysfs on /sys type sysfs (rw)\n\t\t\tdevpts on /dev/pts type devpts (rw,gid=5,mode=620)\n\t\t\ttmpfs on /dev/shm type tmpfs\n\t\t\t(rw,rootcontext=\"system_u:object_r:tmpfs_t:s0\")\n\t\t\tnone on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n\t\t\t/dev/sdb1 on /mnt/resource type ext4 (rw)\n\t\t\"\"\"\n\t\tif (mountlist and device):\n\t\t\tfor entry in mountlist.split('\\n'):\n\t\t\t\tif(re.search(device, entry)):\n\t\t\t\t\ttokens = Utils.HandlerUtil.HandlerUtility.split(self.logger, entry)\n\t\t\t\t\t#Return the 3rd column of this line\n\t\t\t\t\treturn tokens[2] if len(tokens) > 2 else None\n\t\treturn None\n\n\tdef get_resource_disk_mount_point(self,option=1): # pylint: disable=R0912,R0914\n\t\ttry:\n\t\t\t\"\"\"\n\t\t\tif option = 0 then partition will be returned eg sdb1\n\t\t\tif option = 1 then mount point will be returned eg /mnt/resource\n\t\t\t\"\"\"\n\t\t\tdevice = self.device_for_ide_port()\n\t\t\tif device is None:\n\t\t\t\tself.logger.log('unable to detect disk topology',True,'Error')\n\n\t\t\tif device is not None:\n\t\t\t\tpartition = \"{0}{1}\".format(device,\"1\")  #assuming only one resource disk partition\n\t\t\telse:\n\t\t\t\tpartition=\"\"\n\n\t\t\tself.logger.log(\"Resource disk partition: {0} \".format(partition),True)\n\t\t\tif(option==0):\n\t\t\t\treturn partition\n\n\t\t\t#p = Popen(\"mount\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t#mount_list, err = p.communicate()\n\t\t\tmount_list = self.disk_util.get_mount_output()\n\n\t\t\tif(mount_list is not None):\n\t\t\t\tmount_point = self.get_mount_point(mountlist = mount_list, device = device)\n\t\t\t\tself.logger.log(\"Resource disk {0} is mounted {1}\".format(partition,mount_point),True)\n\t\t\t\tif mount_point:\n\t\t\t\t\treturn mount_point\n\t\t\treturn None\n\t\texcept Exception as e:\n\t\t\terr_msg='Cannot get Resource disk partition, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())\n\t\t\tself.logger.log(err_msg, True, 'Error')\n\t\t\treturn None\n"
  },
  {
    "path": "VMBackup/main/Utils/SizeCalculation.py",
    "content": "import os\nimport os.path\nimport sys\ntry:\n    import imp as imp\nexcept ImportError:\n    import importlib as imp\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport base64\nimport json\nimport tempfile\nimport time\nfrom Utils.DiskUtil import DiskUtil\nfrom Utils.ResourceDiskUtil import ResourceDiskUtil\nimport Utils.HandlerUtil\nimport traceback\nimport subprocess\nimport shlex\nfrom common import CommonVariables\n\nclass SizeCalculation(object):\n\n    def __init__(self,patching, hutil, logger,para_parser):\n        self.patching = patching\n        self.logger = logger\n        self.hutil = hutil\n        self.includedLunList = []\n        self.file_systems_info = []\n        self.non_physical_file_systems = ['fuse', 'nfs', 'cifs', 'overlay', 'aufs', 'lustre', 'secfs2', 'zfs', 'btrfs', 'iso']\n        self.known_fs = ['ext3', 'ext4', 'jfs', 'xfs', 'reiserfs', 'devtmpfs', 'tmpfs', 'rootfs', 'fuse', 'nfs', 'cifs', 'overlay', 'aufs', 'lustre', 'secfs2', 'zfs', 'btrfs', 'iso']\n        self.isOnlyOSDiskBackupEnabled = False\n        try:\n            if(para_parser.customSettings != None and para_parser.customSettings != ''):\n                self.logger.log('customSettings : ' + str(para_parser.customSettings))\n                customSettings = json.loads(para_parser.customSettings)\n                if(\"isOnlyOSDiskBackupEnabled\" in customSettings):\n                    self.isOnlyOSDiskBackupEnabled = customSettings[\"isOnlyOSDiskBackupEnabled\"]\n                    if(self.isOnlyOSDiskBackupEnabled == True):\n                        Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"billingType\",\"os disk\")\n                    else:\n                        Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"billingType\",\"none\")\n                self.logger.log(\"isOnlyOSDiskBackupEnabled : {0}\".format(str(self.isOnlyOSDiskBackupEnabled)))\n        except Exception as e:\n            errMsg = 'Failed to serialize customSettings with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            self.isOnlyOSDiskBackupEnabled = False\n\n        self.disksToBeIncluded = []\n        self.root_devices = []\n        self.root_mount_points = ['/' , '/boot/efi']\n\n        self.devicesToInclude = [] #partitions to be included\n        self.device_mount_points = []\n        self.isAnyDiskExcluded = False\n        self.LunListEmpty = False\n        self.logicalVolume_to_bill = []\n        self.sudo_off = 0 # run the commands using sudo\n        if(para_parser.includedDisks != None and para_parser.includedDisks != '' and CommonVariables.isAnyDiskExcluded in para_parser.includedDisks.keys() and para_parser.includedDisks[CommonVariables.isAnyDiskExcluded] != None ):\n            self.isAnyDiskExcluded = para_parser.includedDisks[CommonVariables.isAnyDiskExcluded]\n            self.logger.log(\"isAnyDiskExcluded {0}\".format(self.isAnyDiskExcluded))\n        if( para_parser.includeLunList != None and para_parser.includeLunList != ''):\n            self.includedLunList = para_parser.includeLunList\n        self.logger.log(\"includedLunList {0}\".format(self.includedLunList))\n        if(self.includedLunList == None or len(self.includedLunList) == 0):\n            self.LunListEmpty = True\n            self.logger.log(\"As the LunList is empty including all disks\")\n    \n    def get_lsscsi_list(self):\n        command = 
\"lsscsi\"\n        if (self.sudo_off == 0):\n            command = \"sudo \" + command\n        try:\n            self.logger.log(\"executing command  {0}\".format(command))\n            self.lsscsi_list = (os.popen(command).read()).splitlines()\n        except Exception as e:\n            error_msg = \"Failed to execute the command \\\"%s\\\" because of error %s , stack trace: %s\" % (command, str(e), traceback.format_exc())\n            self.logger.log(error_msg, True ,'Error')\n            self.lsscsi_list = []\n\n    def get_lsblk_list(self):\n        try:\n            self.output_lsblk = os.popen(\"lsblk -n --list --output name,mountpoint\").read().strip().splitlines()\n        except Exception as e:\n            error_msg = \"Failed to execute the command lsblk -n --list --output name,mountpoint because of error %s , stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(error_msg, True ,'Error')\n            self.output_lsblk = []\n\n    def get_pvs_list(self):\n        try:\n            command = \"pvs\"\n            if (self.sudo_off == 0):\n                command = \"sudo \" + command\n            self.pvs_output = os.popen(command).read().strip().split(\"\\n\")\n            self.pvs_output = self.pvs_output[1:]\n        except Exception as e:\n            error_msg = \"Failed to execute the command \\\"%s\\\" because of error %s , stack trace: %s\" % (command, str(e), traceback.format_exc())\n            self.logger.log(error_msg, True ,'Error')\n            self.pvs_output = []\n\n    def get_loop_devices(self):\n        global disk_util\n        disk_util = DiskUtil.get_instance(patching = self.patching,logger = self.logger)\n        if len(self.file_systems_info) == 0 :\n            self.file_systems_info = disk_util.get_mount_file_systems()\n        self.logger.log(\"file_systems list : \",True)\n        self.logger.log(str(self.file_systems_info),True)\n        disk_loop_devices_file_systems = []\n        for file_system_info in self.file_systems_info:\n            if 'loop' in file_system_info[0]:\n                disk_loop_devices_file_systems.append(file_system_info[0])\n        return disk_loop_devices_file_systems\n  \n    def disk_list_for_billing(self):\n        if(len(self.lsscsi_list) != 0):\n            for item in self.lsscsi_list:\n                idxOfColon = item.rindex(':',0,item.index(']'))# to get the index of last ':'\n                idxOfColon += 1\n                lunNumber = int(item[idxOfColon:item.index(']')])\n                # item_split is the list of elements present in the one row of the cmd sudo lsscsi\n                self.item_split = item.split()\n                #storing the corresponding device name from the list\n                device_name = self.item_split[len(self.item_split)-1]\n\n                for device in self.root_devices :\n                    if device_name in device :\n                        lunNumber = -1\n                        # Changing the Lun# of OS Disk to -1\n\n                if lunNumber in self.includedLunList :\n                    self.disksToBeIncluded.append(device_name)\n                self.logger.log(\"LUN Number {0}, disk {1}\".format(lunNumber,device_name))   \n            self.logger.log(\"Disks to be included {0}\".format(self.disksToBeIncluded))\n        else:\n            self.size_calc_failed = True\n            self.logger.log(\"There is some glitch in executing the command 'lsscsi' and therefore size calculation is marked as failed.\")\n\n    def 
get_logicalVolumes_for_billing(self):\n        try:\n            self.pvs_dict = {}\n            for pvs_item in self.pvs_output:\n                pvs_item_split  = pvs_item.strip().split()\n                if(len(pvs_item_split) > 2):\n                    physicalVolume = pvs_item_split[0]\n                    logicalVolumeGroup = pvs_item_split[1]\n                    if(logicalVolumeGroup in self.pvs_dict.keys()):\n                        self.pvs_dict.get(logicalVolumeGroup).append(physicalVolume)\n                    else:\n                        self.pvs_dict[logicalVolumeGroup] = []\n                        self.pvs_dict.get(logicalVolumeGroup).append(physicalVolume)\n            self.logger.log(\"The pvs_dict contains {0}\".format(str(self.pvs_dict)))\n        except Exception as e:\n            errMsg = 'Failed to serialize pvs_output with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error') \n        for lvg in self.pvs_dict.keys():\n            count = 0\n            for disk in self.disksToBeIncluded:\n                for pv in self.pvs_dict[lvg]:\n                    if(disk in pv):\n                        count = count+1\n            if(count == len(self.pvs_dict[lvg])):\n                lvg = \"/dev/mapper/\" + lvg\n                self.logicalVolume_to_bill.append(lvg)\n            else:\n                self.logger.log(\"Partial snapshotting for the logical volume group {0} can't be taken\".format(lvg))\n        self.logger.log(\"the lvm list to bill are {0}\".format(self.logicalVolume_to_bill))\n\n    def device_list_for_billing(self):\n        self.logger.log(\"In device_list_for_billing\",True)\n        devices_to_bill = [] #list to store device names to be billed\n        device_items = disk_util.get_device_items(None)\n        for device_item in device_items :\n            if str(device_item.name).startswith(\"sd\"):\n                devices_to_bill.append(\"/dev/{0}\".format(str(device_item.name)))\n            else:\n                self.logger.log(\"Not adding device {0} as it does not start with sd\".format(str(device_item.name)))\n        self.logger.log(\"Initial billing items {0}\".format(devices_to_bill))\n        \n        '''\n            Sample output for file_systems_info\n            [('sysfs', 'sysfs', '/sys'), ('proc', 'proc', '/proc'), ('udev', 'devtmpfs', '/dev'),..]\n            Since root devices are at mount points '/' and '/boot/efi' we use file_system_info to find the root_devices based on the mount points.\n        '''\n\n        # check if user off the sudo usage in commands\n        self.sudo_off = self.hutil.get_intvalue_from_configfile(\"sudo_off\", self.sudo_off)\n        self.logger.log(\"sudo flag is {0}\".format(self.sudo_off))\n\n        # The command lsscsi is used for mapping the LUN numbers to the disk_names\n        self.get_lsscsi_list() #populates self.lsscsi_list \n        self.get_lsblk_list() #populates self.lsblk_list\n        self.get_pvs_list()#populates pvs list\n\n        for file_system in self.file_systems_info:\n            if(file_system[2] in self.root_mount_points):\n                self.root_devices.append(file_system[0])\n        self.logger.log(\"root_devices {0}\".format(str(self.root_devices)))\n        self.logger.log(\"lsscsi_list {0}\".format(self.lsscsi_list))\n        '''\n            Sample output of the lsscsi command \n            [1:0:0:15]   disk    Msft     Virtual Disk     1.0   /dev/sda\n            [1:0:0:18]   disk    Msft     Virtual 
Disk     1.0   /dev/sdc\n        '''\n\n        self.disk_list_for_billing() \n        self.get_logicalVolumes_for_billing()\n        self.logger.log(\"lsblk o/p {0}\".format(self.output_lsblk))\n        self.logger.log(\"lvm {0}\".format(self.logicalVolume_to_bill))\n        ''' \n                NAME          MOUNTPOINT\n                sda\n                sda1          /boot/efi\n                sda2          /boot\n                sda3\n                sda4\n                rootvg-tmplv  /tmp\n                rootvg-usrlv  /usr\n                rootvg-optlv  /opt\n                rootvg-homelv /home\n                rootvg-varlv  /var\n                rootvg-rootlv /\n                sdb\n                sdb1          /mnt/resource\n\n                sdc\n                sdc1          /mnt/sdc1\n                sdc2          /mnt/sdc2\n                sdc3\n                sde\n                sde1          /mnt/sde1\n                sdf\n                sdg\n\t'''\n        if(len(self.output_lsblk) == 0):\n            self.size_calc_failed = True\n            self.logger.log(\"There is some glitch in executing the command 'lsblk -n --list --output name,mountpoint' and therefore size calculation is marked as failed.\")\n        \n        for item in self.output_lsblk: \n            item_split = item.split()\n            if(len(item_split)==2):\n                device = item_split[0]\n                mount_point = item_split[1]\n            else:\n                mount_point = \"\"\n                device = \"\"\n            if device != '' and mount_point != '':\n                device = '/dev/' + device\n                for disk in self.disksToBeIncluded :\n                    if disk in device and device not in self.devicesToInclude:\n                        self.devicesToInclude.append(device)\n                        self.device_mount_points.append(mount_point)\n                        break              \n        self.logger.log(\"devices_to_bill: {0}\".format(str(self.devicesToInclude)),True) \n        self.logger.log(\"The mountpoints of devices to bill: {0}\".format(str(self.device_mount_points)), True)\n        self.logger.log(\"exiting device_list_for_billing\",True)\n        return devices_to_bill\n\n    def get_total_used_size(self):\n        try:\n            self.size_calc_failed = False\n\n            onlyLocalFilesystems = self.hutil.get_strvalue_from_configfile(CommonVariables.onlyLocalFilesystems, \"False\") \n            # df command gives the information of all the devices which have mount points\n            if onlyLocalFilesystems in ['True', 'true']:  \n                df = subprocess.Popen([\"df\" , \"-kl\"], stdout=subprocess.PIPE)\n            else:\n                df = subprocess.Popen([\"df\" , \"-k\"], stdout=subprocess.PIPE)\n\n            '''\n            Sample output of the df command\n            Filesystem                                              Type     1K-blocks    Used    Avail Use% Mounted on\n            /dev/sda2                                               xfs       52155392 3487652 48667740   7% /\n            devtmpfs                                                devtmpfs   7170976       0  7170976   0% /dev\n            tmpfs                                                   tmpfs      7180624       0  7180624   0% /dev/shm\n            tmpfs                                                   tmpfs      7180624  760496  6420128  11% /run\n            tmpfs                                                   tmpfs      7180624       0  
7180624   0% /sys/fs/cgroup\n            /dev/sda1                                               ext4        245679  151545    76931  67% /boot\n            /dev/sdb1                                               ext4      28767204 2142240 25140628   8% /mnt/resource\n            /dev/mapper/mygroup-thinv1                              xfs        1041644   33520  1008124   4% /bricks/brick1\n            /dev/mapper/mygroup-85197c258a54493da7880206251f5e37_0  xfs        1041644   33520  1008124   4% /run/gluster/snaps/85197c258a54493da7880206251f5e37/brick2\n            /dev/mapper/mygroup2-thinv2                             xfs       15717376 5276944 10440432  34% /tmp/test\n            /dev/mapper/mygroup2-63a858543baf4e40a3480a38a2f232a0_0 xfs       15717376 5276944 10440432  34% /run/gluster/snaps/63a858543baf4e40a3480a38a2f232a0/brick2\n            tmpfs                                                   tmpfs      1436128       0  1436128   0% /run/user/1000\n            //Centos72test/cifs_test                                cifs      52155392 4884620 47270772  10% /mnt/cifs_test2\n            '''\n            output = \"\"\n            process_wait_time = 300\n            while(df is not None and process_wait_time >0 and df.poll() is None):\n                time.sleep(1)\n                process_wait_time -= 1\n            self.logger.log(\"df command executed for process wait time value\" + str(process_wait_time), True)\n            if(df is not None and df.poll() is not None):\n                self.logger.log(\"df return code \"+str(df.returncode), True)\n                output = df.stdout.read().decode()\n            if sys.version_info > (3,):\n                try:\n                    output = str(output, encoding='utf-8', errors=\"backslashreplace\")\n                except:\n                    output = str(output)\n            else:\n                output = str(output)\n            output = output.strip().split(\"\\n\")\n            self.logger.log(\"output of df : {0}\".format(str(output)),True)\n            disk_loop_devices_file_systems = self.get_loop_devices()\n            self.logger.log(\"outside loop device\", True)\n            total_used = 0\n            total_used_network_shares = 0\n            total_used_gluster = 0\n            total_used_loop_device=0\n            total_used_temporary_disks = 0 \n            total_used_ram_disks = 0\n            total_used_unknown_fs = 0\n            actual_temp_disk_used = 0\n            total_sd_size = 0\n            network_fs_types = []\n            unknown_fs_types = []\n            excluded_disks_used = 0\n            totalSpaceUsed = 0\n            device_list = []\n      \n            if len(self.file_systems_info) == 0 :\n                self.file_systems_info = disk_util.get_mount_file_systems()\n\n            output_length = len(output)\n            index = 1\n            self.resource_disk = ResourceDiskUtil(patching = self.patching, logger = self.logger)\n            resource_disk_device = self.resource_disk.get_resource_disk_mount_point(0)\n            self.logger.log(\"resource_disk_device: {0}\".format(resource_disk_device),True)\n            resource_disk_device = \"/dev/{0}\".format(resource_disk_device)\n            self.logger.log(\"ResourceDisk is excluded in billing as it represents the Actual Temporary disk\")\n\n            if(self.LunListEmpty != True and self.isAnyDiskExcluded == True):\n                device_list = self.device_list_for_billing() #new logic: calculate the disk size for billing\n\n     
       while index < output_length:\n                if(len(Utils.HandlerUtil.HandlerUtility.split(self.logger, output[index])) < 6 ): #when a row is divided in 2 lines\n                    index = index+1\n                    if(index < output_length and len(Utils.HandlerUtil.HandlerUtility.split(self.logger, output[index-1])) + len(Utils.HandlerUtil.HandlerUtility.split(self.logger, output[index])) == 6):\n                        output[index] = output[index-1] + output[index]\n                    else:\n                        self.logger.log(\"Output of df command is not in desired format\",True)\n                        total_used = 0\n                        self.size_calc_failed = True\n                        break\n                device, size, used, available, percent, mountpoint =Utils.HandlerUtil.HandlerUtility.split(self.logger, output[index])\n                fstype = ''\n                isNetworkFs = False\n                isKnownFs = False\n\n                if int(used) < 0 :\n                    self.logger.log(\"The used space is negative, so marking the size computation as failed and returning zero\")\n                    self.size_calc_failed = True\n                    return 0,self.size_calc_failed\n\n                for file_system_info in self.file_systems_info:\n                    if device == file_system_info[0] and mountpoint == file_system_info[2]:\n                        fstype = file_system_info[1]\n                self.logger.log(\"index :{0} Device name : {1} fstype : {2} size : {3} used space in KB : {4} available space : {5} mountpoint : {6}\".format(index,device,fstype,size,used,available,mountpoint),True)\n\n                for nonPhysicaFsType in self.non_physical_file_systems:\n                    if nonPhysicaFsType in fstype.lower():\n                        isNetworkFs = True\n                        break\n\n                for knownFs in self.known_fs:\n                    if knownFs in fstype.lower():\n                        isKnownFs = True\n                        break\n\n                if device == resource_disk_device and self.isOnlyOSDiskBackupEnabled == False : # adding log to check difference in billing of temp disk\n                    self.logger.log(\"Actual temporary disk, Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    actual_temp_disk_used= int(used)\n                \n                if device in device_list and device != resource_disk_device :\n                    self.logger.log(\"Adding sd* partition, Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    total_sd_size = total_sd_size + int(used) #calcutale total sd* size just skip temp disk\n\n                if not (isKnownFs or fstype == '' or fstype == None):\n                    unknown_fs_types.append(fstype)\n\n                if isNetworkFs :\n                    if fstype not in network_fs_types :\n                        network_fs_types.append(fstype)\n                    self.logger.log(\"Not Adding network-drive, Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    total_used_network_shares = total_used_network_shares + int(used)\n\n                elif device == \"/dev/sdb1\"  and self.isOnlyOSDiskBackupEnabled == False : #<todo> in some cases root is mounted on /dev/sdb1\n                    self.logger.log(\"Not Adding temporary disk, Device name : {0} used space in KB : {1} fstype : 
{2}\".format(device,used,fstype),True)\n                    total_used_temporary_disks = total_used_temporary_disks + int(used)\n\n                elif \"tmpfs\" in fstype.lower() or \"devtmpfs\" in fstype.lower() or \"ramdiskfs\" in fstype.lower() or \"rootfs\" in fstype.lower():\n                    self.logger.log(\"Not Adding RAM disks, Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    total_used_ram_disks = total_used_ram_disks + int(used)\n\n                elif 'loop' in device and device not in disk_loop_devices_file_systems:\n                    self.logger.log(\"Not Adding Loop Device , Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    total_used_loop_device = total_used_loop_device + int(used)\n\n                elif (mountpoint.startswith('/run/gluster/snaps/')):\n                    self.logger.log(\"Not Adding Gluster Device , Device name : {0} used space in KB : {1} mount point : {2}\".format(device,used,mountpoint),True)\n                    total_used_gluster = total_used_gluster + int(used)\n\n                elif device.startswith( '\\\\\\\\' ) or device.startswith( '//' ):\n                    self.logger.log(\"Not Adding network-drive as it starts with slahes, Device name : {0} used space in KB : {1} fstype : {2}\".format(device,used,fstype),True)\n                    total_used_network_shares = total_used_network_shares + int(used)\n\n                else:\n                    #Only when OS disk is included\n                    if(self.isOnlyOSDiskBackupEnabled == True):\n                        if(mountpoint == '/'):\n                            total_used = total_used + int(used)\n                            self.logger.log(\"Adding only root device to size calculation. 
Device name : {0} used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                            self.logger.log(\"Total Used Space: {0}\".format(total_used),True)\n                    #Handling a case where LunList is empty for UnmanagedVM's and failures if occurred( as we will billing for all the non resource disks)\n                    elif( (self.size_calc_failed == True or self.LunListEmpty == True) and device != resource_disk_device):\n                        self.logger.log(\"Adding Device name : {0} for billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                        total_used = total_used + int(used) #return in KB\n                    #LunList is empty but the device is an actual temporary disk so excluding it\n                    elif( (self.size_calc_failed == True or self.LunListEmpty == True) and device == resource_disk_device):\n                        self.logger.log(\"Device {0} is not included for billing as it is a resource disk, used space in KB : {1} mount point : {2} fstype :{3}\".format(device,used,mountpoint,fstype),True)\n                        excluded_disks_used = excluded_disks_used + int(used)\n                    #Including only the disks which are asked to include (Here LunList can't be empty this case is handled at the CRP end)\n                    else:\n                        if self.isAnyDiskExcluded == False and device != resource_disk_device:\n                            #No disk has been excluded So can include every non resource disk\n                            self.logger.log(\"Adding Device name : {0} for billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                            total_used = total_used + int(used) #return in KB\n                        elif self.isAnyDiskExcluded == False and device == resource_disk_device:\n                            #excluding resource disk even in the case where all disks are included as it is the actual temporary disk\n                            self.logger.log(\"Device {0} is not included for billing as it is a resource disk, used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                            excluded_disks_used = excluded_disks_used + int(used)\n                        elif mountpoint in self.device_mount_points and device != resource_disk_device:\n                            self.logger.log(\"Adding Device name : {0} for billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                            total_used = total_used + int(used) #return in KB\n                        elif device != resource_disk_device and -1 in self.includedLunList:\n                            if mountpoint in self.root_mount_points :\n                                self.logger.log(\"Adding Device name : {0} for billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                                total_used = total_used + int(used) #return in KB\n                            else:\n                                #check for logicalVolumes\n                                templgv = device.split(\"-\")\n                                if(len(templgv) > 1 and templgv[0] in self.logicalVolume_to_bill):\n                                    self.logger.log(\"Adding Device name : {0} for 
billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                                    total_used = total_used + int(used) #return in KB\n                                else:\n                                    self.logger.log(\"Device {0} is not included for billing as it is not part of the disks to be included, used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                                    excluded_disks_used = excluded_disks_used + int(used)\n                        else:\n                            # check for logicalVolumes even if os disk is not included\n                            templgv = device.split(\"-\")\n                            if(len(templgv) > 1 and templgv[0] in self.logicalVolume_to_bill):\n                                self.logger.log(\"Adding Device name : {0} for billing used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                                total_used = total_used + int(used) #return in KB\n                            else:\n                                self.logger.log(\"Device {0} is not included for billing as it is not part of the disks to be included, used space in KB : {1} mount point : {2} fstype : {3}\".format(device,used,mountpoint,fstype),True)\n                                excluded_disks_used = excluded_disks_used + int(used)\n                    if not (isKnownFs or fstype == '' or fstype == None):\n                        total_used_unknown_fs = total_used_unknown_fs + int(used)\n\n                index = index + 1\n\n            if not len(unknown_fs_types) == 0:\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"unknownFSTypeInDf\",str(unknown_fs_types))\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"totalUsedunknownFS\",str(total_used_unknown_fs))\n                self.logger.log(\"Total used space in Bytes of unknown FSTypes : {0}\".format(total_used_unknown_fs * 1024),True)\n\n            if total_used_temporary_disks != actual_temp_disk_used :\n                self.logger.log(\"Billing differenct because of incorrect temp disk: {0}\".format(str(total_used_temporary_disks - actual_temp_disk_used)))\n\n            if not len(network_fs_types) == 0:\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"networkFSTypeInDf\",str(network_fs_types))\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"totalUsedNetworkShare\",str(total_used_network_shares))\n                self.logger.log(\"Total used space in Bytes of network shares : {0}\".format(total_used_network_shares * 1024),True)\n            if total_used_gluster !=0 :\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"glusterFSSize\",str(total_used_gluster))\n            if total_used_temporary_disks !=0:\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"tempDisksSize\",str(total_used_temporary_disks))\n            if total_used_ram_disks != 0:\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"ramDisksSize\",str(total_used_ram_disks))\n            if total_used_loop_device != 0 :\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"loopDevicesSize\",str(total_used_loop_device))\n            totalSpaceUsed = total_used + excluded_disks_used\n            self.logger.log(\"TotalUsedSpace ( both included and excluded disks ) 
in Bytes : {0} , TotalUsedSpaceAfterExcludeLUN in Bytes : {1} , TotalLUNExcludedUsedSpace in Bytes : {2} \".format(totalSpaceUsed *1024 , total_used * 1024 , excluded_disks_used *1024 ),True)\n            if total_sd_size != 0 :\n                Utils.HandlerUtil.HandlerUtility.add_to_telemetery_data(\"totalsdSize\",str(total_sd_size))\n            self.logger.log(\"Total sd* used space in Bytes : {0}\".format(total_sd_size * 1024),True)\n            self.logger.log(\"SizeComputationFailedFlag {0}\".format(self.size_calc_failed))\n            return total_used * 1024,self.size_calc_failed #Converting into Bytes\n        except Exception as e:\n            errMsg = 'Unable to fetch total used space with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg,True)\n            self.size_calc_failed = True\n            return 0,self.size_calc_failed\n"
  },
  {
    "path": "VMBackup/main/Utils/Status.py",
    "content": "import json\n\nclass TopLevelStatus:\n    def __init__(self, version, timestampUTC, status):\n        self.version = version\n        self.timestampUTC = timestampUTC\n        self.status = status\n\n    def convertToDictionary(self):\n        return dict(version = self.version, timestampUTC = self.timestampUTC, status = self.status)\n\nclass StatusObj:\n    def __init__(self, name, operation, status, substatus, code, formattedMessage, telemetrydata, storageDetails, uniqueMachineId, taskId, commandStartTimeUTCTicks, snapshotInfo, vmHealthInfo):\n        self.name = name\n        self.operation = operation\n        self.status = status\n        self.substatus = substatus\n        self.code = code\n        self.formattedMessage = formattedMessage\n        self.telemetryData = telemetrydata\n        self.storageDetails = storageDetails\n        self.uniqueMachineId = uniqueMachineId\n        self.taskId = taskId\n        self.commandStartTimeUTCTicks = commandStartTimeUTCTicks\n        self.snapshotInfo = snapshotInfo\n        self.vmHealthInfo = vmHealthInfo\n        \n    def convertToDictionary(self):\n        return dict(name = self.name, operation = self.operation, status = self.status, substatus = self.substatus, code = self.code, taskId = self.taskId, formattedMessage = self.formattedMessage, storageDetails = self.storageDetails, commandStartTimeUTCTicks = self.commandStartTimeUTCTicks, telemetryData = self.telemetryData, uniqueMachineId = self.uniqueMachineId, snapshotInfo = self.snapshotInfo, vmHealthInfo = self.vmHealthInfo)\n\n\nclass VmHealthInfoObj:\n    def __init__(self, vmHealthState, vmHealthStatusCode):\n        self.vmHealthState = vmHealthState\n        self.vmHealthStatusCode = vmHealthStatusCode\n\n    def convertToDictionary(self):\n        return dict(vmHealthState = self.vmHealthState,vmHealthStatusCode = self.vmHealthStatusCode)\n\nclass SubstatusObj:\n    def __init__(self, code, name, status, formattedMessage):\n        self.code = code\n        self.name = name\n        self.status = status\n        self.formattedMessage = formattedMessage\n        \n    def convertToDictionary(self):\n        return dict(code = self.code, name = self.name, status = self.status, formattedMessage = self.formattedMessage)\n\nclass StorageDetails:\n    def __init__(self, partitionCount, totalUsedSizeInBytes, isStoragespacePresent, isSizeComputationFailed):\n        self.partitionCount =  partitionCount\n        self.totalUsedSizeInBytes = totalUsedSizeInBytes\n        self.isStoragespacePresent = isStoragespacePresent\n        self.isSizeComputationFailed = isSizeComputationFailed\n\n    def convertToDictionary(self):\n        return dict(partitionCount = self.partitionCount, totalUsedSizeInBytes = self.totalUsedSizeInBytes, isStoragespacePresent = self.isStoragespacePresent, isSizeComputationFailed = self.isSizeComputationFailed)\n\nclass SnapshotInfoObj:\n    def __init__(self, isSuccessful, snapshotUri, errorMessage, blobUri, directDriveSnapshotIdentifier = None):\n        self.isSuccessful = isSuccessful\n        self.snapshotUri = snapshotUri  # snapshotUri is populated only for XStore disks (will be None for DD disks)\n        self.errorMessage = errorMessage\n        self.blobUri = blobUri  # blobUri is populated for both XStore and DD disks (this is base blobUri, NOT snapshotUri)\n        self.directDriveSnapshotIdentifier = directDriveSnapshotIdentifier  # This is populated only for DD disks (will be None for XStore disks)\n \n    def 
convertToDictionary(self):\n        return dict(isSuccessful = self.isSuccessful, snapshotUri = self.snapshotUri, errorMessage = self.errorMessage, blobUri = self.blobUri, directDriveSnapshotIdentifier = self.directDriveSnapshotIdentifier)\n\nclass DirectDriveSnapshotIdentifier:\n    def __init__(self, creationTime, id, token, instantAccessDurationMinutes = None):\n        self.creationTime = creationTime\n        self.id = id\n        self.token = token\n        self.instantAccessDurationMinutes = instantAccessDurationMinutes # This is populated for DD disk with Instant Access snapshot\n\n    def convertToDictionary(self):\n        return dict(creationTime = self.creationTime, id = self.id, token = self.token, instantAccessDurationMinutes = self.instantAccessDurationMinutes)\n\nclass CreationTime:\n    def __init__(self, DateTime, OffsetMinutes):\n        self.DateTime = DateTime\n        self.OffsetMinutes = OffsetMinutes\n\n    def convertToDictionary(self):\n        return dict(DateTime = self.DateTime, OffsetMinutes = self.OffsetMinutes)\n\nclass FormattedMessage:\n    def __init__(self, lang, message):\n        self.lang = lang\n        self.message = message\n\nclass ExtVmHealthStateEnum():\n    green = 0\n    yellow = 128\n    red = 256\n\nclass SnapshotConsistencyType():\n    none = 0\n    fileSystemConsistent = 1\n    applicationConsistent = 2\n    crashConsistent = 3\n\nclass ExtensionResponse:\n    def __init__(self, messageStr, snapshotConsistency, jobMessage, failure_flag):\n        self.messageStr = messageStr\n        self.snapshotConsistency = snapshotConsistency\n        self.jobMessage = jobMessage\n        self.failure_flag = failure_flag\n\n    def convertToDictionary(self):\n        return dict(messageStr = self.messageStr, snapshotConsistency = self.snapshotConsistency, jobMessage = self.jobMessage, isSizeComputationFailed = self.failure_flag)\n\n"
  },
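  {
    "path": "VMBackup/main/Utils/examples/status_payload_example.py",
    "content": "# Illustrative sketch (an added example file, not part of the original extension source).\n# Builds a minimal status payload from the classes above; _Encoder mirrors the\n# ComplexEncoder in Utils/HandlerUtil.py so nested objects are converted through\n# convertToDictionary(). All field values are made-up sample data.\nimport json\nfrom Utils.Status import (ExtVmHealthStateEnum, FormattedMessage, SnapshotInfoObj,\n                          StatusObj, TopLevelStatus, VmHealthInfoObj)\n\nclass _Encoder(json.JSONEncoder):  # mirrors ComplexEncoder in Utils/HandlerUtil.py\n    def default(self, obj):\n        if hasattr(obj, 'convertToDictionary'):\n            return obj.convertToDictionary()\n        return obj.__dict__\n\nsnap = SnapshotInfoObj(isSuccessful=True,\n                       snapshotUri='https://example.blob.core.windows.net/vhds/os.vhd?snapshot=2024-01-01T00:00:00Z',\n                       errorMessage=None,\n                       blobUri='https://example.blob.core.windows.net/vhds/os.vhd')\nstatus = StatusObj(name='VMSnapshot', operation='enable', status='success',\n                   substatus=[], code=0,\n                   formattedMessage=FormattedMessage('en-US', 'Backup succeeded'),\n                   telemetrydata=None, storageDetails=None,\n                   uniqueMachineId='sample-machine-id', taskId='sample-task-id',\n                   commandStartTimeUTCTicks='0', snapshotInfo=[snap],\n                   vmHealthInfo=VmHealthInfoObj(ExtVmHealthStateEnum.green, 0))\nprint(json.dumps(TopLevelStatus('1.0', '2024-01-01T00:00:00Z', [status]), cls=_Encoder))\n"
  },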
  {
    "path": "VMBackup/main/Utils/StringHelper.py",
    "content": "import datetime\n\nclass StringHelper:\n    def resolve_string(self,severity_level, message):\n        try:\n            msg_body = datetime.datetime.utcnow().isoformat() + \"\\t\" + \"[\" + severity_level + \"]:\\t\"\n            if message and message.strip():\n                msg_body += message + \" \"\n            msg_body += \"\\n\"\n            return msg_body\n        except Exception as e:\n            pass\n"
  },
  {
    "path": "VMBackup/main/Utils/WAAgentUtil.py",
    "content": "# Wrapper module for waagent\n#\n# waagent is not written as a module. This wrapper module is created \n# to use the waagent code as a module.\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n    # For Python 3.5 and later, use importlib\n    import importlib.util\n    has_importlib_util = True\nexcept ImportError:\n    has_importlib_util = False\n\ntry:\n    import imp as imp\n    has_imp = True\nexcept ImportError:\n    has_imp = False\n    \nif not has_importlib_util and not has_imp:\n    raise ImportError(\"Neither importlib.util nor imp module is available\")\n\nimport os\nimport os.path\n\n#\n# The following code will search and load waagent code and expose\n# it as a submodule of current module\n#\ndef searchWAAgent():\n    agentPath = os.path.join(os.getcwd(), \"main/WaagentLib.py\")\n    if(os.path.isfile(agentPath)):\n        return agentPath\n    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n    for user_path in user_paths:\n        agentPath = os.path.join(user_path, 'waagent')\n        if(os.path.isfile(agentPath)):\n            return agentPath\n    return None\n\ndef searchWAAgentOld():\n    agentPath = '/usr/sbin/waagent'\n    if(os.path.isfile(agentPath)):\n        return agentPath\n    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n    for user_path in user_paths:\n        agentPath = os.path.join(user_path, 'waagent')\n        if(os.path.isfile(agentPath)):\n            return agentPath\n    return None\n\npathUsed = 1 \ntry:\n    agentPath = searchWAAgent()\n    if agentPath is None:\n       pathUsed = 0\n       # Search for the old agent path if the new one is not found\n       agentPath = searchWAAgentOld()\n    if agentPath:\n        if has_importlib_util:\n            # For Python 3.5 and later, use importlib\n            spec = importlib.util.spec_from_file_location('waagent', agentPath)\n            waagent = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(waagent)       \n        elif has_imp:\n            # For Python 3.4 and earlier, use imp module\n            waagent = imp.load_source('waagent', agentPath)\n        else:\n            raise Exception(\"No suitable import mechanism available.\")\n    else:\n        raise Exception(\"Can't load new or old waagent. 
Agent path not found.\")\nexcept Exception as e:\n    raise Exception(str(e))\n\nif not hasattr(waagent, \"AddExtensionEvent\"):\n    \"\"\"\n    If AddExtensionEvent is not defined, provide a dummy impl.\n    \"\"\"\n    def _AddExtensionEvent(*args, **kwargs):\n        pass\n    waagent.AddExtensionEvent = _AddExtensionEvent\n\nif not hasattr(waagent, \"WALAEventOperation\"):\n    class _WALAEventOperation:\n        HeartBeat = \"HeartBeat\"\n        Provision = \"Provision\"\n        Install = \"Install\"\n        UnIsntall = \"UnInstall\"\n        Disable = \"Disable\"\n        Enable = \"Enable\"\n        Download = \"Download\"\n        Upgrade = \"Upgrade\"\n        Update = \"Update\"           \n    waagent.WALAEventOperation = _WALAEventOperation\n\n__ExtensionName__ = None\ndef InitExtensionEventLog(name):\n    __ExtensionName__ = name\n\ndef AddExtensionEvent(name=__ExtensionName__,\n                      op=waagent.WALAEventOperation.Enable, \n                      isSuccess=False, \n                      message=None):\n    if name is not None:\n        waagent.AddExtensionEvent(name=name,\n                                  op=op,\n                                  isSuccess=isSuccess,\n                                  message=message)\n\ndef GetPathUsed():\n    return pathUsed\n"
  },
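  {
    "path": "VMBackup/main/Utils/examples/waagentutil_usage_example.py",
    "content": "# Illustrative sketch (an added example file, not part of the original extension source).\n# Shows the intended calling pattern for the wrapper above: register the extension\n# name once, then report events through the discovered (or stubbed) waagent module.\n# This only runs where the wrapper can locate WaagentLib.py or /usr/sbin/waagent,\n# because that search happens at import time. The extension name is a sample value.\nimport Utils.WAAgentUtil as wa\n\nwa.InitExtensionEventLog('Microsoft.Azure.RecoveryServices.VMSnapshotLinux')\nwa.AddExtensionEvent(op=wa.waagent.WALAEventOperation.Enable,\n                     isSuccess=True,\n                     message='snapshot completed')\nprint('waagent found via new path' if wa.GetPathUsed() == 1 else 'waagent found via old path')\n"
  },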
  {
    "path": "VMBackup/main/Utils/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n"
  },
  {
    "path": "VMBackup/main/Utils/dhcpUtils.py",
    "content": "# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+ and Openssl 1.0+\n\nimport sys\nimport platform \nimport os \nimport subprocess \nimport socket\nimport array\nimport struct\nfrom uuid import getnode as get_mac\nimport json\n\n# Utils for sample.py\n\ndef make_address(ip_address):\n    '''\n    Returns the address for scheduled event endpoint from the IP address provided.\n    '''\n    return 'http://' + ip_address + '/metadata/scheduledevents?api-version=2017-03-01'\n\ndef check_ip_address(address, headers):\n    '''\n    Checks whether the address of the scheduled event endpoint is valid.\n    '''\n    try:\n        response = get_scheduled_events(address, headers)\n        return 'Events' in json.loads(response.read().decode('utf-8'))\n    except (urllib.error.URLError, UnicodeDecodeError, json.decoder.JSONDecodeError) as _:\n        return False\n\ndef get_ip_address_reg_env(use_registry):\n    '''\n    Get the IP address of scheduled event from registry or environment.\n    Returns None if IP address is not provided or stored.\n    '''\n    ip_address = None\n    if use_registry and sys.platform == 'win32':\n        import winreg as wreg\n        # use ScheduledEventsIp in registry if available.\n        try:\n            key = wreg.OpenKey(wreg.HKEY_LOCAL_MACHINE, \"Software\\\\ScheduledEvents\")\n            ip_address = wreg.QueryValueEx(key, 'ScheduledEventsIp')[0]\n            key.Close()\n        except FileNotFoundError:\n            pass\n    elif sys.platform == 'win32' or \"linux\" in sys.platform:\n        # use SCHEDULEDEVENTSIP in system variables if available.\n        ip_address = os.getenv('SCHEDULEDEVENTSIP')\n\n    return ip_address\n\n\n# Utils for discovery.py\n\ndef unpack(buf, offset, range):\n    \"\"\"\n    Unpack bytes into python values.\n    \"\"\"\n    result = 0\n    for i in range:\n        result = (result << 8) | str_to_ord(buf[offset + i])\n    return result\n        \ndef unpack_big_endian(buf, offset, length):\n    \"\"\"\n    Unpack big endian bytes into python values.\n    \"\"\"\n    return unpack(buf, offset, list(range(0, length)))\n\ndef hex_dump3(buf, offset, length):\n    \"\"\"\n    Dump range of buf in formatted hex.\n    \"\"\"\n    return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])\n\ndef hex_dump2(buf):\n    \"\"\"\n    Dump buf in formatted hex.\n    \"\"\"\n    return hex_dump3(buf, 0, len(buf))\n\ndef hex_dump(buffer, size):\n    \"\"\"\n    Return Hex formated dump of a 'buffer' of 'size'.\n    \"\"\"\n    if size < 0:\n        size = len(buffer)\n    result = \"\"\n    for i in range(0, size):\n        if (i % 16) == 0:\n            result += \"%06X: \" % i\n        byte = buffer[i]\n        if type(byte) == str:\n            byte = ord(byte.decode('latin1'))\n        result += \"%02X \" % byte\n        if (i & 15) == 7:\n            result += \" \"\n        if ((i + 1) % 16) == 0 or (i + 1) == size:\n            j = 
i\n            while ((j + 1) % 16) != 0:\n                result += \"   \"\n                if (j & 7) == 7:\n                    result += \" \"\n                j += 1\n            result += \" \"\n            for j in range(i - (i % 16), i + 1):\n                byte = buffer[j]\n                if type(byte) == str:\n                    byte = str_to_ord(byte.decode('latin1'))\n                k = '.'\n                if is_printable(byte):\n                    k = chr(byte)\n                result += k\n            if (i + 1) != size:\n                result += \"\\n\"\n    return result\n\ndef str_to_ord(a):\n    \"\"\"\n    Allows indexing into a string or an array of integers transparently.\n    Generic utility function.\n    \"\"\"\n    if type(a) == type(b'') or type(a) == type(u''):\n        a = ord(a)\n    return a\n\ndef compare_bytes(a, b, start, length):\n    for offset in range(start, start + length):\n        if str_to_ord(a[offset]) != str_to_ord(b[offset]):\n            return False\n    return True\n\ndef int_to_ip4_addr(a):\n    \"\"\"\n    Build DHCP request string.\n    \"\"\"\n    return \"%u.%u.%u.%u\" % ((a >> 24) & 0xFF,\n                            (a >> 16) & 0xFF,\n                            (a >> 8) & 0xFF,\n                            (a) & 0xFF)\n\n\ndef hexstr_to_bytearray(a):\n    \"\"\"\n    Return hex string packed into a binary struct.\n    \"\"\"\n    b = b\"\"\n    for c in range(0, len(a) // 2):\n        b += struct.pack(\"B\", int(a[c * 2:c * 2 + 2], 16))\n    return b\n\ndef is_printable(ch):\n    \"\"\"\n    Return True if character is displayable.\n    \"\"\"\n    return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))\n            or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))\n            or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))\n\t\t\t\ndef is_in_range(a, low, high):\n    \"\"\"\n    Return True if 'a' in 'low' <= a >= 'high'\n    \"\"\"\n    return (a >= low and a <= high)\n                                                        \nif not hasattr(subprocess,'check_output'): \n    def check_output(*popenargs, **kwargs): \n        r\"\"\"Backport from subprocess module from python 2.7\"\"\" \n        if 'stdout' in kwargs: \n            raise ValueError('stdout argument not allowed, ' \n                             'it will be overridden.') \n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) \n        output, unused_err = process.communicate() \n        retcode = process.poll() \n        if retcode: \n            cmd = kwargs.get(\"args\") \n            if cmd is None: \n                cmd = popenargs[0] \n            raise subprocess.CalledProcessError(retcode, cmd, output=output) \n        return output \n\n    # Exception classes used by this module. \n    class CalledProcessError(Exception): \n        def __init__(self, returncode, cmd, output=None): \n            self.returncode = returncode \n            self.cmd = cmd \n            self.output = output \n        def __str__(self): \n            return (\"Command '{0}' returned non-zero exit status {1}\" \"\").format(self.cmd, self.returncode) \n    subprocess.check_output=check_output \n    subprocess.CalledProcessError=CalledProcessError \n\n\"\"\" \nShell command util functions \n\"\"\" \ndef run(cmd, chk_err=True): \n    \"\"\" \n    Calls run_get_output on 'cmd', returning only the return code. \n    If chk_err=True then errors will be reported in the log. \n    If chk_err=False then errors will be suppressed from the log. 
\n    \"\"\" \n    retcode,out=run_get_output(cmd,chk_err) \n    return retcode \n \ndef run_get_output(cmd, chk_err=True, log_cmd=False): \n    \"\"\" \n    Wrapper for subprocess.check_output. \n    Execute 'cmd'.  Returns return code and STDOUT, trapping expected exceptions. \n    Reports exceptions to Error if chk_err parameter is True \n    \"\"\" \n    try: \n        output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) \n        output = ustr(output, encoding='utf-8', errors=\"backslashreplace\") \n    except subprocess.CalledProcessError as e : \n        output = ustr(e.output, encoding='utf-8', errors=\"backslashreplace\") \n        return e.returncode, output  \n    return 0, output \n\n# End shell command util functions \n\nclass DefaultOSUtil(object):\n\n    def __init__(self, logger):\n        self.logger = logger\n\n    def get_mac_in_bytes(self):\n        mac = get_mac()\n        machex = '%012x' % mac\n        try:\n            macb = bytearray.fromhex(machex)\n        except TypeError:\n            # Work-around for Python 2.6 bug \n            macb = bytearray.fromhex(unicode(machex))\n        return macb\n        \n    def allow_dhcp_broadcast(self):\n        #Open DHCP port if iptables is enabled.\n        # We supress error logging on error.\n        run(\"iptables -D INPUT -p udp --dport 68 -j ACCEPT\",\n            chk_err=False)\n        run(\"iptables -I INPUT -p udp --dport 68 -j ACCEPT\",\n            chk_err=False)\n\n    def get_first_if(self):\n        \"\"\"\n        Return the interface name, and ip addr of the\n        first active non-loopback interface.\n        \"\"\"\n        iface=''\n        expected=16 # how many devices should I expect...\n        struct_size=40 # for 64bit the size is 40 bytes\n        sock = socket.socket(socket.AF_INET,\n                             socket.SOCK_DGRAM,\n                             socket.IPPROTO_UDP)\n        buff=array.array('B', b'\\0' * (expected * struct_size))\n        param = struct.pack('iL',\n                            expected*struct_size,\n                            buff.buffer_info()[0])\n        ret = fcntl.ioctl(sock.fileno(), 0x8912, param)\n        retsize=(struct.unpack('iL', ret)[0])\n        if retsize == (expected * struct_size):\n            self.logger.log(('SIOCGIFCONF returned more than {0} up network interfaces.').format(expected))\n        sock = buff.tostring()\n        primary = bytearray(self.get_primary_interface(), encoding='utf-8')\n        for i in range(0, struct_size * expected, struct_size):\n            iface=sock[i:i+16].split(b'\\0', 1)[0]\n            if len(iface) == 0 or self.is_loopback(iface) or iface != primary:\n                # test the next one\n                self.logger.log('interface [{0}] skipped'.format(iface))\n                continue\n            else:\n                # use this one\n                self.logger.log('interface [{0}] selected'.format(iface))\n                break\n\n        return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24])\n\n    def get_primary_interface(self):\n        \"\"\"\n        Get the name of the primary interface, which is the one with the\n        default route attached to it; if there are multiple default routes,\n        the primary has the lowest Metric.\n        :return: the interface which has the default route\n        \"\"\"\n        # from linux/route.h\n        RTF_GATEWAY = 0x02\n        DEFAULT_DEST = \"00000000\"\n\n        hdr_iface = \"Iface\"\n        hdr_dest = 
\"Destination\"\n        hdr_flags = \"Flags\"\n        hdr_metric = \"Metric\"\n\n        idx_iface = -1\n        idx_dest = -1\n        idx_flags = -1\n        idx_metric = -1\n        primary = None\n        primary_metric = None\n\n        self.logger.log(\"examine /proc/net/route for primary interface\")\n        with open('/proc/net/route') as routing_table:\n            idx = 0\n            for header in list(filter(lambda h: len(h) > 0, routing_table.readline().strip(\" \\n\").split(\"\\t\"))):\n                if header == hdr_iface:\n                    idx_iface = idx\n                elif header == hdr_dest:\n                    idx_dest = idx\n                elif header == hdr_flags:\n                    idx_flags = idx\n                elif header == hdr_metric:\n                    idx_metric = idx\n                idx = idx + 1\n            for entry in routing_table.readlines():\n                route = entry.strip(\" \\n\").split(\"\\t\")\n                if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY:\n                    metric = int(route[idx_metric])\n                    iface = route[idx_iface]\n                    if primary is None or metric < primary_metric:\n                        primary = iface\n                        primary_metric = metric\n\n        if primary is None:\n            primary = ''\n\n        self.logger.log('primary interface is [{0}]'.format(primary))\n        return primary\n\n    def is_loopback(self, ifname):\n        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n        result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15]))\n        flags, = struct.unpack('H', result[16:18])\n        isloopback = flags & 8 == 8\n        self.logger.log('interface [{0}] has flags [{1}], is loopback [{2}]'.format(ifname, flags, isloopback))\n        return isloopback\n\n    def get_ip4_addr(self):\n        return self.get_first_if()[1]\n\n    def start_network(self):\n        pass\n\n    def route_add(self, net, mask, gateway):\n        \"\"\"\n        Add specified route using /sbin/route add -net.\n        \"\"\"\n        cmd = (\"/sbin/route add -net \"\n               \"{0} netmask {1} gw {2}\").format(net, mask, gateway)\n        return run(cmd, chk_err=False)\n                \n\"\"\"\nAdd alies for python2 and python3 libs and fucntions.\n\"\"\"\n\nif sys.version_info[0]== 3:\n    import http.client as httpclient\n    from urllib.parse import urlparse\n\n    \"\"\"Rename Python3 str to ustr\"\"\"\n    ustr = str\n\n    bytebuffer = memoryview\n\n    read_input = input\n\nelif sys.version_info[0] == 2:\n    import httplib as httpclient\n    from urlparse import urlparse\n\n    \"\"\"Rename Python2 unicode to ustr\"\"\"\n    ustr = unicode\n\n    bytebuffer = buffer\n\n    read_input = raw_input\n\nelse:\n    raise ImportError(\"Unknown python version:{0}\".format(sys.version_info))\n"
  },
  {
    "path": "VMBackup/main/VMSnapshotPluginHost.conf",
    "content": "[pre_post]\ntimeoutInSeconds: 1800\nnumberOfPlugins: 1\npluginName0: ScriptRunner\npluginPath0: /etc/azure\npluginConfigPath0: /etc/azure/VMSnapshotScriptPluginConfig.json\n"
  },
  {
    "path": "VMBackup/main/WaagentLib.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Linux Agent\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.6+ and Openssl 1.0+\n#\n# Implements parts of RFC 2131, 1541, 1497 and\n# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx\n#\n\n# TODO: Many classes, methods, and imports in this file might not be needed by VM Backup Extension\n# and should be removed to reduce file size and eliminate unnecessary dependencies.\n# Future cleanup should analyze actual VMBackup usage and remove unused code.\n\n# Note: crypt module deprecated in Python 3.13+, but gen_password_hash() is not used by VMBackup\ntry:\n    import crypt\nexcept ImportError:\n    # Python 3.13+ removed crypt module, but VMBackup doesn't use password functions\n    crypt = None\nimport random\nimport base64\n\ntry:\n    import httplib as httplibs\nexcept ImportError:\n    import http.client as httplibs\nimport os\nimport os.path\nimport platform\nimport pwd\nimport re\nimport shutil\nimport socket\ntry:\n    import SocketServer as SocketServers\nexcept ImportError:\n    import socketserver as SocketServers\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nimport time\nimport traceback\nimport xml.dom.minidom\nimport inspect\nimport zipfile\nimport json\nimport datetime\nimport xml.sax.saxutils\ntry:\n    from packaging.version import Version as LooseVersion\nexcept ImportError:\n    try:\n        from distutils.version import LooseVersion\n    except ImportError:\n        # Fallback for environments without packaging or distutils\n        class LooseVersion:\n            \"\"\"\n            Custom version comparison class that implements semantic versioning.\n            \n            Examples of version comparisons that work correctly:\n            - LooseVersion(\"10.0\") > LooseVersion(\"2.0\")     # True (10 > 2, not string \"10.0\" < \"2.0\")\n            - LooseVersion(\"1.10\") > LooseVersion(\"1.2\")     # True (10 > 2 in minor version)\n            - LooseVersion(\"2.1.3\") > LooseVersion(\"2.1\")    # True (2.1.3 > 2.1.0)\n            - LooseVersion(\"1.0-alpha\") < LooseVersion(\"1.0\") # True (pre-release < release)\n            - LooseVersion(\"1.0-beta\") > LooseVersion(\"1.0-alpha\") # True (beta > alpha)\n            - LooseVersion(\"1.0-rc\") > LooseVersion(\"1.0-beta\")    # True (rc > beta)\n            \n            How parsing works:\n            - \"2.1.3\" → (2, 1, 3)\n            - \"1.0-alpha\" → (1, 0, -1000)  # alpha = -1000 for correct precedence\n            - \"1.0-beta\" → (1, 0, -100)    # beta = -100\n            - \"1.0-rc\" → (1, 0, -10)       # rc = -10\n            - \"1.0\" → (1, 0)               # release version (no negative suffix)\n            \n            Tuple comparison ensures: (1, 0, -1000) < (1, 0, -100) < (1, 0, -10) < (1, 0)\n            \"\"\"\n            def 
__init__(self, version_string):\n                self.version = str(version_string)\n                # Parse version into comparable parts\n                self._parsed = self._parse_version(self.version)\n                \n            def _parse_version(self, version_str):\n                \"\"\"\n                Parse version string into comparable tuple of integers and strings.\n                \n                Parsing examples:\n                - \"2.1.3\" → splits to [\"2\", \"1\", \"3\"] → converts to (2, 1, 3)\n                - \"1.0-alpha\" → splits to [\"1\", \"0\", \"alpha\"] → converts to (1, 0, -1000)\n                - \"1.10.5-beta2\" → splits to [\"1\", \"10\", \"5\", \"beta2\"] → converts to (1, 10, 5, \"beta2\")\n                \"\"\"\n                import re\n                # Split by dots, hyphens, and underscores\n                parts = re.split(r'[.\\-_]', version_str.lower())\n                parsed = []\n                for part in parts:\n                    # Try to convert to int, otherwise keep as string\n                    try:\n                        parsed.append(int(part))\n                    except ValueError:\n                        # Handle pre-release identifiers with negative values for correct precedence\n                        # This ensures: alpha < beta < rc < release\n                        if part in ('alpha', 'a'):\n                            parsed.append(-1000)  # Lowest precedence\n                        elif part in ('beta', 'b'):\n                            parsed.append(-100)   # Medium precedence\n                        elif part in ('rc', 'pre'):\n                            parsed.append(-10)    # High precedence (but still < release)\n                        else:\n                            parsed.append(part)   # Keep as string for mixed alphanumeric\n                return tuple(parsed)\n                \n            def __str__(self):\n                return self.version\n                \n            def __eq__(self, other):\n                if isinstance(other, LooseVersion):\n                    return self._parsed == other._parsed\n                return self._parsed == LooseVersion(other)._parsed\n                \n            def __lt__(self, other):\n                if isinstance(other, LooseVersion):\n                    return self._parsed < other._parsed\n                return self._parsed < LooseVersion(other)._parsed\n                \n            def __le__(self, other):\n                if isinstance(other, LooseVersion):\n                    return self._parsed <= other._parsed\n                return self._parsed <= LooseVersion(other)._parsed\n                \n            def __gt__(self, other):\n                if isinstance(other, LooseVersion):\n                    return self._parsed > other._parsed\n                return self._parsed > LooseVersion(other)._parsed\n                \n            def __ge__(self, other):\n                if isinstance(other, LooseVersion):\n                    return self._parsed >= other._parsed\n                return self._parsed >= LooseVersion(other)._parsed\n\nif not hasattr(subprocess, 'check_output'):\n    def check_output(*popenargs, **kwargs):\n        r\"\"\"Backport from subprocess module from python 2.7\"\"\"\n        if 'stdout' in kwargs:\n            raise ValueError('stdout argument not allowed, it will be overridden.')\n        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n        output, unused_err = 
process.communicate()\n        retcode = process.poll()\n        if retcode:\n            cmd = kwargs.get(\"args\")\n            if cmd is None:\n                cmd = popenargs[0]\n            raise subprocess.CalledProcessError(retcode, cmd, output=output)\n        return output\n\n\n    # Exception classes used by this module.\n    class CalledProcessError(Exception):\n        def __init__(self, returncode, cmd, output=None):\n            self.returncode = returncode\n            self.cmd = cmd\n            self.output = output\n\n        def __str__(self):\n            return \"Command '%s' returned non-zero exit status %d\" % (self.cmd, self.returncode)\n\n\n    subprocess.check_output = check_output\n    subprocess.CalledProcessError = CalledProcessError\n\nGuestAgentName = \"WALinuxAgent\"\nGuestAgentLongName = \"Azure Linux Agent\"\nGuestAgentVersion = \"WALinuxAgent-2.0.16\"\nProtocolVersion = \"2012-11-30\"  # WARNING this value is used to confirm the correct fabric protocol.\n\nConfig = None\nWaAgent = None\nDiskActivated = False\nOpenssl = \"openssl\"\nChildren = []\nExtensionChildren = []\nVMM_STARTUP_SCRIPT_NAME = 'install'\nVMM_CONFIG_FILE_NAME = 'linuxosconfiguration.xml'\nglobal RulesFiles\nRulesFiles = [\"/lib/udev/rules.d/75-persistent-net-generator.rules\",\n              \"/etc/udev/rules.d/70-persistent-net.rules\"]\nVarLibDhcpDirectories = [\"/var/lib/dhclient\", \"/var/lib/dhcpcd\", \"/var/lib/dhcp\"]\nEtcDhcpClientConfFiles = [\"/etc/dhcp/dhclient.conf\", \"/etc/dhcp3/dhclient.conf\"]\nglobal LibDir\nLibDir = \"/var/lib/waagent\"\nglobal provisioned\nprovisioned = False\nglobal provisionError\nprovisionError = None\nHandlerStatusToAggStatus = {\"installed\": \"Installing\", \"enabled\": \"Ready\", \"unintalled\": \"NotReady\",\n                            \"disabled\": \"NotReady\"}\n\nWaagentConf = \"\"\"\\\n#\n# Azure Linux Agent Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y       # Password authentication for root account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ext4            # Typically ext3 or ext4. 
FreeBSD images should use 'ufs2' here.\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\nREADME_FILENAME = \"DATALOSS_WARNING_README.txt\"\nREADME_FILECONTENT = \"\"\"\\\nWARNING: THIS IS A TEMPORARY DISK.\n\nAny data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT.\n\nPlease do not use this disk for storing any personal or application data.\n\nFor additional details, please refer to the MSDN documentation at: http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx\n\"\"\"\n\n\n############################################################\n# BEGIN DISTRO CLASS DEFS\n############################################################\n############################################################\n#\tAbstractDistro\n############################################################\nclass AbstractDistro(object):\n    \"\"\"\n    AbstractDistro defines a skeleton necessary for a concrete Distro class.\n\n    Generic methods and attributes are kept here, distribution-specific attributes\n    and behavior are to be placed in the concrete child named distroDistro, where\n    distro is the string returned by calling python platform.linux_distribution()[0].\n    So for CentOS the derived class is called 'centosDistro'.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  
These are based on 'majority rules'.\n        This __init__() may be called or overridden by the child.\n        \"\"\"\n        self.agent_service_name = os.path.basename(sys.argv[0])\n        self.selinux = None\n        self.service_cmd = '/usr/sbin/service'\n        self.ssh_service_restart_option = 'restart'\n        self.ssh_service_name = 'ssh'\n        self.ssh_config_file = '/etc/ssh/sshd_config'\n        self.hostname_file_path = '/etc/hostname'\n        self.dhcp_client_name = 'dhclient'\n        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod',\n                             'openssl', 'sfdisk', 'fdisk', 'mkfs',\n                             'sed', 'grep', 'sudo', 'parted']\n        self.init_script_file = '/etc/init.d/waagent'\n        self.agent_package_name = 'WALinuxAgent'\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\", '/etc/resolv.conf']\n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\", \"/etc/logrotate.d/waagent\"]\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT='\n        self.getpidcmd = 'pidof'\n        self.mount_dvd_cmd = 'mount'\n        self.sudoers_dir_base = '/etc'\n        self.waagent_conf_file = WaagentConf\n        self.shadow_file_mode = 0o600\n        self.shadow_file_path = \"/etc/shadow\"\n        self.dhcp_enabled = False\n\n    def isSelinuxSystem(self):\n        \"\"\"\n        Checks and sets self.selinux = True if SELinux is available on system.\n        \"\"\"\n        if self.selinux == None:\n            if Run(\"which getenforce\", chk_err=False):\n                self.selinux = False\n            else:\n                self.selinux = True\n        return self.selinux\n\n    def isSelinuxRunning(self):\n        \"\"\"\n        Calls shell command 'getenforce' and returns True if 'Enforcing'.\n        \"\"\"\n        if self.isSelinuxSystem():\n            return RunGetOutput(\"getenforce\")[1].startswith(\"Enforcing\")\n        else:\n            return False\n\n    def setSelinuxEnforce(self, state):\n        \"\"\"\n        Calls shell command 'setenforce' with 'state' and returns resulting exit code.\n        \"\"\"\n        if self.isSelinuxSystem():\n            if state:\n                s = '1'\n            else:\n                s = '0'\n            return Run(\"setenforce \" + s)\n\n    def setSelinuxContext(self, path, cn):\n        \"\"\"\n        Calls shell 'chcon' with 'path' and 'cn' context.\n        Returns exit result.\n        \"\"\"\n        if self.isSelinuxSystem():\n            if not os.path.exists(path):\n                Error(\"Path does not exist: {0}\".format(path))\n                return 1\n            return Run('chcon ' + cn + ' ' + path)\n\n    def setHostname(self, name):\n        \"\"\"\n        Shell call to hostname.\n        Returns resulting exit code.\n        \"\"\"\n        return Run('hostname ' + name)\n\n    def publishHostname(self, name):\n        \"\"\"\n        Set the contents of the hostname file to 'name'.\n        Return 1 on failure.\n        \"\"\"\n        try:\n            r = SetFileContents(self.hostname_file_path, name)\n            for f in EtcDhcpClientConfFiles:\n                if os.path.exists(f) and FindStringInFile(f,\n                                                          r'^[^#]*?send\\s*host-name.*?(<hostname>|gethostname[(,)])') == None:\n                    r = ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', 
\"send host-name \\\"\" + name + \"\\\";\\n\"\n                                                  + \"\\n\".join(list(filter(lambda a: not a.startswith(\"send host-name\"),\n                                                                     GetFileContents('/etc/dhcp/dhclient.conf').split(\n                                                                         '\\n')))))\n        except:\n            return 1\n        return r\n\n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        Create the waagent support files for service installation.\n        Called by registerAgentService()\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def registerAgentService(self):\n        \"\"\"\n        Calls installAgentService to create service files.\n        Shell exec service registration commands. (e.g. chkconfig --add waagent)\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def uninstallAgentService(self):\n        \"\"\"\n        Call service subsystem to remove waagent script.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Calls self.stopAgentService and call self.uninstallAgentService()\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n\n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' ' + self.agent_service_name + ' start')\n\n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop', False)\n\n    def restartSshService(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" + self.ssh_service_name + \" \" + self.ssh_service_restart_option\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code:\" + str(retcode))\n        return retcode\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Query package database for prescence of an installed package.\n        Abstract Virtual Function.  Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def checkPackageUpdateable(self, p):\n        \"\"\"\n        Online check if updated package of walinuxagent is available.\n        Abstract Virtual Function.  
Over-ridden in concrete Distro classes.\n        \"\"\"\n        pass\n\n    def deleteRootPassword(self):\n        \"\"\"\n        Generic root password removal.\n        \"\"\"\n        filepath = \"/etc/shadow\"\n        ReplaceFileContentsAtomic(filepath, \"root:*LOCK*:14600::::::\\n\"\n                                  + \"\\n\".join(\n            list(filter(lambda a: not a.startswith(\"root:\"), GetFileContents(filepath).split('\\n')))))\n        os.chmod(filepath, self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')\n        Log(\"Root password deleted.\")\n        return 0\n\n    def changePass(self, user, password):\n        Log(\"Change user password\")\n        crypt_id = Config.get(\"Provisioning.PasswordCryptId\")\n        if crypt_id is None:\n            crypt_id = \"6\"\n\n        salt_len = Config.get(\"Provisioning.PasswordCryptSaltLength\")\n        try:\n            salt_len = int(salt_len)\n            if salt_len < 0 or salt_len > 10:\n                salt_len = 10\n        except (ValueError, TypeError):\n            salt_len = 10\n\n        return self.chpasswd(user, password, crypt_id=crypt_id,\n                             salt_len=salt_len)\n\n    def chpasswd(self, username, password, crypt_id=6, salt_len=10):\n        passwd_hash = self.gen_password_hash(password, crypt_id, salt_len)\n        cmd = \"usermod -p '{0}' {1}\".format(passwd_hash, username)\n        ret, output = RunGetOutput(cmd, log_cmd=False)\n        if ret != 0:\n            return \"Failed to set password for {0}: {1}\".format(username, output)\n\n    def gen_password_hash(self, password, crypt_id, salt_len):\n        if crypt is None:\n            raise ImportError(\"crypt module not available (Python 3.13+). This function is not used by VMBackup.\")\n        collection = string.ascii_letters + string.digits\n        salt = ''.join(random.choice(collection) for _ in range(salt_len))\n        salt = \"${0}${1}\".format(crypt_id, salt)\n        return crypt.crypt(password, salt)\n\n    def load_ata_piix(self):\n        return WaAgent.TryLoadAtapiix()\n\n    def unload_ata_piix(self):\n        \"\"\"\n        Generic function to remove ata_piix.ko.\n        \"\"\"\n        return WaAgent.TryUnloadAtapiix()\n\n    def deprovisionWarnUser(self):\n        \"\"\"\n        Generic user warnings used at deprovision.\n        \"\"\"\n        print(\"WARNING! 
Nameserver configuration in /etc/resolv.conf will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Files to delete when VM is deprovisioned\n        \"\"\"\n        for a in VarLibDhcpDirectories:\n            Run(\"rm -f \" + a + \"/*\")\n\n        # Clear LibDir, remove nameserver and root bash history.\n        # os.listdir() returns bare file names, so join them to LibDir before removing.\n        for f in [os.path.join(LibDir, name) for name in os.listdir(LibDir)] + self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def uninstallDeleteFiles(self):\n        \"\"\"\n        Files to delete when agent is uninstalled.\n        \"\"\"\n        for f in self.agent_files_to_uninstall:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Generic dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        if self.checkPackageInstalled('NetworkManager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        try:\n            m = __import__('pyasn1')\n        except ImportError:\n            Error(GuestAgentLongName + \" requires python-pyasn1 for your Linux distribution.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self, buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies the generated waagent.conf under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot + '/etc'):\n            os.mkdir(buildroot + '/etc')\n        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)\n\n        if not os.path.exists(buildroot + '/etc/logrotate.d'):\n            os.mkdir(buildroot + '/etc/logrotate.d')\n        SetFileContents(buildroot + '/etc/logrotate.d/waagent', WaagentLogrotate)\n\n        self.init_script_file = buildroot + self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def RestartInterface(self, iface, max_retry=3):\n        for retry in range(1, max_retry + 1):\n            ret = Run(\"ifdown \" + iface + \" && ifup \" + iface)\n            if ret == 0:\n                return\n            Log(\"Failed to restart interface: {0}, ret={1}\".format(iface, ret))\n            if retry < max_retry:\n                Log(\"Retry restart interface in 5 seconds\")\n                time.sleep(5)\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        return CreateAccount(user, password, expiration, thumbprint)\n\n    def DeleteAccount(self, user):\n        return DeleteAccount(user)\n\n    def Install(self):\n        return Install()\n\n    def mediaHasFilesystem(self, dsk):\n        if len(dsk) == 0:\n            return False\n        if Run(\"LC_ALL=C fdisk -l \" + dsk + \" | grep Disk\"):\n            return False\n        return True\n\n    def mountDVD(self, dvd, location):\n        return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location)\n\n    def GetHome(self):\n        return GetHome()\n\n    def getDhcpClientName(self):\n        return self.dhcp_client_name\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout when the agent starts running\n        \"\"\"\n        self.setScsiDiskTimeout()\n\n    def setScsiDiskTimeout(self):\n        \"\"\"\n        Iterate all SCSI disks (including hot-added ones) and set their timeout if it differs from OS.RootDeviceScsiTimeout\n        \"\"\"\n        try:\n            scsiTimeout = Config.get(\"OS.RootDeviceScsiTimeout\")\n            for diskName in [disk for disk in os.listdir(\"/sys/block\") if disk.startswith(\"sd\")]:\n                self.setBlockDeviceTimeout(diskName, scsiTimeout)\n        except:\n            pass\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        \"\"\"\n        Set the SCSI disk timeout by writing /sys/block/sd*/device/timeout\n        \"\"\"\n        if timeout != None and device:\n            filePath = \"/sys/block/\" + device + \"/device/timeout\"\n            if (GetFileContents(filePath).splitlines()[0].rstrip() != timeout):\n                SetFileContents(filePath, timeout)\n                Log(\"SetBlockDeviceTimeout: Update the device \" + device + \" with timeout \" + timeout)\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Provide a dummy wait; by default the ssh host key is created by waagent\n        and should already exist.\n        \"\"\"\n        if (os.path.isfile(path)):\n            return True\n        else:\n            Error(\"Can't find host key: {0}\".format(path))\n            return False\n\n    def isDHCPEnabled(self):\n        return self.dhcp_enabled\n\n    def stopDHCP(self):\n        \"\"\"\n        Stop the system DHCP client so that the agent can bind on its port. If\n        the distro has set dhcp_enabled to True, it will need to provide an\n        implementation of this method.\n        \"\"\"\n        raise NotImplementedError('stopDHCP method missing')\n\n    def startDHCP(self):\n        \"\"\"\n        Start the system DHCP client. If the distro has set dhcp_enabled to\n        True, it will need to provide an implementation of this method.\n        \"\"\"\n        raise NotImplementedError('startDHCP method missing')\n\n    def translateCustomData(self, data):\n        \"\"\"\n        Translate the custom data from a Base64 encoding. 
Default to no-op.\n        \"\"\"\n        decodeCustomData = Config.get(\"Provisioning.DecodeCustomData\")\n        if decodeCustomData != None and decodeCustomData.lower().startswith(\"y\"):\n            return base64.b64decode(data)\n        return data\n\n    def getConfigurationPath(self):\n        return \"/etc/waagent.conf\"\n\n    def getProcessorCores(self):\n        return int(RunGetOutput(\"grep 'processor.*:' /proc/cpuinfo |wc -l\")[1])\n\n    def getTotalMemory(self):\n        return int(RunGetOutput(\"grep MemTotal /proc/meminfo |awk '{print $2}'\")[1]) / 1024\n\n    def getInterfaceNameByMac(self, mac):\n        ret, output = RunGetOutput(\"ifconfig -a\")\n        if ret != 0:\n            raise Exception(\"Failed to get network interface info\")\n        output = output.replace('\\n', '')\n        match = re.search(r\"(eth\\d).*(HWaddr|ether) {0}\".format(mac),\n                          output, re.IGNORECASE)\n        if match is None:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        output = match.group(0)\n        eths = re.findall(r\"eth\\d\", output)\n        if eths is None or len(eths) == 0:\n            raise Exception(\"Failed to get ifname with mac: {0}\".format(mac))\n        return eths[-1]\n\n    def configIpV4(self, ifName, addr, netmask=24):\n        ret, output = RunGetOutput(\"ifconfig {0} up\".format(ifName))\n        if ret != 0:\n            raise Exception(\"Failed to bring up {0}: {1}\".format(ifName,\n                                                                 output))\n        ret, output = RunGetOutput(\"ifconfig {0} {1}/{2}\".format(ifName, addr,\n                                                                 netmask))\n        if ret != 0:\n            raise Exception(\"Failed to config ipv4 for {0}: {1}\".format(ifName,\n                                                                        output))\n\n    def setDefaultGateway(self, gateway):\n        Run(\"/sbin/route add default gw \" + gateway, chk_err=False)\n\n    def routeAdd(self, net, mask, gateway):\n        Run(\"/sbin/route add -net \" + net + \" netmask \" + mask + \" gw \" + gateway,\n            chk_err=False)\n\n\n############################################################\n#\tGentooDistro\n############################################################\ngentoo_init_file = \"\"\"\\\n#!/sbin/runscript\n\ncommand=/usr/sbin/waagent\npidfile=/var/run/waagent.pid\ncommand_args=-daemon\ncommand_background=true\nname=\"Azure Linux Agent\"\n\ndepend()\n{\n\tneed localmount\n\tuse logger network\n\tafter bootmisc modules\n}\n\n\"\"\"\n\n\nclass gentooDistro(AbstractDistro):\n    \"\"\"\n    Gentoo distro concrete class\n    \"\"\"\n\n    def __init__(self):\n        super(gentooDistro, self).__init__()\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_name = 'sshd'\n        self.hostname_file_path = '/etc/conf.d/hostname'\n        self.dhcp_client_name = 'dhcpcd'\n        self.shadow_file_mode = 0o640\n        self.init_file = gentoo_init_file\n\n    def publishHostname(self, name):\n        try:\n            if (os.path.isfile(self.hostname_file_path)):\n                r = ReplaceFileContentsAtomic(self.hostname_file_path, \"hostname=\\\"\" + name + \"\\\"\\n\"\n                                              + \"\\n\".join(list(filter(lambda a: not a.startswith(\"hostname=\"),\n                                                                 GetFileContents(self.hostname_file_path).split(\"\\n\")))))\n        except:\n   
         return 1\n        return r\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o755)\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('rc-update add ' + self.agent_service_name + ' default')\n\n    def uninstallAgentService(self):\n        return Run('rc-update del ' + self.agent_service_name + ' default')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def checkPackageInstalled(self, p):\n        if Run('eix -I ^' + p + '$', chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run('eix -u ^' + p + '$', chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def RestartInterface(self, iface):\n        Run(\"/etc/init.d/net.\" + iface + \" restart\")\n\n\n############################################################\n#\tSuSEDistro\n############################################################\nsuse_init_file = \"\"\"\\\n#! /bin/sh\n#\n# Azure Linux Agent sysV init script\n#\n# Copyright 2013 Microsoft Corporation\n# Copyright SUSE LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# /etc/init.d/waagent\n#\n#  and symbolic link\n#\n# /usr/sbin/rcwaagent\n#\n# System startup script for the waagent\n#\n### BEGIN INIT INFO\n# Provides: AzureLinuxAgent\n# Required-Start: $network sshd\n# Required-Stop: $network sshd\n# Default-Start: 3 5\n# Default-Stop: 0 1 2 6\n# Description: Start the AzureLinuxAgent\n### END INIT INFO\n\nPYTHON=/usr/bin/python\nWAZD_BIN=/usr/sbin/waagent\nWAZD_CONF=/etc/waagent.conf\nWAZD_PIDFILE=/var/run/waagent.pid\n\ntest -x \"$WAZD_BIN\" || { echo \"$WAZD_BIN not installed\"; exit 5; }\ntest -e \"$WAZD_CONF\" || { echo \"$WAZD_CONF not found\"; exit 6; }\n\n. /etc/rc.status\n\n# First reset status of this service\nrc_reset\n\n# Return values acc. to LSB for all commands but status:\n# 0 - success\n# 1 - misc error\n# 2 - invalid or excess args\n# 3 - unimplemented feature (e.g. reload)\n# 4 - insufficient privilege\n# 5 - program not installed\n# 6 - program not configured\n#\n# Note that starting an already running service, stopping\n# or restarting a not-running service as well as the restart\n# with force-reload (in case signalling is not supported) are\n# considered a success.\n\n\ncase \"$1\" in\n    start)\n        echo -n \"Starting AzureLinuxAgent\"\n        ## Start daemon with startproc(8). 
If this fails\n        ## the echo return value is set appropriate.\n        startproc -f ${PYTHON} ${WAZD_BIN} -daemon\n        rc_status -v\n        ;;\n    stop)\n        echo -n \"Shutting down AzureLinuxAgent\"\n        ## Stop daemon with killproc(8) and if this fails\n        ## set echo the echo return value.\n        killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    try-restart)\n        ## Stop the service and if this succeeds (i.e. the\n        ## service was running before), start it again.\n        $0 status >/dev/null && $0 restart\n        rc_status\n        ;;\n    restart)\n        ## Stop the service and regardless of whether it was\n        ## running or not, start it again.\n        $0 stop\n        sleep 1\n        $0 start\n        rc_status\n        ;;\n    force-reload|reload)\n        rc_status\n        ;;\n    status)\n        echo -n \"Checking for service AzureLinuxAgent \"\n        ## Check status with checkproc(8), if process is running\n        ## checkproc will return with exit status 0.\n\n        checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN}\n        rc_status -v\n        ;;\n    probe)\n        ;;\n    *)\n        echo \"Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}\"\n        exit 1\n        ;;\nesac\nrc_exit\n\"\"\"\n\n\nclass SuSEDistro(AbstractDistro):\n    \"\"\"\n    SuSE Distro concrete class\n    Put SuSE specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(SuSEDistro, self).__init__()\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_name = 'sshd'\n        self.kernel_boot_options_file = '/boot/grub/menu.lst'\n        self.hostname_file_path = '/etc/HOSTNAME'\n        self.requiredDeps += [\"/sbin/insserv\"]\n        self.init_file = suse_init_file\n        self.dhcp_client_name = 'dhcpcd'\n        if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \\\n                    (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')):\n            self.dhcp_client_name = 'wickedd-dhcp4'\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n        self.getpidcmd = 'pidof '\n        self.dhcp_enabled = True\n\n    def checkPackageInstalled(self, p):\n        if Run(\"rpm -q \" + p, chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"zypper list-updates | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def installAgentServiceScriptFiles(self):\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0o744)\n        except:\n            pass\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('insserv ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n        return Run('insserv -r ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def startDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" start\", chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"service \" + self.dhcp_client_name + \" stop\", 
chk_err=False)\n\n\n############################################################\n#\tredhatDistro\n############################################################\n\nredhat_init_file = \"\"\"\\\n#!/bin/bash\n#\n# Init file for AzureLinuxAgent.\n#\n# chkconfig: 2345 60 80\n# description: AzureLinuxAgent\n#\n\n# source function library\n. /etc/rc.d/init.d/functions\n\nRETVAL=0\nFriendlyName=\"AzureLinuxAgent\"\nWAZD_BIN=/usr/sbin/waagent\n\nstart()\n{\n    echo -n $\"Starting $FriendlyName: \"\n    $WAZD_BIN -daemon &\n}\n\nstop()\n{\n    echo -n $\"Stopping $FriendlyName: \"\n    killproc -p /var/run/waagent.pid $WAZD_BIN\n    RETVAL=$?\n    echo\n    return $RETVAL\n}\n\ncase \"$1\" in\n    start)\n        start\n        ;;\n    stop)\n        stop\n        ;;\n    restart)\n        stop\n        start\n        ;;\n    reload)\n        ;;\n    report)\n        ;;\n    status)\n        status $WAZD_BIN\n        RETVAL=$?\n        ;;\n    *)\n        echo $\"Usage: $0 {start|stop|restart|status}\"\n        RETVAL=1\nesac\nexit $RETVAL\n\"\"\"\n\n\nclass redhatDistro(AbstractDistro):\n    \"\"\"\n    Redhat Distro concrete class\n    Put Redhat specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(redhatDistro, self).__init__()\n        self.service_cmd = '/sbin/service'\n        self.ssh_service_restart_option = 'condrestart'\n        self.ssh_service_name = 'sshd'\n        self.hostname_file_path = None if DistInfo()[1] < '7.0' else '/etc/hostname'\n        self.init_file = redhat_init_file\n        self.grubKernelBootOptionsFile = '/boot/grub/menu.lst'\n        self.grubKernelBootOptionsLine = 'kernel'\n\n    def publishHostname(self, name):\n        super(redhatDistro, self).publishHostname(name)\n        if DistInfo()[1] < '7.0':\n            filepath = \"/etc/sysconfig/network\"\n            if os.path.isfile(filepath):\n                ReplaceFileContentsAtomic(filepath, \"HOSTNAME=\" + name + \"\\n\"\n                                          + \"\\n\".join(\n                    list(filter(lambda a: not a.startswith(\"HOSTNAME\"), GetFileContents(filepath).split('\\n')))))\n\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                                      + \"\\n\".join(\n                list(filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n')))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o744)\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run('chkconfig --add waagent')\n\n    def uninstallAgentService(self):\n        return Run('chkconfig --del ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def checkPackageInstalled(self, p):\n        if Run(\"yum list installed \" + p, chk_err=False):\n            return 0\n        else:\n            return 1\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"yum check-update | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Generic dependency check.\n   
     Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        try:\n            m = __import__('pyasn1')\n        except ImportError:\n            Error(GuestAgentLongName + \" requires python-pyasn1 for your Linux distribution.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n\n############################################################\n#\tcentosDistro\n############################################################\n\nclass centosDistro(redhatDistro):\n    \"\"\"\n    CentOS Distro concrete class\n    Put CentOS specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(centosDistro, self).__init__()\n\n\n############################################################\n#   eulerosDistro\n############################################################\n\nclass eulerosDistro(redhatDistro):\n    \"\"\"\n    EulerOS Distro concrete class\n    Put EulerOS specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(eulerosDistro, self).__init__()\n\n\n############################################################\n#\toracleDistro\n############################################################\n\nclass oracleDistro(redhatDistro):\n    \"\"\"\n    Oracle Distro concrete class\n    Put Oracle specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(oracleDistro, self).__init__()\n\n\n############################################################\n#\tasianuxDistro\n############################################################\n\nclass asianuxDistro(redhatDistro):\n    \"\"\"\n    Asianux Distro concrete class\n    Put Asianux specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(asianuxDistro, self).__init__()\n\n\n############################################################\n#   CoreOSDistro\n############################################################\n\nclass CoreOSDistro(AbstractDistro):\n    \"\"\"\n    CoreOS Distro concrete class\n    Put CoreOS specific behavior here...\n    \"\"\"\n    CORE_UID = 500\n\n    def __init__(self):\n        super(CoreOSDistro, self).__init__()\n        self.requiredDeps += [\"/usr/bin/systemctl\"]\n        self.agent_service_name = 'waagent'\n        self.init_script_file = '/etc/systemd/system/waagent.service'\n        self.fileBlackList.append(\"/etc/machine-id\")\n        self.dhcp_client_name = 'systemd-networkd'\n        self.getpidcmd = 'pidof '\n        self.shadow_file_mode = 0o640\n        self.waagent_path = '/usr/share/oem/bin'\n        self.python_path = '/usr/share/oem/python/bin'\n        self.dhcp_enabled = True\n        if 'PATH' in os.environ:\n            os.environ['PATH'] = \"{0}:{1}\".format(os.environ['PATH'], self.python_path)\n        else:\n            os.environ['PATH'] = self.python_path\n\n        if 'PYTHONPATH' in os.environ:\n            os.environ['PYTHONPATH'] = \"{0}:{1}\".format(os.environ['PYTHONPATH'], self.waagent_path)\n        else:\n            os.environ['PYTHONPATH'] = self.waagent_path\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        There is no package manager in CoreOS.  
Return 1 since it must be preinstalled.\n        \"\"\"\n        return 1\n\n    def checkDependencies(self):\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def checkPackageUpdateable(self, p):\n        \"\"\"\n        There is no package manager in CoreOS.  Return 0 since it can't be updated via package.\n        \"\"\"\n        return 0\n\n    def startAgentService(self):\n        return Run('systemctl start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        return Run('systemctl stop ' + self.agent_service_name)\n\n    def restartSshService(self):\n        \"\"\"\n        SSH is socket activated on CoreOS. No need to restart it.\n        \"\"\"\n        return 0\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n\n    def RestartInterface(self, iface):\n        Run(\"systemctl restart systemd-networkd\")\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        uidmin = None\n        try:\n            uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID:\n            Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"useradd --create-home --password '*' \" + user\n            if expiration != None:\n                command += \" --expiredate \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            Log(\"CreateAccount: \" + user + \" already exists. 
Will update password.\")\n        if password != None:\n            self.changePass(user, password)\n        try:\n            if password == None:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0o700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n\n    def startDHCP(self):\n        Run(\"systemctl start \" + self.dhcp_client_name, chk_err=False)\n\n    def stopDHCP(self):\n        Run(\"systemctl stop \" + self.dhcp_client_name, chk_err=False)\n\n    def translateCustomData(self, data):\n        return base64.b64decode(data)\n\n    def getConfigurationPath(self):\n        return \"/usr/share/oem/waagent.conf\"\n\n\n############################################################\n#\tdebianDistro\n############################################################\ndebian_init_file = \"\"\"\\\n#!/bin/sh\n### BEGIN INIT INFO\n# Provides:          AzureLinuxAgent\n# Required-Start:    $network $syslog\n# Required-Stop:     $network $syslog\n# Should-Start:      $network $syslog\n# Should-Stop:       $network $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: AzureLinuxAgent\n# Description:       AzureLinuxAgent\n### END INIT INFO\n\n. 
/lib/lsb/init-functions\n\nOPTIONS=\"-daemon\"\nWAZD_BIN=/usr/sbin/waagent\nWAZD_PID=/var/run/waagent.pid\n\ncase \"$1\" in\n    start)\n        log_begin_msg \"Starting AzureLinuxAgent...\"\n        pid=$( pidofproc $WAZD_BIN )\n        if [ -n \"$pid\" ] ; then\n              log_begin_msg \"Already running.\"\n              log_end_msg 0\n              exit 0\n        fi\n        start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS\n        log_end_msg $?\n        ;;\n\n    stop)\n        log_begin_msg \"Stopping AzureLinuxAgent...\"\n        start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID\n        ret=$?\n        rm -f $WAZD_PID\n        log_end_msg $ret\n        ;;\n    force-reload)\n        $0 restart\n        ;;\n    restart)\n        $0 stop\n        $0 start\n        ;;\n    status)\n        status_of_proc $WAZD_BIN && exit 0 || exit $?\n        ;;\n    *)\n        log_success_msg \"Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}\"\n        exit 1\n        ;;\nesac\n\nexit 0\n\"\"\"\n\n\nclass debianDistro(AbstractDistro):\n    \"\"\"\n    debian Distro concrete class\n    Put debian specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(debianDistro, self).__init__()\n        self.requiredDeps += [\"/usr/sbin/update-rc.d\"]\n        self.init_file = debian_init_file\n        self.agent_package_name = 'walinuxagent'\n        self.dhcp_client_name = 'dhclient'\n        self.getpidcmd = 'pidof '\n        self.shadow_file_mode = 0o640\n\n    def checkPackageInstalled(self, p):\n        \"\"\"\n        Check that the package is installed.\n        Return 1 if installed, 0 if not installed.\n        This method of using dpkg-query\n        allows wildcards to be present in the\n        package name.\n        \"\"\"\n        if not Run(\"dpkg-query -W -f='${Status}\\n' '\" + p + \"' | grep ' installed' 2>&1\", chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        Debian dependency check.  
python-pyasn1 is NOT needed.\n        Return 1 unless all dependencies are satisfied.\n        NOTE: using network*manager will catch either package name in Ubuntu or debian.\n        \"\"\"\n        if self.checkPackageInstalled('network*manager'):\n            Error(GuestAgentLongName + \" is not compatible with network-manager.\")\n            return 1\n        for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def checkPackageUpdateable(self, p):\n        if Run(\"apt-get update ; apt-get upgrade -us | grep \" + p, chk_err=False):\n            return 1\n        else:\n            return 0\n\n    def installAgentServiceScriptFiles(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        try:\n            SetFileContents(self.init_script_file, self.init_file)\n            os.chmod(self.init_script_file, 0o744)\n        except OSError as e:\n            ErrorWithPrefix('installAgentServiceScriptFiles',\n                            'Exception: ' + str(e) + ' occurred creating ' + self.init_script_file)\n            return 1\n        return 0\n\n    def registerAgentService(self):\n        if self.installAgentServiceScriptFiles() == 0:\n            return Run('update-rc.d waagent defaults')\n        else:\n            return 1\n\n    def uninstallAgentService(self):\n        return Run('update-rc.d -f ' + self.agent_service_name + ' remove')\n\n    def unregisterAgentService(self):\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n\n\n############################################################\n#\tKaliDistro - WIP\n#       Functioning on Kali 1.1.0a so far\n############################################################\nclass KaliDistro(debianDistro):\n    \"\"\"\n    Kali Distro concrete class\n    Put Kali specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(KaliDistro, self).__init__()\n\n\n############################################################\n#\tUbuntuDistro\n############################################################\nubuntu_upstart_file = \"\"\"\\\n#walinuxagent - start Azure agent\n\ndescription \"walinuxagent\"\nauthor \"Ben Howard <ben.howard@canonical.com>\"\n\nstart on (filesystem and started rsyslog)\n\npre-start script\n\n    WALINUXAGENT_ENABLED=1\n    [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent\n\n    if [ \"$WALINUXAGENT_ENABLED\" != \"1\" ]; then\n        exit 1\n    fi\n\n    if [ ! 
-x /usr/sbin/waagent ]; then\n        exit 1\n    fi\n\n    #Load the udf module\n    modprobe -b udf\nend script\n\nexec /usr/sbin/waagent -daemon\n\"\"\"\n\n\nclass UbuntuDistro(debianDistro):\n    \"\"\"\n    Ubuntu Distro concrete class\n    Put Ubuntu specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(UbuntuDistro, self).__init__()\n        self.init_script_file = '/etc/init/waagent.conf'\n        self.init_file = ubuntu_upstart_file\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\"]\n        self.dhcp_client_name = None\n        self.getpidcmd = 'pidof '\n\n    def registerAgentService(self):\n        return self.installAgentServiceScriptFiles()\n\n    def uninstallAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return 0\n        os.remove('/etc/init/' + self.agent_service_name + '.conf')\n\n    def unregisterAgentService(self):\n        \"\"\"\n        If we are packaged - the service name is walinuxagent, do nothing.\n        \"\"\"\n        if self.agent_service_name == 'walinuxagent':\n            return\n        self.stopAgentService()\n        return self.uninstallAgentService()\n\n    def deprovisionWarnUser(self):\n        \"\"\"\n        Ubuntu specific warning string from Deprovision.\n        \"\"\"\n        print(\"WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.\")\n\n    def deprovisionDeleteFiles(self):\n        \"\"\"\n        Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will\n        break resolvconf. Therefore, we check to see if resolvconf is in use,\n        and if so, we remove the resolvconf artifacts.\n        \"\"\"\n        if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf':\n            Log(\"resolvconf is not configured. 
Removing /etc/resolv.conf\")\n            self.fileBlackList.append('/etc/resolv.conf')\n        else:\n            Log(\"resolvconf is enabled; leaving /etc/resolv.conf intact\")\n            resolvConfD = '/etc/resolvconf/resolv.conf.d/'\n            self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original'])\n        for f in os.listdir(LibDir) + self.fileBlackList:\n            try:\n                os.remove(f)\n            except:\n                pass\n        return 0\n\n    def getDhcpClientName(self):\n        if self.dhcp_client_name != None:\n            return self.dhcp_client_name\n        if DistInfo()[1] == '12.04':\n            self.dhcp_client_name = 'dhclient3'\n        else:\n            self.dhcp_client_name = 'dhclient'\n        return self.dhcp_client_name\n\n    def waitForSshHostKey(self, path):\n        \"\"\"\n        Wait until the ssh host key is generated by cloud init.\n        \"\"\"\n        for retry in range(0, 10):\n            if (os.path.isfile(path)):\n                return True\n            time.sleep(1)\n        Error(\"Can't find host key: {0}\".format(path))\n        return False\n\n\n############################################################\n#\tLinuxMintDistro\n############################################################\n\nclass LinuxMintDistro(UbuntuDistro):\n    \"\"\"\n    LinuxMint Distro concrete class\n    Put LinuxMint specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(LinuxMintDistro, self).__init__()\n\n############################################################\n#      DefaultDistro\n############################################################\n\nclass DefaultDistro(UbuntuDistro):\n    \"\"\"\n    Default Distro concrete class\n    Put Default distro specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(DefaultDistro, self).__init__()\n\n############################################################\n#\tfedoraDistro\n############################################################\nfedora_systemd_service = \"\"\"\\\n[Unit]\nDescription=Azure Linux Agent\nAfter=network.target\nAfter=sshd.service\nConditionFileIsExecutable=/usr/sbin/waagent\nConditionPathExists=/etc/waagent.conf\n\n[Service]\nType=simple\nExecStart=/usr/sbin/waagent -daemon\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n\n\nclass fedoraDistro(redhatDistro):\n    \"\"\"\n    FedoraDistro concrete class\n    Put Fedora specific behavior here...\n    \"\"\"\n\n    def __init__(self):\n        super(fedoraDistro, self).__init__()\n        self.service_cmd = '/usr/bin/systemctl'\n        self.hostname_file_path = '/etc/hostname'\n        self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service'\n        self.init_file = fedora_systemd_service\n        self.grubKernelBootOptionsFile = '/etc/default/grub'\n        self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX='\n\n    def publishHostname(self, name):\n        SetFileContents(self.hostname_file_path, name + '\\n')\n        ethernetInterface = MyDistro.GetInterfaceName()\n        filepath = \"/etc/sysconfig/network-scripts/ifcfg-\" + ethernetInterface\n        if os.path.isfile(filepath):\n            ReplaceFileContentsAtomic(filepath, \"DHCP_HOSTNAME=\" + name + \"\\n\"\n                                      + \"\\n\".join(\n                list(filter(lambda a: not a.startswith(\"DHCP_HOSTNAME\"), GetFileContents(filepath).split('\\n')))))\n        return 0\n\n    def installAgentServiceScriptFiles(self):\n        
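# Install the systemd unit file, then daemon-reload so systemd picks it up.\n        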
SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o644)\n        return Run(self.service_cmd + ' daemon-reload')\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(self.service_cmd + ' enable ' + self.agent_service_name)\n\n    def uninstallAgentService(self):\n        \"\"\"\n        Call service subsystem to remove waagent script.\n        \"\"\"\n        return Run(self.service_cmd + ' disable ' + self.agent_service_name)\n\n    def unregisterAgentService(self):\n        \"\"\"\n        Call self.stopAgentService(), then self.uninstallAgentService().\n        \"\"\"\n        self.stopAgentService()\n        self.uninstallAgentService()\n\n    def startAgentService(self):\n        \"\"\"\n        Service call to start the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' start ' + self.agent_service_name)\n\n    def stopAgentService(self):\n        \"\"\"\n        Service call to stop the Agent service\n        \"\"\"\n        return Run(self.service_cmd + ' stop ' + self.agent_service_name, False)\n\n    def restartSshService(self):\n        \"\"\"\n        Service call to re(start) the SSH service\n        \"\"\"\n        sshRestartCmd = self.service_cmd + \" \" + self.ssh_service_restart_option + \" \" + self.ssh_service_name\n        retcode = Run(sshRestartCmd)\n        if retcode > 0:\n            Error(\"Failed to restart SSH service with return code: \" + str(retcode))\n        return retcode\n\n    def deleteRootPassword(self):\n        return Run(\"/sbin/usermod root -p '!!'\")\n\n    def packagedInstall(self, buildroot):\n        \"\"\"\n        Called from setup.py for use by RPM.\n        Copies the generated waagent.conf under the buildroot.\n        \"\"\"\n        if not os.path.exists(buildroot + '/etc'):\n            os.mkdir(buildroot + '/etc')\n        SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file)\n\n        if not os.path.exists(buildroot + '/etc/logrotate.d'):\n            os.mkdir(buildroot + '/etc/logrotate.d')\n        SetFileContents(buildroot + '/etc/logrotate.d/WALinuxAgent', WaagentLogrotate)\n\n        self.init_script_file = buildroot + self.init_script_file\n        # this allows us to call installAgentServiceScriptFiles()\n        if not os.path.exists(os.path.dirname(self.init_script_file)):\n            os.mkdir(os.path.dirname(self.init_script_file))\n        self.installAgentServiceScriptFiles()\n\n    def CreateAccount(self, user, password, expiration, thumbprint):\n        super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint)\n        Run('/sbin/usermod ' + user + ' -G wheel')\n\n    def DeleteAccount(self, user):\n        Run('/sbin/usermod ' + user + ' -G \"\"')\n        super(fedoraDistro, self).DeleteAccount(user)\n\n\n############################################################\n#\tFreeBSD\n############################################################\nFreeBSDWaagentConf = \"\"\"\\\n#\n# Azure Linux Agent Configuration\n#\n\nRole.StateConsumer=None                 # Specified program is invoked with the argument \"Ready\" when we report ready status\n                                        # to the endpoint server.\nRole.ConfigurationConsumer=None         # Specified program is invoked with XML file argument specifying role configuration.\nRole.TopologyConsumer=None              # Specified program is invoked with XML file argument specifying role 
topology.\n\nProvisioning.Enabled=y                  #\nProvisioning.DeleteRootPassword=y       # Password authentication for root account will be unavailable.\nProvisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair.\nProvisioning.SshHostKeyPairType=rsa     # Supported values are \"rsa\", \"dsa\" and \"ecdsa\".\nProvisioning.MonitorHostName=y          # Monitor host name changes and publish changes via DHCP requests.\n\nResourceDisk.Format=y                   # Format if unformatted. If 'n', resource disk will not be mounted.\nResourceDisk.Filesystem=ufs2            #\nResourceDisk.MountPoint=/mnt/resource   #\nResourceDisk.EnableSwap=n               # Create and use swapfile on resource disk.\nResourceDisk.SwapSizeMB=0               # Size of the swapfile.\n\nLBProbeResponder=y                      # Respond to load balancer probes if requested by Azure.\n\nLogs.Verbose=n                          # Enable verbose logs\n\nOS.RootDeviceScsiTimeout=300            # Root device timeout in seconds.\nOS.OpensslPath=None                     # If \"None\", the system default version is used.\n\"\"\"\n\nbsd_init_file = \"\"\"\\\n#! /bin/sh\n\n# PROVIDE: waagent\n# REQUIRE: DAEMON cleanvar sshd\n# BEFORE: LOGIN\n# KEYWORD: nojail\n\n. /etc/rc.subr\nexport PATH=$PATH:/usr/local/bin\nname=\"waagent\"\nrcvar=\"waagent_enable\"\ncommand=\"/usr/sbin/${name}\"\ncommand_interpreter=\"/usr/local/bin/python\"\nwaagent_flags=\" daemon &\"\n\npidfile=\"/var/run/waagent.pid\"\n\nload_rc_config $name\nrun_rc_command \"$1\"\n\n\"\"\"\nbsd_activate_resource_disk_txt = \"\"\"\\\n#!/usr/bin/env python\n\nimport os\nimport sys\nimport imp\n\n# waagent has no '.py' therefore create waagent module import manually.\n__name__='setupmain' #prevent waagent.__main__ from executing\nwaagent=imp.load_source('waagent','/tmp/waagent') \nwaagent.LoggerInit('/var/log/waagent.log','/dev/console')\nfrom waagent import RunGetOutput,Run\nConfig=waagent.ConfigurationProvider(None)\nformat = Config.get(\"ResourceDisk.Format\")\nif format == None or format.lower().startswith(\"n\"):\n    sys.exit(0)\ndevice_base = 'da1'\ndevice = \"/dev/\" + device_base\nfor entry in RunGetOutput(\"mount\")[1].split():\n    if entry.startswith(device + \"s1\"):\n        waagent.Log(\"ActivateResourceDisk: \" + device + \"s1 is already mounted.\")\n        sys.exit(0)\nmountpoint = Config.get(\"ResourceDisk.MountPoint\")\nif mountpoint == None:\n    mountpoint = \"/mnt/resource\"\nwaagent.CreateDir(mountpoint, \"root\", 0755)\nfs = Config.get(\"ResourceDisk.Filesystem\")\nif waagent.FreeBSDDistro().mediaHasFilesystem(device) == False :\n    Run(\"newfs \" + device + \"s1\")\nif Run(\"mount \" + device + \"s1 \" + mountpoint):\n    waagent.Error(\"ActivateResourceDisk: Failed to mount resource disk (\" + device + \"s1).\")\n    sys.exit(0)\nwaagent.Log(\"Resource disk (\" + device + \"s1) is mounted at \" + mountpoint + \" with fstype \" + fs)\nwaagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT)\nswap = Config.get(\"ResourceDisk.EnableSwap\")\nif swap == None or swap.lower().startswith(\"n\"):\n    sys.exit(0)\nsizeKB = int(Config.get(\"ResourceDisk.SwapSizeMB\")) * 1024\nif os.path.isfile(mountpoint + \"/swapfile\") and os.path.getsize(mountpoint + \"/swapfile\") != (sizeKB * 1024):\n    os.remove(mountpoint + \"/swapfile\")\nif not os.path.isfile(mountpoint + \"/swapfile\"):\n    Run(\"umask 0077 && dd if=/dev/zero of=\" + mountpoint + \"/swapfile bs=1024 count=\" + str(sizeKB))\nif 
Run(\"mdconfig -a -t vnode -f \" + mountpoint + \"/swapfile -u 0\"):\n    waagent.Error(\"ActivateResourceDisk: Configuring swap - Failed to create md0\")\nif not Run(\"swapon /dev/md0\"):\n    waagent.Log(\"Enabled \" + str(sizeKB) + \" KB of swap at \" + mountpoint + \"/swapfile\")\nelse:\n    waagent.Error(\"ActivateResourceDisk: Failed to activate swap at \" + mountpoint + \"/swapfile\")\n\"\"\"\n\n\nclass FreeBSDDistro(AbstractDistro):\n    \"\"\"\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Generic Attributes go here.  These are based on 'majority rules'.\n        This __init__() may be called or overriden by the child.\n        \"\"\"\n        super(FreeBSDDistro, self).__init__()\n        self.agent_service_name = os.path.basename(sys.argv[0])\n        self.selinux = False\n        self.ssh_service_name = 'sshd'\n        self.ssh_config_file = '/etc/ssh/sshd_config'\n        self.hostname_file_path = '/etc/hostname'\n        self.dhcp_client_name = 'dhclient'\n        self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'pw'\n            , 'openssl', 'fdisk', 'sed', 'grep', 'sudo']\n        self.init_script_file = '/etc/rc.d/waagent'\n        self.init_file = bsd_init_file\n        self.agent_package_name = 'WALinuxAgent'\n        self.fileBlackList = [\"/root/.bash_history\", \"/var/log/waagent.log\", '/etc/resolv.conf']\n        self.agent_files_to_uninstall = [\"/etc/waagent.conf\"]\n        self.grubKernelBootOptionsFile = '/boot/loader.conf'\n        self.grubKernelBootOptionsLine = ''\n        self.getpidcmd = 'pgrep -n'\n        self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if='  # custom data max len is 64k\n        self.sudoers_dir_base = '/usr/local/etc'\n        self.waagent_conf_file = FreeBSDWaagentConf\n\n    def installAgentServiceScriptFiles(self):\n        SetFileContents(self.init_script_file, self.init_file)\n        os.chmod(self.init_script_file, 0o777)\n        AppendFileContents(\"/etc/rc.conf\", \"waagent_enable='YES'\\n\")\n        return 0\n\n    def registerAgentService(self):\n        self.installAgentServiceScriptFiles()\n        return Run(\"services_mkdb \" + self.init_script_file)\n\n    def sshDeployPublicKey(self, fprint, path):\n        \"\"\"\n        We support PKCS8.\n        \"\"\"\n        if Run(\"ssh-keygen -i -m PKCS8 -f \" + fprint + \" >> \" + path):\n            return 1\n        else:\n            return 0\n\n    def deleteRootPassword(self):\n        \"\"\"\n        BSD root password removal.\n        \"\"\"\n        filepath = \"/etc/master.passwd\"\n        ReplaceStringInFile(filepath, r'root:.*?:', 'root::')\n        # ReplaceFileContentsAtomic(filepath,\"root:*LOCK*:14600::::::\\n\"\n        #                          + \"\\n\".join(filter(lambda a: not a.startswith(\"root:\"),GetFileContents(filepath).split('\\n'))))\n        os.chmod(filepath, self.shadow_file_mode)\n        if self.isSelinuxSystem():\n            self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0')\n        RunGetOutput(\"pwd_mkdb -u root /etc/master.passwd\")\n        Log(\"Root password deleted.\")\n        return 0\n\n    def changePass(self, user, password):\n        return RunSendStdin(\"pw usermod \" + user + \" -h 0 \", password, log_cmd=False)\n\n    def load_ata_piix(self):\n        return 0\n\n    def unload_ata_piix(self):\n        return 0\n\n    def checkDependencies(self):\n        \"\"\"\n        FreeBSD dependency check.\n        Return 1 unless all dependencies are satisfied.\n        \"\"\"\n        for 
for a in self.requiredDeps:\n            if Run(\"which \" + a + \" > /dev/null 2>&1\", chk_err=False):\n                Error(\"Missing required dependency: \" + a)\n                return 1\n        return 0\n\n    def packagedInstall(self, buildroot):\n        pass\n\n    def GetInterfaceName(self):\n        \"\"\"\n        Return the name of the\n        active ethernet interface.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        return iface\n\n    def RestartInterface(self, iface):\n        Run(\"service netif restart\")\n\n    def GetIpv4Address(self):\n        \"\"\"\n        Return the IPv4 address of the\n        active ethernet interface.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        return inet\n\n    def GetMacAddress(self):\n        \"\"\"\n        Return the MAC address of the\n        active ethernet interface as a list of ints.\n        \"\"\"\n        iface, inet, mac = self.GetFreeBSDEthernetInfo()\n        l = mac.split(':')\n        r = []\n        for i in l:\n            r.append(int(i, 16))\n        return r\n\n    def GetFreeBSDEthernetInfo(self):\n        \"\"\"\n        There is no SIOCGIFCONF\n        on FreeBSD - just parse ifconfig.\n        Returns strings: iface, inet4_addr, and mac\n        or 'None,None,None' if unable to parse.\n        We will sleep and retry as the network must be up.\n        \"\"\"\n        code, output = RunGetOutput(\"ifconfig\", chk_err=False)\n        Log(output)\n        retries = 10\n        cmd = 'ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP '\n        code = 1\n\n        while code > 0:\n            if code > 0 and retries == 0:\n                Error(\"GetFreeBSDEthernetInfo - Failed to detect ethernet interface\")\n                return None, None, None\n            code, output = RunGetOutput(cmd, chk_err=False)\n            retries -= 1\n            if code > 0 and retries > 0:\n                Log(\"GetFreeBSDEthernetInfo - Error: retry ethernet detection \" + str(retries))\n                if retries == 9:\n                    c, o = RunGetOutput(\"ifconfig | grep -A1 -B2 ether\", chk_err=False)\n                    if c == 0:\n                        t = o.replace('\\n', ' ')\n                        t = t.split()\n                        i = t[0][:-1]\n                        Log(RunGetOutput('id')[1])\n                        Run('dhclient ' + i)\n                time.sleep(10)\n\n        j = output.replace('\\n', ' ')\n        j = j.split()\n        iface = j[0][:-1]\n\n        for i in range(len(j)):\n            if j[i] == 'inet':\n                inet = j[i + 1]\n            elif j[i] == 'ether':\n                mac = j[i + 1]\n\n        return iface, inet, mac\n\n    
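# FreeBSD account management shells out to pw(8), and the sudoers drop-in\n    # lives under sudoers_dir_base (/usr/local/etc) rather than /etc.\n    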
def CreateAccount(self, user, password, expiration, thumbprint):\n        \"\"\"\n        Create a user account, with 'user', 'password', 'expiration', ssh keys\n        and sudo permissions.\n        Returns None if successful, error string on failure.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/login.defs\"):\n                uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry != None and userentry[2] < uidmin:\n            Error(\"CreateAccount: \" + user + \" is a system user. Will not set password.\")\n            return \"Failed to set password for system user: \" + user + \" (0x06).\"\n        if userentry == None:\n            command = \"pw useradd \" + user + \" -m\"\n            if expiration != None:\n                command += \" -e \" + expiration.split('.')[0]\n            if Run(command):\n                Error(\"Failed to create user account: \" + user)\n                return \"Failed to create user account: \" + user + \" (0x07).\"\n        else:\n            Log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n\n        if password != None:\n            self.changePass(user, password)\n        try:\n            # for older distros create sudoers.d\n            if not os.path.isdir(MyDistro.sudoers_dir_base + '/sudoers.d/'):\n                # create the /etc/sudoers.d/ directory\n                os.mkdir(MyDistro.sudoers_dir_base + '/sudoers.d')\n                # add the include of sudoers.d to the /etc/sudoers\n                SetFileContents(MyDistro.sudoers_dir_base + '/sudoers', GetFileContents(\n                    MyDistro.sudoers_dir_base + '/sudoers') + '\\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\\n')\n            if password == None:\n                SetFileContents(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n            else:\n                SetFileContents(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n            os.chmod(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\", 0o440)\n        except:\n            Error(\"CreateAccount: Failed to configure sudo access for user.\")\n            return \"Failed to configure sudo privileges (0x08).\"\n        home = MyDistro.GetHome()\n        if thumbprint != None:\n            dir = home + \"/\" + user + \"/.ssh\"\n            CreateDir(dir, user, 0o700)\n            pub = dir + \"/id_rsa.pub\"\n            prv = dir + \"/id_rsa\"\n            Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n            SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n            for f in [pub, prv]:\n                os.chmod(f, 0o600)\n                ChangeOwner(f, user)\n            SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n            ChangeOwner(dir + \"/authorized_keys\", user)\n        Log(\"Created user account: \" + user)\n        return None\n\n    def DeleteAccount(self, user):\n        \"\"\"\n        Delete the 'user'.\n        Clear utmp first, to avoid error.\n        Removes the sudoers.d/waagent drop-in.\n        \"\"\"\n        userentry = None\n        try:\n            userentry = pwd.getpwnam(user)\n        except:\n            pass\n        if userentry == None:\n            Error(\"DeleteAccount: \" + user + \" not found.\")\n            return\n        uidmin = None\n        try:\n            if os.path.isfile(\"/etc/login.defs\"):\n                uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n        except:\n            pass\n        if uidmin == None:\n            uidmin = 100\n        if userentry[2] < uidmin:\n            Error(\"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n            return\n        Run(\"> /var/run/utmp\")  # Delete utmp to prevent error if we are the 'user' deleted\n        pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                               stdin=subprocess.PIPE).pid\n        try:\n            os.remove(MyDistro.sudoers_dir_base + \"/sudoers.d/waagent\")\n        except:\n            pass\n        return\n\n    def ActivateResourceDiskNoThread(self):\n        \"\"\"\n        Format, mount, and if specified in the configuration\n        set resource disk as swap.\n        \"\"\"\n        global DiskActivated\n        Run('cp /usr/sbin/waagent /tmp/')\n        SetFileContents('/tmp/bsd_activate_resource_disk.py', bsd_activate_resource_disk_txt)\n        Run('chmod +x /tmp/bsd_activate_resource_disk.py')\n        pid = subprocess.Popen([\"/tmp/bsd_activate_resource_disk.py\", \"\"]).pid\n        Log(\"Spawning bsd_activate_resource_disk.py\")\n        DiskActivated = True\n        return\n\n    def Install(self):\n        \"\"\"\n        Install the agent service.\n        Check dependencies.\n        Create /etc/waagent.conf and move old version to\n        /etc/waagent.conf.old\n        Copy RulesFiles to /var/lib/waagent\n        Create /etc/logrotate.d/waagent\n        Set /etc/ssh/sshd_config ClientAliveInterval to 180\n        \"\"\"\n        if MyDistro.checkDependencies():\n            return 1\n        os.chmod(sys.argv[0], 0o755)\n        SwitchCwd()\n        for a in RulesFiles:\n            if os.path.isfile(a):\n                if os.path.isfile(GetLastPathElement(a)):\n                    os.remove(GetLastPathElement(a))\n                shutil.move(a, \".\")\n                Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a))\n        MyDistro.registerAgentService()\n        if os.path.isfile(\"/etc/waagent.conf\"):\n            try:\n                os.remove(\"/etc/waagent.conf.old\")\n            except:\n                pass\n            try:\n                os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n                Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n            except:\n                pass\n        SetFileContents(\"/etc/waagent.conf\", self.waagent_conf_file)\n        if os.path.exists('/usr/local/etc/logrotate.d/'):\n            SetFileContents(\"/usr/local/etc/logrotate.d/waagent\", WaagentLogrotate)\n        filepath = \"/etc/ssh/sshd_config\"\n        ReplaceFileContentsAtomic(filepath, \"\\n\".join(list(filter(\n            lambda a: not a.startswith(\"ClientAliveInterval\"),\n            GetFileContents(filepath).split('\\n')))) + \"\\nClientAliveInterval 180\\n\")\n        Log(\"Configured SSH client probing to keep connections alive.\")\n        # ApplyVNUMAWorkaround()\n        return 0\n\n    def mediaHasFilesystem(self, dsk):\n        if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep \"invalid fdisk partition table found\" ', False):\n            return False\n        return True\n\n    def mountDVD(self, dvd, location):\n        # We cannot read a Joliet-option UDF DVD on FreeBSD 10, so 'dd' the payload into our location.\n        retcode, out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml')\n        if retcode != 0:\n            return retcode, out\n\n        
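# The dd image may carry a UTF-8 BOM, NUL padding, stray high bytes and\n        # trailing data past </Environment>; scrub these so the XML parses cleanly.\n        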
ovfxml = (GetFileContents(location + \"/ovf-env.xml\", asbin=False))\n        if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128:\n            ovfxml = ovfxml[3:]  # The BOM is not stripped. First three bytes are > 128 and not unicode chars, so we ignore them.\n        ovfxml = ovfxml.strip(chr(0x00))\n        ovfxml = \"\".join(list(filter(lambda x: ord(x) < 128, ovfxml)))\n        ovfxml = re.sub(r'</Environment>.*\\Z', '', ovfxml, 0, re.DOTALL)\n        ovfxml += '</Environment>'\n        SetFileContents(location + \"/ovf-env.xml\", ovfxml)\n        return retcode, out\n\n    def GetHome(self):\n        return '/home'\n\n    def initScsiDiskTimeout(self):\n        \"\"\"\n        Set the SCSI disk timeout by updating the kernel config\n        \"\"\"\n        timeout = Config.get(\"OS.RootDeviceScsiTimeout\")\n        if timeout:\n            Run(\"sysctl kern.cam.da.default_timeout=\" + timeout)\n\n    def setScsiDiskTimeout(self):\n        return\n\n    def setBlockDeviceTimeout(self, device, timeout):\n        return\n\n    def getProcessorCores(self):\n        return int(RunGetOutput(\"sysctl hw.ncpu | awk '{print $2}'\")[1])\n\n    def getTotalMemory(self):\n        return int(RunGetOutput(\"sysctl hw.realmem | awk '{print $2}'\")[1]) / 1024\n\n    def setDefaultGateway(self, gateway):\n        Run(\"/sbin/route add default \" + gateway, chk_err=False)\n\n    def routeAdd(self, net, mask, gateway):\n        Run(\"/sbin/route add -net \" + net + \" \" + mask + \" \" + gateway, chk_err=False)\n\n\nclass NSBSDDistro(FreeBSDDistro):\n    \"\"\"\n    Stormshield NS-BSD OS\n    \"\"\"\n\n    def __init__(self):\n        super(NSBSDDistro, self).__init__()\n\n\n############################################################\n# END DISTRO CLASS DEFS\n############################################################\n\n# This lets us index into a string or an array of integers transparently.\ndef Ord(a):\n    \"\"\"\n    Allows indexing into a string or an array of integers transparently.\n    Generic utility function.\n    \"\"\"\n    if type(a) == type(\"a\"):\n        a = ord(a)\n    return a\n\n\ndef IsLinux():\n    \"\"\"\n    Returns True if platform is Linux.\n    Generic utility function.\n    \"\"\"\n    return (platform.uname()[0] == \"Linux\")\n\n\ndef GetLastPathElement(path):\n    \"\"\"\n    Similar to basename.\n    Generic utility function.\n    \"\"\"\n    return path.rsplit('/', 1)[1]\n\n\ndef GetFileContents(filepath, asbin=False):\n    \"\"\"\n    Read and return contents of 'filepath'.\n    \"\"\"\n    mode = 'r'\n    if asbin:\n        mode += 'b'\n    c = None\n    try:\n        with open(filepath, mode) as F:\n            c = F.read()\n    except IOError as e:\n        ErrorWithPrefix('GetFileContents', 'Reading from file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return c\n\n\ndef SetFileContents(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str:\n        contents = contents.encode('latin-1', 'ignore')\n    try:\n        with open(filepath, \"wb+\") as F:\n            F.write(contents)\n    except IOError as e:\n        ErrorWithPrefix('SetFileContents', 'Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\n
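# NOTE: these file helpers encode str payloads as latin-1, whose code points\n# 0-255 map one-to-one onto bytes, before writing.\n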
def AppendFileContents(filepath, contents):\n    \"\"\"\n    Append 'contents' to 'filepath'.\n    \"\"\"\n    if type(contents) == str:\n        contents = contents.encode('latin-1')\n    try:\n        with open(filepath, \"a+\") as F:\n            F.write(contents)\n    except IOError as e:\n        ErrorWithPrefix('AppendFileContents', 'Appending to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    return 0\n\n\ndef ReplaceFileContentsAtomic(filepath, contents):\n    \"\"\"\n    Write 'contents' to 'filepath' by creating a temp file, and replacing original.\n    \"\"\"\n    handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath))\n    if type(contents) == str:\n        contents = contents.encode('latin-1')\n    try:\n        os.write(handle, contents)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Writing to file ' + filepath + ' Exception is ' + str(e))\n        return None\n    finally:\n        os.close(handle)\n    try:\n        os.rename(temp, filepath)\n        return None\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.remove(filepath)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))\n    try:\n        os.rename(temp, filepath)\n    except IOError as e:\n        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))\n        return 1\n    return 0\n\n\ndef GetLineStartingWith(prefix, filepath):\n    \"\"\"\n    Return the first line of 'filepath' that starts with 'prefix'.\n    \"\"\"\n    for line in GetFileContents(filepath).split('\\n'):\n        if line.startswith(prefix):\n            return line\n    return None\n\n\ndef Run(cmd, chk_err=True):\n    \"\"\"\n    Calls RunGetOutput on 'cmd', returning only the return code.\n    If chk_err=True then errors will be reported in the log.\n    If chk_err=False then errors will be suppressed from the log.\n    \"\"\"\n    retcode, out = RunGetOutput(cmd, chk_err)\n    return retcode\n\n\ndef RunGetOutput(cmd, chk_err=True, log_cmd=True):\n    \"\"\"\n    Wrapper for subprocess.check_output.\n    Execute 'cmd'.  Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        LogIfVerbose(cmd)\n    try:\n        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\n        if isinstance(output, bytes):\n            output = output.decode('latin-1')\n    except subprocess.CalledProcessError as e:\n        if chk_err and log_cmd:\n            Error('CalledProcessError.  Error Code is ' + str(e.returncode))\n            Error('CalledProcessError.  Command string was ' + e.cmd)\n            if isinstance(e.output[:-1], bytes):\n                Error('CalledProcessError.  Command result was ' + (e.output[:-1]).decode('latin-1'))\n            else:\n                Error('CalledProcessError.  
Command result was ' + (e.output[:-1]))\n        if isinstance(e.output, bytes):\n            return_value = e.output.decode('latin-1')\n        else:\n            return_value = e.output\n        return e.returncode, return_value\n\n    return 0, output\n\n\ndef RunSendStdin(cmd, input, chk_err=True, log_cmd=True):\n    \"\"\"\n    Wrapper for subprocess.Popen.\n    Execute 'cmd', sending 'input' to STDIN of 'cmd'.\n    Returns return code and STDOUT, trapping expected exceptions.\n    Reports exceptions to Error if chk_err parameter is True\n    \"\"\"\n    if log_cmd:\n        LogIfVerbose(cmd + input)\n    try:\n        me = subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE, stderr=subprocess.STDOUT,\n                              stdout=subprocess.PIPE)\n        output = me.communicate(input)\n    except OSError as e:\n        # Popen/communicate failed, so 'me' and 'output' may be unbound; report the exception itself.\n        if chk_err and log_cmd:\n            Error('CalledProcessError.  Error Code is ' + str(e.errno))\n            Error('CalledProcessError.  Command string was ' + cmd)\n            Error('CalledProcessError.  Command result was ' + str(e))\n        return 1, str(e)\n    if me.returncode != 0 and chk_err is True and log_cmd:\n        Error('CalledProcessError.  Error Code is ' + str(me.returncode))\n        Error('CalledProcessError.  Command string was ' + cmd)\n        Error('CalledProcessError.  Command result was ' + output[0].decode('latin-1'))\n    return me.returncode, output[0].decode('latin-1')\n\n\ndef GetNodeTextData(a):\n    \"\"\"\n    Filter non-text nodes from DOM tree\n    \"\"\"\n    for b in a.childNodes:\n        if b.nodeType == b.TEXT_NODE:\n            return b.data\n\n\ndef GetHome():\n    \"\"\"\n    Attempt to guess the $HOME location.\n    Return the path string.\n    \"\"\"\n    home = None\n    try:\n        home = GetLineStartingWith(\"HOME\", \"/etc/default/useradd\").split('=')[1].strip()\n    except:\n        pass\n    if (home == None) or (home.startswith(\"/\") == False):\n        home = \"/home\"\n    return home\n\n\ndef ChangeOwner(filepath, user):\n    \"\"\"\n    Lookup user.  Attempt chown 'filepath' to 'user'.\n    \"\"\"\n    p = None\n    try:\n        p = pwd.getpwnam(user)\n    except:\n        pass\n    if p != None:\n        if not os.path.exists(filepath):\n            Error(\"Path does not exist: {0}\".format(filepath))\n        else:\n            os.chown(filepath, p[2], p[3])\n\n\ndef CreateDir(dirpath, user, mode):\n    \"\"\"\n    Attempt os.makedirs, catch all exceptions.\n    Call ChangeOwner afterwards.\n    \"\"\"\n    try:\n        os.makedirs(dirpath, mode)\n    except:\n        pass\n    ChangeOwner(dirpath, user)\n\n\ndef CreateAccount(user, password, expiration, thumbprint):\n    \"\"\"\n    Create a user account, with 'user', 'password', 'expiration', ssh keys\n    and sudo permissions.\n    Returns None if successful, error string on failure.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry != None and userentry[2] < uidmin:\n        Error(\"CreateAccount: \" + user + \" is a system user. 
Will not set password.\")\n        return \"Failed to set password for system user: \" + user + \" (0x06).\"\n    if userentry == None:\n        command = \"useradd -m \" + user\n        if expiration != None:\n            command += \" -e \" + expiration.split('.')[0]\n        if Run(command):\n            Error(\"Failed to create user account: \" + user)\n            return \"Failed to create user account: \" + user + \" (0x07).\"\n    else:\n        Log(\"CreateAccount: \" + user + \" already exists. Will update password.\")\n    if password != None:\n        MyDistro.changePass(user, password)\n    try:\n        # for older distros create sudoers.d\n        if not os.path.isdir('/etc/sudoers.d/'):\n            # create the /etc/sudoers.d/ directory\n            os.mkdir('/etc/sudoers.d/')\n            # add the include of sudoers.d to the /etc/sudoers\n            SetFileContents('/etc/sudoers', GetFileContents('/etc/sudoers') + '\\n#includedir /etc/sudoers.d\\n')\n        if password == None:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) NOPASSWD: ALL\\n\")\n        else:\n            SetFileContents(\"/etc/sudoers.d/waagent\", user + \" ALL = (ALL) ALL\\n\")\n        os.chmod(\"/etc/sudoers.d/waagent\", 0o440)\n    except:\n        Error(\"CreateAccount: Failed to configure sudo access for user.\")\n        return \"Failed to configure sudo privileges (0x08).\"\n    home = MyDistro.GetHome()\n    if thumbprint != None:\n        dir = home + \"/\" + user + \"/.ssh\"\n        CreateDir(dir, user, 0o700)\n        pub = dir + \"/id_rsa.pub\"\n        prv = dir + \"/id_rsa\"\n        Run(\"ssh-keygen -y -f \" + thumbprint + \".prv > \" + pub)\n        SetFileContents(prv, GetFileContents(thumbprint + \".prv\"))\n        for f in [pub, prv]:\n            os.chmod(f, 0o600)\n            ChangeOwner(f, user)\n        SetFileContents(dir + \"/authorized_keys\", GetFileContents(pub))\n        ChangeOwner(dir + \"/authorized_keys\", user)\n    Log(\"Created user account: \" + user)\n    return None\n\n\ndef DeleteAccount(user):\n    \"\"\"\n    Delete the 'user'.\n    Clear utmp first, to avoid error.\n    Removes the /etc/sudoers.d/waagent file.\n    \"\"\"\n    userentry = None\n    try:\n        userentry = pwd.getpwnam(user)\n    except:\n        pass\n    if userentry == None:\n        Error(\"DeleteAccount: \" + user + \" not found.\")\n        return\n    uidmin = None\n    try:\n        uidmin = int(GetLineStartingWith(\"UID_MIN\", \"/etc/login.defs\").split()[1])\n    except:\n        pass\n    if uidmin == None:\n        uidmin = 100\n    if userentry[2] < uidmin:\n        Error(\"DeleteAccount: \" + user + \" is a system user. 
Will not delete account.\")\n        return\n    Run(\"> /var/run/utmp\")  # Delete utmp to prevent error if we are the 'user' deleted\n    Run(\"userdel -f -r \" + user)\n    try:\n        os.remove(\"/etc/sudoers.d/waagent\")\n    except:\n        pass\n    return\n\n\ndef IsInRangeInclusive(a, low, high):\n    \"\"\"\n    Return True if 'low' <= a <= 'high'\n    \"\"\"\n    return (a >= low and a <= high)\n\n\ndef IsPrintable(ch):\n    \"\"\"\n    Return True if character is displayable.\n    \"\"\"\n    return (IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or\n            IsInRangeInclusive(ch, Ord('a'), Ord('z')) or\n            IsInRangeInclusive(ch, Ord('0'), Ord('9')))\n\n\ndef HexDump(buffer, size):\n    \"\"\"\n    Return a hex-formatted dump of the first 'size' bytes of 'buffer'.\n    A negative 'size' dumps the whole buffer.\n    \"\"\"\n    if size < 0:\n        size = len(buffer)\n    result = \"\"\n    for i in range(0, size):\n        if (i % 16) == 0:\n            result += \"%06X: \" % i\n        byte = buffer[i]\n        if type(byte) == str:\n            byte = ord(byte.decode('latin1'))\n        result += \"%02X \" % byte\n        if (i & 15) == 7:\n            result += \" \"\n        if ((i + 1) % 16) == 0 or (i + 1) == size:\n            j = i\n            while ((j + 1) % 16) != 0:\n                result += \"   \"\n                if (j & 7) == 7:\n                    result += \" \"\n                j += 1\n            result += \" \"\n            for j in range(i - (i % 16), i + 1):\n                byte = buffer[j]\n                if type(byte) == str:\n                    byte = ord(byte.decode('latin1'))\n                k = '.'\n                if IsPrintable(byte):\n                    k = chr(byte)\n                result += k\n            if (i + 1) != size:\n                result += \"\\n\"\n    return result\n\n\ndef SimpleLog(file_path, message):\n    \"\"\"\n    Prefix each line of 'message' with a timestamp and append it to 'file_path'.\n    \"\"\"\n    if not file_path or len(message) < 1:\n        return\n    t = time.localtime()\n    t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n    lines = re.sub(re.compile(r'^(.)', re.MULTILINE), t + r'\\1', message)\n    with open(file_path, \"ab\") as F:\n        lines = \"\".join(list(filter(lambda x: x in string.printable, lines)))\n        F.write((lines + \"\\n\").encode('ascii','ignore'))\n\n\nclass Logger(object):\n    \"\"\"\n    The Agent's logging assumptions are:\n    For Log and LogWithPrefix, all messages are logged to\n    self.file_path and to self.con_path.  Setting either path\n    parameter to None skips that log.  If Verbose is enabled, messages\n    calling the LogIfVerbose method will be logged to file_path yet\n    not to con_path.  
Error and Warn messages are normal log messages\n    with the 'ERROR:' or 'WARNING:' prefix added.\n    \"\"\"\n\n    def __init__(self, filepath, conpath, verbose=False):\n        \"\"\"\n        Construct an instance of Logger.\n        \"\"\"\n        self.file_path = filepath\n        self.con_path = conpath\n        self.verbose = verbose\n\n    def ThrottleLog(self, counter):\n        \"\"\"\n        Log everything up to 10, every 10 up to 100, then every 100.\n        \"\"\"\n        return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0)\n\n    def LogToFile(self, message):\n        \"\"\"\n        Write 'message' to logfile.\n        \"\"\"\n        if self.file_path:\n            try:\n                with open(self.file_path, \"ab\") as F:\n                    message = \"\".join(list(filter(lambda x: x in string.printable, message)))\n                    F.write((message + \"\\n\").encode('ascii','ignore'))\n            except IOError as e:\n                ##print e\n                pass\n\n    def LogToCon(self, message):\n        \"\"\"\n        Write 'message' to /dev/console.\n        This supports serial port logging if the /dev/console\n        is redirected to ttys0 in kernel boot options.\n        \"\"\"\n        if self.con_path:\n            try:\n                with open(self.con_path, \"wb\") as C:\n                    message = \"\".join(list(filter(lambda x: x in string.printable, message)))\n                    C.write((message + \"\\n\").encode('ascii','ignore'))\n            except IOError as e:\n                pass\n\n    def Log(self, message):\n        \"\"\"\n        Standard Log function.\n        Logs to self.file_path, and con_path\n        \"\"\"\n        self.LogWithPrefix(\"\", message)\n\n    def LogWithPrefix(self, prefix, message):\n        \"\"\"\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        t = time.localtime()\n        t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n        t += prefix\n        for line in message.split('\\n'):\n            line = t + line\n            self.LogToFile(line)\n            self.LogToCon(line)\n\n    def NoLog(self, message):\n        \"\"\"\n        Don't Log.\n        \"\"\"\n        pass\n\n    def LogIfVerbose(self, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        \"\"\"\n        self.LogWithPrefixIfVerbose('', message)\n\n    def LogWithPrefixIfVerbose(self, prefix, message):\n        \"\"\"\n        Only log 'message' if global Verbose is True.\n        Prefix each line of 'message' with current time+'prefix'.\n        \"\"\"\n        if self.verbose == True:\n            t = time.localtime()\n            t = \"%04u/%02u/%02u %02u:%02u:%02u \" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n            t += prefix\n            for line in message.split('\\n'):\n                line = t + line\n                self.LogToFile(line)\n                self.LogToCon(line)\n\n    def Warn(self, message):\n        \"\"\"\n        Prepend the text \"WARNING:\" to the prefix for each line in 'message'.\n        \"\"\"\n        self.LogWithPrefix(\"WARNING:\", message)\n\n    def Error(self, message):\n        \"\"\"\n        Call ErrorWithPrefix(message).\n        \"\"\"\n        ErrorWithPrefix(\"\", message)\n\n    def ErrorWithPrefix(self, prefix, message):\n        \"\"\"\n        Prepend the text \"ERROR:\" to the 
prefix for each line in 'message'.\n        Errors are written to the logfile and /dev/console.\n        \"\"\"\n        self.LogWithPrefix(\"ERROR:\", message)\n\n\ndef LoggerInit(log_file_path, log_con_path, verbose=False):\n    \"\"\"\n    Create log object and export its methods to global scope.\n    \"\"\"\n    global Log, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger\n    l = Logger(log_file_path, log_con_path, verbose)\n    Log, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger = l.Log, l.LogWithPrefix, l.LogIfVerbose, l.LogWithPrefixIfVerbose, l.Error, l.ErrorWithPrefix, l.Warn, l.NoLog, l.ThrottleLog, l\n\n\nclass HttpResourceGoneError(Exception):\n    pass\n\n\nclass Util(object):\n    \"\"\"\n    Http communication class.\n    Base of the GoalState and Agent classes.\n    \"\"\"\n    RetryWaitingInterval = 10\n\n    def __init__(self):\n        self.Endpoint = None\n\n    def _ParseUrl(self, url):\n        secure = False\n        host = self.Endpoint\n        path = url\n        port = None\n\n        # \"http[s]://hostname[:port][/]\"\n        if url.startswith(\"http://\"):\n            url = url[7:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n        elif url.startswith(\"https://\"):\n            secure = True\n            url = url[8:]\n            if \"/\" in url:\n                host = url[0: url.index(\"/\")]\n                path = url[url.index(\"/\"):]\n            else:\n                host = url\n                path = \"/\"\n\n        if host is None:\n            raise ValueError(\"Host is invalid:{0}\".format(url))\n\n        if (\":\" in host):\n            pos = host.rfind(\":\")\n            port = int(host[pos + 1:])\n            host = host[0:pos]\n\n        return host, port, secure, path\n\n    def GetHttpProxy(self, secure):\n        \"\"\"\n        Get the proxy host and port from the HttpProxy.Host and\n        HttpProxy.Port configuration entries.\n        Username and password are not supported for now.\n        \"\"\"\n        host = Config.get(\"HttpProxy.Host\")\n        port = Config.get(\"HttpProxy.Port\")\n        return (host, port)\n\n    def _HttpRequest(self, method, host, path, port=None, data=None, secure=False,\n                     headers=None, proxyHost=None, proxyPort=None):\n        resp = None\n        conn = None\n        try:\n            if secure:\n                port = 443 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httplibs.HTTPSConnection(proxyHost, proxyPort, timeout=10)\n                    conn.set_tunnel(host, port)\n                    # If proxy is used, full url is needed.\n                    path = \"https://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httplibs.HTTPSConnection(host, port, timeout=10)\n            else:\n                port = 80 if port is None else port\n                if proxyHost is not None and proxyPort is not None:\n                    conn = httplibs.HTTPConnection(proxyHost, proxyPort, timeout=10)\n                    # If proxy is used, full url is needed.\n                    path = \"http://{0}:{1}{2}\".format(host, port, path)\n                else:\n                    conn = httplibs.HTTPConnection(host, port, timeout=10)\n            if 
headers == None:\n                conn.request(method, path, data)\n            else:\n                conn.request(method, path, data, headers)\n            resp = conn.getresponse()\n        except httplibs.HTTPException as e:\n            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))\n        except IOError as e:\n            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))\n        return resp\n\n    def HttpRequest(self, method, url, data=None,\n                    headers=None, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Send an http request to the server.\n        On error, sleep 10 seconds between attempts, up to maxRetry times.\n        Return the response or None.\n        \"\"\"\n        LogIfVerbose(\"HTTP Req: {0} {1}\".format(method, url))\n        LogIfVerbose(\"HTTP Req: Data={0}\".format(data))\n        LogIfVerbose(\"HTTP Req: Header={0}\".format(headers))\n        try:\n            host, port, secure, path = self._ParseUrl(url)\n        except ValueError as e:\n            Error(\"Failed to parse url:{0}\".format(url))\n            return None\n\n        # Check proxy\n        proxyHost, proxyPort = (None, None)\n        if chkProxy:\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        # If the httplib module is not built with ssl support, fall back to http.\n        if secure and not hasattr(httplibs, \"HTTPSConnection\"):\n            Warn(\"httplib is not built with ssl support\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        # If the httplib module doesn't support https tunnelling, fall back to http.\n        if secure and \\\n                proxyHost is not None and \\\n                proxyPort is not None and \\\n                not hasattr(httplibs.HTTPSConnection, \"set_tunnel\"):\n            Warn(\"httplib doesn't support https tunnelling (new in python 2.7)\")\n            secure = False\n            proxyHost, proxyPort = self.GetHttpProxy(secure)\n\n        resp = self._HttpRequest(method, host, path, port=port, data=data,\n                                 secure=secure, headers=headers,\n                                 proxyHost=proxyHost, proxyPort=proxyPort)\n        for retry in range(0, maxRetry):\n            if resp is not None and \\\n                    (resp.status == httplibs.OK or\n                     resp.status == httplibs.CREATED or\n                     resp.status == httplibs.ACCEPTED):\n                return resp\n\n            if resp is not None and resp.status == httplibs.GONE:\n                raise HttpResourceGoneError(\"Http resource gone.\")\n\n            Error(\"Retry={0}\".format(retry))\n            Error(\"HTTP Req: {0} {1}\".format(method, url))\n            Error(\"HTTP Req: Data={0}\".format(data))\n            Error(\"HTTP Req: Header={0}\".format(headers))\n            if resp is None:\n                Error(\"HTTP Err: response is empty. 
{0}\".format(retry))\n            else:\n                Error(\"HTTP Err: Status={0}\".format(resp.status))\n                Error(\"HTTP Err: Reason={0}\".format(resp.reason))\n                Error(\"HTTP Err: Header={0}\".format(resp.getheaders()))\n                Error(\"HTTP Err: Body={0}\".format(resp.read()))\n\n            time.sleep(self.__class__.RetryWaitingInterval)\n            resp = self._HttpRequest(method, host, path, port=port, data=data,\n                                     secure=secure, headers=headers,\n                                     proxyHost=proxyHost, proxyPort=proxyPort)\n\n        return None\n\n    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"GET\", url, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"HEAD\", url, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"POST\", url, data=data, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"PUT\", url, data=data, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):\n        return self.HttpRequest(\"DELETE\", url, headers=headers,\n                                maxRetry=maxRetry, chkProxy=chkProxy)\n\n    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url'.\n        \"\"\"\n        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry,\n                            chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):\n        \"\"\"\n        Return data from an HTTP get on 'url' with\n        x-ms-agent-name and x-ms-version\n        headers.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName,\n            \"x-ms-version\": ProtocolVersion\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3,\n                                 chkProxy=False):\n        \"\"\"\n        Return output of get using ssl cert.\n        \"\"\"\n        resp = self.HttpGet(url, headers={\n            \"x-ms-agent-name\": GuestAgentName,\n            \"x-ms-version\": ProtocolVersion,\n            \"x-ms-cipher-name\": \"DES_EDE3_CBC\",\n            \"x-ms-guest-agent-public-x509-cert\": transportCert\n        }, maxRetry=maxRetry, chkProxy=chkProxy)\n        return resp.read() if resp is not None else None\n\n    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):\n        headers = {\n            \"x-ms-agent-name\": GuestAgentName,\n            \"Content-Type\": \"text/xml; charset=utf-8\",\n            \"x-ms-version\": ProtocolVersion\n        }\n        try:\n            return self.HttpPost(url, data=data, headers=headers,\n                                 maxRetry=maxRetry, chkProxy=chkProxy)\n        except HttpResourceGoneError as e:\n          
  Error(\"Failed to post: {0} {1}\".format(url, e))\n            return None\n\n\n__StorageVersion = \"2014-02-14\"\n\n\ndef GetBlobType(url):\n    restutil = Util()\n    # Check blob type\n    LogIfVerbose(\"Check blob type.\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    blobPropResp = restutil.HttpHead(url, {\n        \"x-ms-date\": timestamp,\n        'x-ms-version': __StorageVersion\n    }, chkProxy=True);\n    blobType = None\n    if blobPropResp is None:\n        Error(\"Can't get status blob type.\")\n        return None\n    blobType = blobPropResp.getheader(\"x-ms-blob-type\")\n    LogIfVerbose(\"Blob type={0}\".format(blobType))\n    return blobType\n\n\ndef PutBlockBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Upload block blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    ret = restutil.HttpPut(url, data, {\n        \"x-ms-date\": timestamp,\n        \"x-ms-blob-type\": \"BlockBlob\",\n        \"Content-Length\": str(len(data)),\n        \"x-ms-version\": __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to upload block blob for status.\")\n        return -1\n    return 0\n\n\ndef PutPageBlob(url, data):\n    restutil = Util()\n    LogIfVerbose(\"Replace old page blob\")\n    timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n    # Align to 512 bytes\n    pageBlobSize = ((len(data) + 511) / 512) * 512\n    ret = restutil.HttpPut(url, \"\", {\n        \"x-ms-date\": timestamp,\n        \"x-ms-blob-type\": \"PageBlob\",\n        \"Content-Length\": \"0\",\n        \"x-ms-blob-content-length\": str(pageBlobSize),\n        \"x-ms-version\": __StorageVersion\n    }, chkProxy=True)\n    if ret is None:\n        Error(\"Failed to clean up page blob for status\")\n        return -1\n\n    if url.index('?') < 0:\n        url = \"{0}?comp=page\".format(url)\n    else:\n        url = \"{0}&comp=page\".format(url)\n\n    LogIfVerbose(\"Upload page blob\")\n    pageMax = 4 * 1024 * 1024  # Max page size: 4MB\n    start = 0\n    end = 0\n    while end < len(data):\n        end = min(len(data), start + pageMax)\n        contentSize = end - start\n        # Align to 512 bytes\n        pageEnd = ((end + 511) / 512) * 512\n        bufSize = pageEnd - start\n        buf = bytearray(bufSize)\n        buf[0: contentSize] = data[start: end]\n        if sys.version_info > (3,):\n            buffer = memoryview\n        ret = restutil.HttpPut(url, buffer(buf), {\n            \"x-ms-date\": timestamp,\n            \"x-ms-range\": \"bytes={0}-{1}\".format(start, pageEnd - 1),\n            \"x-ms-page-write\": \"update\",\n            \"x-ms-version\": __StorageVersion,\n            \"Content-Length\": str(pageEnd - start)\n        }, chkProxy=True)\n        if ret is None:\n            Error(\"Failed to upload page blob for status\")\n            return -1\n        start = end\n    return 0\n\n\ndef UploadStatusBlob(url, data):\n    LogIfVerbose(\"Upload status blob\")\n    LogIfVerbose(\"Status={0}\".format(data))\n    blobType = GetBlobType(url)\n\n    if blobType == \"BlockBlob\":\n        return PutBlockBlob(url, data)\n    elif blobType == \"PageBlob\":\n        return PutPageBlob(url, data)\n    else:\n        Error(\"Unknown blob type: {0}\".format(blobType))\n        return -1\n\n\nclass TCPHandler(SocketServers.BaseRequestHandler):\n    \"\"\"\n    Callback object for LoadBalancerProbeServer.\n    Recv and send LB probe messages.\n    \"\"\"\n\n    def __init__(self, 
lb_probe):\n        super(TCPHandler, self).__init__()\n        self.lb_probe = lb_probe\n\n    def GetHttpDateTimeNow(self):\n        \"\"\"\n        Return formatted gmtime, e.g. \"Date: Fri, 25 Mar 2011 04:53:10 GMT\"\n        \"\"\"\n        return time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n\n    def handle(self):\n        \"\"\"\n        Log LB probe messages, read the socket buffer,\n        send the LB probe response back to the server.\n        \"\"\"\n        self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000\n        log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)]\n        strCounter = str(self.lb_probe.ProbeCounter)\n        if self.lb_probe.ProbeCounter == 1:\n            Log(\"Receiving LB probes.\")\n        log(\"Received LB probe # \" + strCounter)\n        self.request.recv(1024)\n        self.request.send(\n            \"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\nContent-Type: text/html\\r\\nDate: \" + self.GetHttpDateTimeNow() + \"\\r\\n\\r\\nOK\")\n\n\nclass LoadBalancerProbeServer(object):\n    \"\"\"\n    Threaded object to receive and send LB probe messages.\n    Load Balancer probe messages must be answered,\n    or this node may be shut down.\n    \"\"\"\n\n    def __init__(self, port):\n        self.ProbeCounter = 0\n        self.server = SocketServers.TCPServer((self.get_ip(), port), TCPHandler)\n        self.server_thread = threading.Thread(target=self.server.serve_forever)\n        self.server_thread.setDaemon(True)\n        self.server_thread.start()\n\n    def shutdown(self):\n        self.server.shutdown()\n\n    def get_ip(self):\n        for retry in range(1, 6):\n            ip = MyDistro.GetIpv4Address()\n            if ip is None:\n                Log(\"LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry \" + str(retry + 1))\n                time.sleep(10)\n            else:\n                return ip\n\n\nclass ConfigurationProvider(object):\n    \"\"\"\n    Parse and store key:value pairs from waagent.conf\n    \"\"\"\n\n    def __init__(self, walaConfigFile):\n        self.values = dict()\n        if 'MyDistro' not in globals():\n            global MyDistro\n            MyDistro = GetMyDistro()\n        if walaConfigFile is None:\n            walaConfigFile = MyDistro.getConfigurationPath()\n        if not os.path.isfile(walaConfigFile):\n            raise Exception(\"Missing configuration in {0}\".format(walaConfigFile))\n        try:\n            for line in GetFileContents(walaConfigFile).split('\\n'):\n                if not line.startswith(\"#\") and \"=\" in line:\n                    parts = line.split()[0].split('=')\n                    value = parts[1].strip(\"\\\" \")\n                    if value != \"None\":\n                        self.values[parts[0]] = value\n                    else:\n                        self.values[parts[0]] = None\n        except:\n            Error(\"Unable to parse {0}\".format(walaConfigFile))\n            raise\n        return\n\n    def get(self, key):\n        return self.values.get(key)\n\n\nclass EnvMonitor(object):\n    \"\"\"\n    Monitor changes to dhcp and hostname.\n    If a dhcp client process restart has occurred, reset routes and dhcp with the fabric.\n    \"\"\"\n\n    def __init__(self):\n        self.shutdown = False\n        self.HostName = socket.gethostname()\n        self.server_thread = threading.Thread(target=self.monitor)\n        self.server_thread.setDaemon(True)\n        
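# monitor() below runs as a daemon thread so it never blocks agent shutdown\n        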
self.server_thread.start()\n        self.published = False\n\n    def monitor(self):\n        \"\"\"\n        Monitor the dhcp client pid and hostname.\n        If a dhcp client process restart has occurred, reset routes and dhcp with the fabric.\n        \"\"\"\n        publish = Config.get(\"Provisioning.MonitorHostName\")\n        dhcpcmd = MyDistro.getpidcmd + ' ' + MyDistro.getDhcpClientName()\n        dhcppid = RunGetOutput(dhcpcmd)[1]\n        while not self.shutdown:\n            for a in RulesFiles:\n                if os.path.isfile(a):\n                    if os.path.isfile(GetLastPathElement(a)):\n                        os.remove(GetLastPathElement(a))\n                    shutil.move(a, \".\")\n                    Log(\"EnvMonitor: Moved \" + a + \" -> \" + LibDir)\n            MyDistro.setScsiDiskTimeout()\n            if publish is not None and publish.lower().startswith(\"y\"):\n                try:\n                    if socket.gethostname() != self.HostName:\n                        Log(\"EnvMonitor: Detected host name change: \" + self.HostName + \" -> \" + socket.gethostname())\n                        self.HostName = socket.gethostname()\n                        WaAgent.UpdateAndPublishHostName(self.HostName)\n                        dhcppid = RunGetOutput(dhcpcmd)[1]\n                        self.published = True\n                except:\n                    pass\n            else:\n                self.published = True\n            pid = \"\"\n            if not os.path.isdir(\"/proc/\" + dhcppid.strip()):\n                pid = RunGetOutput(dhcpcmd)[1]\n            if pid != \"\" and pid != dhcppid:\n                Log(\"EnvMonitor: Detected dhcp client restart. Restoring routing table.\")\n                WaAgent.RestoreRoutes()\n                dhcppid = pid\n            for child in Children:\n                if child.poll() is not None:\n                    Children.remove(child)\n            time.sleep(5)\n\n    def SetHostName(self, name):\n        \"\"\"\n        Generic call to MyDistro.setHostname(name).\n        Complain to Log on error.\n        \"\"\"\n        if socket.gethostname() == name:\n            self.published = True\n        elif MyDistro.setHostname(name):\n            Error(\"Error: SetHostName: Cannot set hostname to \" + name)\n            return (\"Error: SetHostName: Cannot set hostname to \" + name)\n\n    def IsHostnamePublished(self):\n        \"\"\"\n        Return self.published\n        \"\"\"\n        return self.published\n\n    def ShutdownService(self):\n        \"\"\"\n        Stop server communication and join the thread to the main thread.\n        \"\"\"\n        self.shutdown = True\n        self.server_thread.join()\n\n\nclass Certificates(object):\n    \"\"\"\n    Object containing certificates of host and provisioned user.\n    Parses and splits certificates into files.\n    \"\"\"\n\n    #     <CertificateFile xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"certificates10.xsd\">\n    #     <Version>2010-12-15</Version>\n    #     <Incarnation>2</Incarnation>\n    #     <Format>Pkcs7BlobWithPfxContents</Format>\n    #     <Data>MIILTAY...\n    #     </Data>\n    #     </CertificateFile>\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset the Role and Incarnation.\n        \"\"\"\n        self.Incarnation = None\n        self.Role = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse multiple certificates into separate 
files.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"Certificates.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [\"CertificateFile\", \"Version\", \"Incarnation\",\n                  \"Format\", \"Data\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"Certificates.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"CertificateFile\":\n            Error(\"Certificates.Parse: root not CertificateFile\")\n            return None\n        SetFileContents(\"Certificates.p7m\",\n                        \"MIME-Version: 1.0\\n\"\n                        + \"Content-Disposition: attachment; filename=\\\"Certificates.p7m\\\"\\n\"\n                        + \"Content-Type: application/x-pkcs7-mime; name=\\\"Certificates.p7m\\\"\\n\"\n                        + \"Content-Transfer-Encoding: base64\\n\\n\"\n                        + GetNodeTextData(dom.getElementsByTagName(\"Data\")[0]))\n        if Run(\n                                        Openssl + \" cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | \" + Openssl + \" pkcs12 -nodes -password pass: -out Certificates.pem\"):\n            Error(\"Certificates.Parse: Failed to extract certificates from CMS message.\")\n            return self\n        # There may be multiple certificates in this package. Split them.\n        file = open(\"Certificates.pem\")\n        pindex = 1\n        cindex = 1\n        output = open(\"temp.pem\", \"w\")\n        for line in file.readlines():\n            output.write(line)\n            if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$', line):\n                output.close()\n                if re.match(r'[-]+END .*?KEY[-]+$', line):\n                    os.rename(\"temp.pem\", str(pindex) + \".prv\")\n                    pindex += 1\n                else:\n                    os.rename(\"temp.pem\", str(cindex) + \".crt\")\n                    cindex += 1\n                output = open(\"temp.pem\", \"w\")\n        output.close()\n        os.remove(\"temp.pem\")\n        keys = dict()\n        index = 1\n        filename = str(index) + \".crt\"\n        while os.path.isfile(filename):\n            thumbprint = \\\n            (RunGetOutput(Openssl + \" x509 -in \" + filename + \" -fingerprint -noout\")[1]).rstrip().split('=')[\n                1].replace(':', '').upper()\n            pubkey = RunGetOutput(Openssl + \" x509 -in \" + filename + \" -pubkey -noout\")[1]\n            keys[pubkey] = thumbprint\n            os.rename(filename, thumbprint + \".crt\")\n            os.chmod(thumbprint + \".crt\", 0o600)\n            MyDistro.setSelinuxContext(thumbprint + '.crt', 'unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".crt\"\n        index = 1\n        filename = str(index) + \".prv\"\n        while os.path.isfile(filename):\n            pubkey = RunGetOutput(Openssl + \" rsa -in \" + filename + \" -pubout 2> /dev/null \")[1]\n            os.rename(filename, keys[pubkey] + \".prv\")\n            os.chmod(keys[pubkey] + \".prv\", 0o600)\n            MyDistro.setSelinuxContext(keys[pubkey] + '.prv', 'unconfined_u:object_r:ssh_home_t:s0')\n            index += 1\n            filename = str(index) + \".prv\"\n        return self\n\n\nclass ExtensionsConfig(object):\n    \"\"\"\n    Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent.\n    Install 
if <enabled>true</enabled>, remove if it is set to false.\n    \"\"\"\n\n    # <?xml version=\"1.0\" encoding=\"utf-8\"?>\n    # <Extensions version=\"1.0.0.0\" goalStateIncarnation=\"6\"><Plugins>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\"\n    # location=\"http://previewusnorthcache.blob.core.test-cint.azure-test.net/d84b216d00bf4d96982be531539e1513/OSTCExtensions_ExampleHandlerLinux_usnorth_manifest.xml\"\n    # config=\"\" state=\"enabled\" autoUpgrade=\"false\" runAsStartupTask=\"false\" isJson=\"true\" />\n    # </Plugins>\n    # <PluginSettings>\n    #  <Plugin name=\"OSTCExtensions.ExampleHandlerLinux\" version=\"1.5\">\n    #    <RuntimeSettings seqNo=\"2\">{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\n    # \"protectedSettings\":\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR\n    # Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6\n    # tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X\n    # v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh\n    # kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}</RuntimeSettings>\n    #  </Plugin>\n    # </PluginSettings>\n\n\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset members.\n        \"\"\"\n        self.Extensions = None\n        self.Plugins = None\n        self.Util = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Write the configuration to the file ExtensionsConfig.xml.\n        Log plugin specific activity to /var/log/azure/<Publisher>.<PluginName>/<Version>/CommandExecution.log.\n        If state is enabled:\n            if the plugin is installed:\n                if the new plugin's version is higher and the upgrade is allowed\n                (DisallowMajorVersionUpgrade is false, or the major version is unchanged):\n                    download the new archive\n                    do the updateCommand\n                    disable the old plugin and remove it\n                    enable the new plugin\n                if the new plugin's version is the same or lower:\n                    create the new .settings file from the configuration received\n                    do the enableCommand\n            if the plugin is not installed:\n                download/unpack the archive and call the installCommand/enableCommand\n        if state is disabled:\n            call disableCommand\n        if state is uninstall:\n            call uninstallCommand\n            remove the old plugin directory.\n        \"\"\"\n        self.reinitialize()\n        self.Util = Util()\n        dom = xml.dom.minidom.parseString(xmlText)\n        LogIfVerbose(xmlText)\n        self.plugin_log_dir = '/var/log/azure'\n        if not os.path.exists(self.plugin_log_dir):\n            os.mkdir(self.plugin_log_dir)\n        try:\n            self.Extensions = dom.getElementsByTagName(\"Extensions\")\n            pg = dom.getElementsByTagName(\"Plugins\")\n            if len(pg) > 0:\n                self.Plugins = pg[0].getElementsByTagName(\"Plugin\")\n            else:\n                
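# the goal state contained no <Plugins> element\n                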
self.Plugins = []\n            incarnation = self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n            SetFileContents('ExtensionsConfig.' + incarnation + '.xml', xmlText)\n        except Exception as e:\n            Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e))\n            return None\n        for p in self.Plugins:\n            if len(p.getAttribute(\"location\")) < 1:  # this plugin is inside the PluginSettings\n                continue\n            p.setAttribute('restricted', 'false')\n            previous_version = None\n            version = p.getAttribute(\"version\")\n            name = p.getAttribute(\"name\")\n            plog_dir = self.plugin_log_dir + '/' + name + '/' + version\n            if not os.path.exists(plog_dir):\n                os.makedirs(plog_dir)\n            p.plugin_log = plog_dir + '/CommandExecution.log'\n            handler = name + '-' + version\n            if p.getAttribute(\"isJson\") != 'true':\n                Error(\"Plugin \" + name + \" version: \" + version + \" is not a JSON Extension.  Skipping.\")\n                continue\n            Log(\"Found Plugin: \" + name + ' version: ' + version)\n            if p.getAttribute(\"state\") == 'disabled' or p.getAttribute(\"state\") == 'uninstall':\n                # disable\n                zip_dir = LibDir + \"/\" + name + '-' + version\n                mfile = None\n                for root, dirs, files in os.walk(zip_dir):\n                    for f in files:\n                        if f == 'HandlerManifest.json':\n                            mfile = os.path.join(root, f)\n                    if mfile is not None:\n                        break\n                if mfile is None:\n                    Error('HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata', manifest)\n                if self.launchCommand(p.plugin_log, name, version, 'disableCommand') is None:\n                    self.SetHandlerState(handler, 'Enabled')\n                    Error('Unable to disable ' + name)\n                    SimpleLog(p.plugin_log, 'ERROR: Unable to disable ' + name)\n                else:\n                    self.SetHandlerState(handler, 'Disabled')\n                    Log(name + ' is disabled')\n                    SimpleLog(p.plugin_log, name + ' is disabled')\n\n                # uninstall if needed\n                if p.getAttribute(\"state\") == 'uninstall':\n                    if self.launchCommand(p.plugin_log, name, version, 'uninstallCommand') is None:\n                        self.SetHandlerState(handler, 'Installed')\n                        Error('Unable to uninstall ' + name)\n                        SimpleLog(p.plugin_log, 'Unable to uninstall ' + name)\n                    else:\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Log(name + ' uninstallCommand completed.')\n                    # remove the plugin\n                    Run('rm -rf ' + LibDir + '/' + name + '-' + version + '*')\n                    Log(name + '-' + version + ' extension files deleted.')\n                    SimpleLog(p.plugin_log, name + '-' + version + ' extension files deleted.')\n\n                continue\n            # state is enabled\n            # if the same plugin exists and the version is newer, or it\n            # does not exist, then download and unzip the new plugin\n            plg_dir = None\n\n            
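# LibDir holds one directory per installed handler version, e.g. (hypothetical)\n            # /var/lib/waagent/OSTCExtensions.ExampleHandlerLinux-1.4 and ...-1.5;\n            # the scan below keeps the highest installed version whose name matches.\n            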
latest_version_installed = LooseVersion(\"0.0\")\n            for item in os.listdir(LibDir):\n                itemPath = os.path.join(LibDir, item)\n                if os.path.isdir(itemPath) and name in item:\n                    try:\n                        # Split the plugin dir name on the last '-' to get the installed plugin name and version\n                        separator = item.rfind('-')\n                        if separator < 0:\n                            continue\n                        installed_plg_name = item[0:separator]\n                        installed_plg_version = LooseVersion(item[separator + 1:])\n\n                        # Check the installed plugin name and compare versions to find the latest installed version\n                        if installed_plg_name == name and installed_plg_version > latest_version_installed:\n                            plg_dir = itemPath\n                            previous_version = str(installed_plg_version)\n                            latest_version_installed = installed_plg_version\n                    except Exception as e:\n                        Warn(\"Invalid plugin dir name: {0} {1}\".format(item, e))\n                        continue\n\n            if plg_dir is None or LooseVersion(version) > LooseVersion(previous_version):\n                location = p.getAttribute(\"location\")\n                Log(\"Downloading plugin manifest: \" + name + \" from \" + location)\n                SimpleLog(p.plugin_log, \"Downloading plugin manifest: \" + name + \" from \" + location)\n\n                self.Util.Endpoint = location.split('/')[2]\n                Log(\"Plugin server is: \" + self.Util.Endpoint)\n                SimpleLog(p.plugin_log, \"Plugin server is: \" + self.Util.Endpoint)\n\n                manifest = self.Util.HttpGetWithoutHeaders(location, chkProxy=True)\n                if manifest is None:\n                    Error(\"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    SimpleLog(p.plugin_log, \"Unable to download plugin manifest \" + name + \" from primary location.  Attempting with failover location.\")\n                    failoverlocation = p.getAttribute(\"failoverlocation\")\n                    self.Util.Endpoint = failoverlocation.split('/')[2]\n                    Log(\"Plugin failover server is: \" + self.Util.Endpoint)\n                    SimpleLog(p.plugin_log, \"Plugin failover server is: \" + self.Util.Endpoint)\n\n                    manifest = self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True)\n                # if the failover location also failed, skip this plugin for this goal state\n                if manifest is None:\n                    AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version,\n                                      \"Download manifest fail \" + failoverlocation)\n                    Log(\"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    SimpleLog(p.plugin_log, \"Plugin manifest \" + name + \" downloading failed from failover location.\")\n                    continue\n\n                filepath = LibDir + \"/\" + name + '.' 
+ incarnation + '.manifest'\n                if os.path.splitext(location)[-1] == '.xml':  # if this is an xml file we may have a BOM\n                    if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128:\n                        manifest = manifest[3:]\n                SetFileContents(filepath, manifest)\n                # Get the bundle url from the manifest\n                p.setAttribute('manifestdata', manifest)\n                man_dom = xml.dom.minidom.parseString(manifest)\n                bundle_uri = \"\"\n                for mp in man_dom.getElementsByTagName(\"Plugin\"):\n                    if GetNodeTextData(mp.getElementsByTagName(\"Version\")[0]) == version:\n                        bundle_uri = GetNodeTextData(mp.getElementsByTagName(\"Uri\")[0])\n                        break\n                if len(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")):\n                    if GetNodeTextData(mp.getElementsByTagName(\"DisallowMajorVersionUpgrade\")[0]) == 'true' \\\n                            and previous_version is not None \\\n                            and previous_version.split('.')[0] != version.split('.')[0]:\n                        Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        SimpleLog(p.plugin_log,\n                                  'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.')\n                        p.setAttribute('restricted', 'true')\n                        continue\n                if len(bundle_uri) < 1:\n                    Error(\"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    SimpleLog(p.plugin_log, \"Unable to fetch Bundle URI from manifest for \" + name + \" v \" + version)\n                    continue\n                Log(\"Bundle URI = \" + bundle_uri)\n                SimpleLog(p.plugin_log, \"Bundle URI = \" + bundle_uri)\n\n                # Download the zipfile archive and save it as '.zip'\n                bundle = self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True)\n                if bundle is None:\n                    AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version,\n                                      \"Download zip fail \" + bundle_uri)\n                    Error(\"Unable to download plugin bundle \" + bundle_uri)\n                    SimpleLog(p.plugin_log, \"Unable to download plugin bundle \" + bundle_uri)\n                    continue\n                AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, \"Download Success\")\n                b = bytearray(bundle)\n                filepath = LibDir + \"/\" + os.path.basename(bundle_uri) + '.zip'\n                SetFileContents(filepath, b)\n                Log(\"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n                SimpleLog(p.plugin_log,\n                          \"Plugin bundle \" + bundle_uri + \" downloaded successfully, length = \" + str(len(bundle)))\n\n                # unpack the archive\n                z = zipfile.ZipFile(filepath)\n                zip_dir = LibDir + \"/\" + name + '-' + version\n                z.extractall(zip_dir)\n                Log('Extracted ' + bundle_uri + ' to ' + zip_dir)\n                SimpleLog(p.plugin_log, 'Extracted ' + bundle_uri + ' to ' + zip_dir)\n\n                # zip archives store no file permissions, so mark all unpacked files u+x\n                
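# (a rough pure-Python alternative, for illustration only:\n                #      for r, _, fs in os.walk(zip_dir):\n                #          for fn in fs:\n                #              os.chmod(os.path.join(r, fn), 0o755)\n                #  the agent itself shells out to find/xargs below)\n                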
Run(\"find \" + zip_dir + \" -type f | xargs chmod  u+x \")\n                # write out the base64 config data so the plugin can process it.\n                mfile = None\n                for root, dirs, files in os.walk(zip_dir):\n                    for f in files:\n                        if f in ('HandlerManifest.json'):\n                            mfile = os.path.join(root, f)\n                    if mfile != None:\n                        break\n                if mfile == None:\n                    Error('HandlerManifest.json not found.')\n                    SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')\n                    continue\n                manifest = GetFileContents(mfile)\n                p.setAttribute('manifestdata', manifest)\n                # create the status and config dirs\n                Run('mkdir -p ' + root + '/status')\n                Run('mkdir -p ' + root + '/config')\n                # write out the configuration data to goalStateIncarnation.settings file in the config path.\n                config = ''\n                seqNo = '0'\n                if len(dom.getElementsByTagName(\"PluginSettings\")) != 0:\n                    pslist = dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                    for ps in pslist:\n                        if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                            Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                            SimpleLog(p.plugin_log, \"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                            config = GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                            seqNo = ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\")\n                            break\n                if config == '':\n                    Log(\"No RuntimeSettings for \" + name + \" V \" + version)\n                    SimpleLog(p.plugin_log, \"No RuntimeSettings for \" + name + \" V \" + version)\n\n                SetFileContents(root + \"/config/\" + seqNo + \".settings\", config)\n                # create HandlerEnvironment.json\n                handler_env = '[{  \"name\": \"' + name + '\", \"seqNo\": \"' + seqNo + '\", \"version\": 1.0,  \"handlerEnvironment\": {    \"logFolder\": \"' + os.path.dirname(\n                    p.plugin_log) + '\",    \"configFolder\": \"' + root + '/config\",    \"statusFolder\": \"' + root + '/status\",    \"heartbeatFile\": \"' + root + '/heartbeat.log\"}}]'\n                SetFileContents(root + '/HandlerEnvironment.json', handler_env)\n                self.SetHandlerState(handler, 'NotInstalled')\n\n                cmd = ''\n                getcmd = 'installCommand'\n                if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion(\n                        previous_version):\n                    previous_handler = name + '-' + previous_version\n                    if self.GetHandlerState(previous_handler) != 'NotInstalled':\n                        getcmd = 'updateCommand'\n                        # disable the old plugin if it exists\n                        if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None:\n                            self.SetHandlerState(previous_handler, 'Enabled')\n                            Error('Unable to disable old plugin ' + name + ' version ' + previous_version)\n             
SimpleLog(p.plugin_log, 'Unable to disable old plugin ' + name + ' version ' + previous_version)\n                        else:\n                            self.SetHandlerState(previous_handler, 'Disabled')\n                            Log(name + ' version ' + previous_version + ' is disabled')\n                            SimpleLog(p.plugin_log, name + ' version ' + previous_version + ' is disabled')\n\n                        try:\n                            Log(\"Copy status file from old plugin dir to new\")\n                            old_plg_dir = plg_dir\n                            new_plg_dir = os.path.join(LibDir, \"{0}-{1}\".format(name, version))\n                            old_ext_status_dir = os.path.join(old_plg_dir, \"status\")\n                            new_ext_status_dir = os.path.join(new_plg_dir, \"status\")\n                            if os.path.isdir(old_ext_status_dir):\n                                for status_file in os.listdir(old_ext_status_dir):\n                                    status_file_path = os.path.join(old_ext_status_dir, status_file)\n                                    if os.path.isfile(status_file_path):\n                                        shutil.copy2(status_file_path, new_ext_status_dir)\n                            mrseq_file = os.path.join(old_plg_dir, \"mrseq\")\n                            if os.path.isfile(mrseq_file):\n                                shutil.copy(mrseq_file, new_plg_dir)\n                        except Exception as e:\n                            Error(\"Failed to copy status file: {0}\".format(e))\n\n                isupgradeSuccess = True\n                if getcmd == 'updateCommand':\n                    if self.launchCommand(p.plugin_log, name, version, getcmd, previous_version) is None:\n                        Error('Update failed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Update failed for ' + name + '-' + version)\n                        isupgradeSuccess = False\n                    else:\n                        Log('Update complete ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Update complete ' + name + '-' + version)\n\n                    # since we updated, call uninstall for the old plugin\n                    if self.launchCommand(p.plugin_log, name, previous_version, 'uninstallCommand') is None:\n                        self.SetHandlerState(previous_handler, 'Installed')\n                        Error('Uninstall failed for ' + name + '-' + previous_version)\n                        SimpleLog(p.plugin_log, 'Uninstall failed for ' + name + '-' + previous_version)\n                        isupgradeSuccess = False\n                    else:\n                        self.SetHandlerState(previous_handler, 'NotInstalled')\n                        Log('Uninstall complete ' + previous_handler)\n                        SimpleLog(p.plugin_log, 'Uninstall complete ' + name + '-' + previous_version)\n\n                    try:\n                        # remove the old plugin dir\n                        if os.path.isdir(plg_dir):\n                            shutil.rmtree(plg_dir)\n                            Log(name + '-' + previous_version + ' extension files deleted.')\n                            SimpleLog(p.plugin_log, name + '-' + previous_version + ' extension files deleted.')\n                    except Exception as e:\n                        Error(\"Failed to remove old plugin directory: {0}\".format(e))\n\n                    
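# report the upgrade outcome (success or failure) as a telemetry event\n                    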
AddExtensionEvent(name, WALAEventOperation.Upgrade, isupgradeSuccess, 0, previous_version)\n                else:  # run install\n                    if self.launchCommand(p.plugin_log, name, version, getcmd) is None:\n                        self.SetHandlerState(handler, 'NotInstalled')\n                        Error('Installation failed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)\n                    else:\n                        self.SetHandlerState(handler, 'Installed')\n                        Log('Installation completed for ' + name + '-' + version)\n                        SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version)\n\n            # end if plg_dir is None or version >= previous\n            # change the incarnation of the settings file so it knows how to name status...\n            zip_dir = LibDir + \"/\" + name + '-' + version\n            mfile = None\n            for root, dirs, files in os.walk(zip_dir):\n                for f in files:\n                    if f == 'HandlerManifest.json':\n                        mfile = os.path.join(root, f)\n                if mfile is not None:\n                    break\n            if mfile is None:\n                Error('HandlerManifest.json not found.')\n                SimpleLog(p.plugin_log, 'HandlerManifest.json not found.')\n                continue\n            manifest = GetFileContents(mfile)\n            p.setAttribute('manifestdata', manifest)\n            config = ''\n            seqNo = '0'\n            if len(dom.getElementsByTagName(\"PluginSettings\")) != 0:\n                try:\n                    pslist = dom.getElementsByTagName(\"PluginSettings\")[0].getElementsByTagName(\"Plugin\")\n                except:\n                    Error('Error parsing ExtensionsConfig.')\n                    SimpleLog(p.plugin_log, 'Error parsing ExtensionsConfig.')\n                    continue\n                for ps in pslist:\n                    if name == ps.getAttribute(\"name\") and version == ps.getAttribute(\"version\"):\n                        Log(\"Found RuntimeSettings for \" + name + \" V \" + version)\n                        SimpleLog(p.plugin_log, \"Found RuntimeSettings for \" + name + \" V \" + version)\n\n                        config = GetNodeTextData(ps.getElementsByTagName(\"RuntimeSettings\")[0])\n                        seqNo = ps.getElementsByTagName(\"RuntimeSettings\")[0].getAttribute(\"seqNo\")\n                        break\n            if config == '':\n                Error(\"No RuntimeSettings for \" + name + \" V \" + version)\n                SimpleLog(p.plugin_log, \"No RuntimeSettings for \" + name + \" V \" + version)\n\n            SetFileContents(root + \"/config/\" + seqNo + \".settings\", config)\n\n            # state is still enabled\n            if (self.GetHandlerState(handler) == 'NotInstalled'):  # run install first if true\n                if self.launchCommand(p.plugin_log, name, version, 'installCommand') is None:\n                    self.SetHandlerState(handler, 'NotInstalled')\n                    Error('Installation failed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version)\n\n                else:\n                    self.SetHandlerState(handler, 'Installed')\n                    Log('Installation completed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 
'Installation completed for ' + name + '-' + version)\n\n            if (self.GetHandlerState(handler) != 'NotInstalled'):\n                if self.launchCommand(p.plugin_log, name, version, 'enableCommand') is None:\n                    self.SetHandlerState(handler, 'Installed')\n                    Error('Enable failed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Enable failed for ' + name + '-' + version)\n                else:\n                    self.SetHandlerState(handler, 'Enabled')\n                    Log('Enable completed for ' + name + '-' + version)\n                    SimpleLog(p.plugin_log, 'Enable completed for ' + name + '-' + version)\n\n            # this plugin's processing is complete\n            Log('Processing completed for ' + name + '-' + version)\n            SimpleLog(p.plugin_log, 'Processing completed for ' + name + '-' + version)\n\n        # end plugin processing loop\n        Log('Finished processing ExtensionsConfig.xml')\n        try:\n            SimpleLog(p.plugin_log, 'Finished processing ExtensionsConfig.xml')\n        except:\n            pass\n\n        return self\n\n    def launchCommand(self, plugin_log, name, version, command, prev_version=None):\n        commandToEventOperation = {\n            \"installCommand\": WALAEventOperation.Install,\n            \"uninstallCommand\": WALAEventOperation.UnIsntall,\n            \"updateCommand\": WALAEventOperation.Upgrade,\n            \"enableCommand\": WALAEventOperation.Enable,\n            \"disableCommand\": WALAEventOperation.Disable,\n        }\n        isSuccess = True\n        start = datetime.datetime.now()\n        r = self.__launchCommandWithoutEventLog(plugin_log, name, version, command, prev_version)\n        if r is None:\n            isSuccess = False\n        Duration = int((datetime.datetime.now() - start).seconds)\n        if commandToEventOperation.get(command):\n            AddExtensionEvent(name, commandToEventOperation[command], isSuccess, Duration, version)\n        return r\n\n    def __launchCommandWithoutEventLog(self, plugin_log, name, version, command, prev_version=None):\n        # get the manifest and read the command\n        mfile = None\n        zip_dir = LibDir + \"/\" + name + '-' + version\n        for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile = os.path.join(root, f)\n            if mfile is not None:\n                break\n        if mfile is None:\n            Error('HandlerManifest.json not found.')\n            SimpleLog(plugin_log, 'HandlerManifest.json not found.')\n            return None\n        manifest = GetFileContents(mfile)\n        try:\n            jsn = json.loads(manifest)\n        except:\n            Error('Error parsing HandlerManifest.json.')\n            SimpleLog(plugin_log, 'Error parsing HandlerManifest.json.')\n            return None\n        if type(jsn) == list:\n            jsn = jsn[0]\n        if 'handlerManifest' in jsn:  # dict.has_key() was removed in Python 3\n            cmd = jsn['handlerManifest'][command]\n        else:\n            Error('Key handlerManifest not found.  Handler cannot be installed.')\n            SimpleLog(plugin_log, 'Key handlerManifest not found.  
Handler cannot be installed.')\n            return None\n\n        if len(cmd) == 0:\n            Error('Unable to read ' + command)\n            SimpleLog(plugin_log, 'Unable to read ' + command)\n            return None\n\n        # for update we send the path of the old installation as an argument\n        arg = ''\n        if prev_version is not None:\n            arg = ' ' + LibDir + '/' + name + '-' + prev_version\n        dirpath = os.path.dirname(mfile)\n        LogIfVerbose('Command is ' + dirpath + '/' + cmd)\n        # launch\n        pid = None\n        try:\n            child = subprocess.Popen(dirpath + '/' + cmd + arg, shell=True, cwd=dirpath, stdout=subprocess.PIPE)\n        except Exception as e:\n            Error('Exception launching ' + cmd + ': ' + str(e))\n            SimpleLog(plugin_log, 'Exception launching ' + cmd + ': ' + str(e))\n            return None  # 'child' is unbound if Popen raised\n\n        pid = child.pid\n        if pid is None or pid < 1:\n            ExtensionChildren.append((-1, root))\n            Error('Error launching ' + cmd + '.')\n            SimpleLog(plugin_log, 'Error launching ' + cmd + '.')\n        else:\n            ExtensionChildren.append((pid, root))\n            Log(\"Spawned \" + cmd + \" PID \" + str(pid))\n            SimpleLog(plugin_log, \"Spawned \" + cmd + \" PID \" + str(pid))\n\n        # wait until install/upgrade is finished\n        timeout = 300  # 5 minutes\n        retry = timeout // 5\n        while retry > 0 and child.poll() is None:\n            LogIfVerbose(cmd + ' still running with PID ' + str(pid))\n            time.sleep(5)\n            retry -= 1\n        if retry == 0:\n            Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid))\n            SimpleLog(plugin_log,\n                      'Process exceeded timeout of ' + str(timeout) + ' seconds. 
Terminating process ' + str(pid))\n\n            os.kill(pid, 9)\n            return None\n        code = child.wait()\n        if code == None or code != 0:\n            Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n            SimpleLog(plugin_log, 'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')')\n\n            return None\n        Log(command + ' completed.')\n        SimpleLog(plugin_log, command + ' completed.')\n\n        return 0\n\n    def ReportHandlerStatus(self):\n        \"\"\"\n        Collect all status reports.\n        \"\"\"\n        # { \"version\": \"1.0\", \"timestampUTC\": \"2014-03-31T21:28:58Z\",\n        # \"aggregateStatus\": {\n        # \"guestAgentStatus\": { \"version\": \"2.0.4PRE\", \"status\": \"Ready\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"GuestAgent is running and accepting new configurations.\" } },\n        # \"handlerAggregateStatus\": [{\n        # \"handlerName\": \"ExampleHandlerLinux\", \"handlerVersion\": \"1.0\", \"status\": \"Ready\", \"runtimeSettingsStatus\": {\n        # \"sequenceNumber\": \"2\", \"settingsStatus\": { \"timestampUTC\": \"2014-03-31T23:46:00Z\", \"status\": { \"name\": \"ExampleHandlerLinux\", \"operation\": \"Command Execution Finished\", \"configurationAppliedTime\": \"2014-03-31T23:46:00Z\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Finished executing command\" },\n        # \"substatus\": [\n        # { \"name\": \"StdOut\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"Goodbye world!\" }  },\n        # { \"name\": \"StdErr\", \"status\": \"success\", \"formattedMessage\": { \"lang\": \"en-US\", \"message\": \"\" } }\n        # ]\n        # } } } }\n        # ]\n        #  }}\n\n        try:\n            incarnation = self.Extensions[0].getAttribute(\"goalStateIncarnation\")\n        except:\n            Error('Error parsing attribute \"goalStateIncarnation\".  Unable to send status reports')\n            return -1\n        status = ''\n        statuses = ''\n        for p in self.Plugins:\n            if p.getAttribute(\"state\") == 'uninstall' or p.getAttribute(\"restricted\") == 'true':\n                continue\n            version = p.getAttribute(\"version\")\n            name = p.getAttribute(\"name\")\n            if p.getAttribute(\"isJson\") != 'true':\n                LogIfVerbose(\"Plugin \" + name + \" version: \" + version + \" is not a JSON Extension.  
Skipping.\")\n                continue\n            reportHeartbeat = False\n            if len(p.getAttribute(\"manifestdata\")) < 1:\n                Error(\"Failed to get manifestdata.\")\n            else:\n                reportHeartbeat = json.loads(p.getAttribute(\"manifestdata\"))[0]['handlerManifest']['reportHeartbeat']\n            if len(statuses) > 0:\n                statuses += ','\n            statuses += self.GenerateAggStatus(name, version, reportHeartbeat)\n        tstamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n        # header\n        # agent state\n        if provisioned == False:\n            if provisionError == None:\n                agent_state = 'Provisioning'\n                agent_msg = 'Guest Agent is starting.'\n            else:\n                agent_state = 'Provisioning Error.'\n                agent_msg = provisionError\n        else:\n            agent_state = 'Ready'\n            agent_msg = 'GuestAgent is running and accepting new configurations.'\n\n        status = '{\"version\":\"1.0\",\"timestampUTC\":\"' + tstamp + '\",\"aggregateStatus\":{\"guestAgentStatus\":{\"version\":\"' + GuestAgentVersion + '\",\"status\":\"' + agent_state + '\",\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"' + agent_msg + '\"}},\"handlerAggregateStatus\":[' + statuses + ']}}'\n        try:\n            uri = GetNodeTextData(self.Extensions[0].getElementsByTagName(\"StatusUploadBlob\")[0]).replace('&amp;', '&')\n        except:\n            Error('Error parsing element \"StatusUploadBlob\".  Unable to send status reports')\n            return -1\n\n        LogIfVerbose('Status report ' + status + ' sent to ' + uri)\n        return UploadStatusBlob(uri, status.encode(\"utf-8\"))\n\n    def GetCurrentSequenceNumber(self, plugin_base_dir):\n        \"\"\"\n        Get the settings file with biggest file number in config folder\n        \"\"\"\n        config_dir = os.path.join(plugin_base_dir, 'config')\n        seq_no = 0\n        for subdir, dirs, files in os.walk(config_dir):\n            for file in files:\n                try:\n                    cur_seq_no = int(os.path.basename(file).split('.')[0])\n                    if cur_seq_no > seq_no:\n                        seq_no = cur_seq_no\n                except ValueError:\n                    continue\n        return str(seq_no)\n\n    def GenerateAggStatus(self, name, version, reportHeartbeat=False):\n        \"\"\"\n        Generate the status which Azure can understand by the status and heartbeat reported by extension\n        \"\"\"\n        plugin_base_dir = LibDir + '/' + name + '-' + version + '/'\n        current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir)\n        status_file = os.path.join(plugin_base_dir, 'status/', current_seq_no + '.status')\n        heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log')\n\n        handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState')\n        agg_state = 'NotReady'\n        handler_state = None\n        status_obj = None\n        status_code = None\n        formatted_message = None\n        localized_message = None\n\n        if os.path.exists(handler_state_file):\n            handler_state = GetFileContents(handler_state_file).lower()\n        if handler_state in HandlerStatusToAggStatus:\n            agg_state = HandlerStatusToAggStatus[handler_state]\n        if reportHeartbeat:\n            if os.path.exists(heartbeat_file):\n                d = int(time.time() - os.stat(heartbeat_file).st_mtime)\n  
if d > 600:  # not updated for more than 10 min\n                    agg_state = 'Unresponsive'\n                else:\n                    try:\n                        heartbeat = json.loads(GetFileContents(heartbeat_file))[0][\"heartbeat\"]\n                        agg_state = heartbeat.get(\"status\")\n                        status_code = heartbeat.get(\"code\")\n                        formatted_message = heartbeat.get(\"formattedMessage\")\n                        localized_message = heartbeat.get(\"message\")\n                    except:\n                        Error(\"Incorrect heartbeat file. Ignoring it.\")\n            else:\n                agg_state = 'Unresponsive'\n        # get the status file reported by the extension\n        if os.path.exists(status_file):\n            # the raw status generated by the extension is an array; get the first item and remove the unnecessary element\n            try:\n                status_obj = json.loads(GetFileContents(status_file))[0]\n                del status_obj[\"version\"]\n            except:\n                Error(\"Incorrect status file. Will NOT report settingsStatus.\")\n        agg_status_obj = {\"handlerName\": name, \"handlerVersion\": version, \"status\": agg_state, \"runtimeSettingsStatus\":\n            {\"sequenceNumber\": current_seq_no}}\n        if status_obj:\n            agg_status_obj[\"runtimeSettingsStatus\"][\"settingsStatus\"] = status_obj\n        if status_code is not None:\n            agg_status_obj[\"code\"] = status_code\n        if formatted_message:\n            agg_status_obj[\"formattedMessage\"] = formatted_message\n        if localized_message:\n            agg_status_obj[\"message\"] = localized_message\n        agg_status_string = json.dumps(agg_status_obj)\n        LogIfVerbose(\"Handler Aggregated Status:\" + agg_status_string)\n        return agg_status_string\n\n    def SetHandlerState(self, handler, state=''):\n        zip_dir = LibDir + \"/\" + handler\n        mfile = None\n        for root, dirs, files in os.walk(zip_dir):\n            for f in files:\n                if f == 'HandlerManifest.json':\n                    mfile = os.path.join(root, f)\n            if mfile is not None:\n                break\n        if mfile is None:\n            Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.')\n            return None\n        Log(\"SetHandlerState: \" + handler + \", \" + state)\n        return SetFileContents(os.path.dirname(mfile) + '/config/HandlerState', state)\n\n    def GetHandlerState(self, handler):\n        handlerState = GetFileContents(handler + '/config/HandlerState')\n        if (handlerState):\n            return handlerState.rstrip('\\r\\n')\n        else:\n            return 'NotInstalled'\n\n\nclass HostingEnvironmentConfig(object):\n    \"\"\"\n    Parse the hosting environment config and store it in\n    HostingEnvironmentConfig.xml\n    \"\"\"\n\n    #\n    # <HostingEnvironmentConfig version=\"1.0.0.0\" goalStateIncarnation=\"1\">\n    #   <StoredCertificates>\n    #     <StoredCertificate name=\"Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption\" certificateId=\"sha1:C093FA5CD3AAE057CB7C4E04532B2E16E07C26CA\" storeName=\"My\" configurationLevel=\"System\" />\n    #   </StoredCertificates>\n    #   <Deployment name=\"db00a7755a5e4e8a8fe4b19bc3b330c3\" guid=\"{ce5a036f-5c93-40e7-8adf-2613631008ab}\" incarnation=\"2\">\n    #     <Service name=\"MyVMRoleService\" guid=\"{00000000-0000-0000-0000-000000000000}\" />\n    #    
 <ServiceInstance name=\"db00a7755a5e4e8a8fe4b19bc3b330c3.1\" guid=\"{d113f4d7-9ead-4e73-b715-b724b5b7842c}\" />\n    #   </Deployment>\n    #   <Incarnation number=\"1\" instance=\"MachineRole_IN_0\" guid=\"{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}\" />\n    #   <Role guid=\"{73d95f1c-6472-e58e-7a1a-523554e11d46}\" name=\"MachineRole\" hostingEnvironmentVersion=\"1\" software=\"\" softwareType=\"ApplicationPackage\" entryPoint=\"\" parameters=\"\" settleTimeSeconds=\"10\" />\n    #   <HostingEnvironmentSettings name=\"full\" Runtime=\"rd_fabric_stable.110217-1402.RuntimePackage_1.0.0.8.zip\">\n    #     <CAS mode=\"full\" />\n    #     <PrivilegeLevel mode=\"max\" />\n    #     <AdditionalProperties><CgiHandlers></CgiHandlers></AdditionalProperties>\n    #   </HostingEnvironmentSettings>\n    #   <ApplicationSettings>\n    #     <Setting name=\"__ModelData\" value=\"&lt;m role=&quot;MachineRole&quot; xmlns=&quot;urn:azure:m:v1&quot;>&lt;r name=&quot;MachineRole&quot;>&lt;e name=&quot;a&quot; />&lt;e name=&quot;b&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput&quot; />&lt;/r>&lt;/m>\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString\" value=\"DefaultEndpointsProtocol=http;AccountName=osimages;AccountKey=DNZQ...\" />\n    #     <Setting name=\"Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled\" value=\"true\" />\n    #   </ApplicationSettings>\n    #   <ResourceReferences>\n    #     <Resource name=\"DiagnosticStore\" type=\"directory\" request=\"Microsoft.Cis.Fabric.Controller.Descriptions.ServiceDescription.Data.Policy\" sticky=\"true\" size=\"1\" path=\"db00a7755a5e4e8a8fe4b19bc3b330c3.MachineRole.DiagnosticStore\\\" disableQuota=\"false\" />\n    #   </ResourceReferences>\n    # </HostingEnvironmentConfig>\n    #\n    def __init__(self):\n        self.reinitialize()\n\n    def reinitialize(self):\n        \"\"\"\n        Reset Members.\n        \"\"\"\n        self.StoredCertificates = None\n        self.Deployment = None\n        self.Incarnation = None\n        self.Role = None\n        self.HostingEnvironmentSettings = None\n        self.ApplicationSettings = None\n        self.Certificates = None\n        self.ResourceReferences = None\n\n    def Parse(self, xmlText):\n        \"\"\"\n        Parse and create HostingEnvironmentConfig.xml.\n        \"\"\"\n        self.reinitialize()\n        SetFileContents(\"HostingEnvironmentConfig.xml\", xmlText)\n        dom = xml.dom.minidom.parseString(xmlText)\n        for a in [\"HostingEnvironmentConfig\", \"Deployment\", \"Service\",\n                  \"ServiceInstance\", \"Incarnation\", \"Role\", ]:\n            if not dom.getElementsByTagName(a):\n                Error(\"HostingEnvironmentConfig.Parse: Missing \" + a)\n                return None\n        node = dom.childNodes[0]\n        if node.localName != \"HostingEnvironmentConfig\":\n            Error(\"HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig\")\n            return None\n        self.ApplicationSettings = dom.getElementsByTagName(\"Setting\")\n        self.Certificates = dom.getElementsByTagName(\"StoredCertificate\")\n        return self\n\n    def DecryptPassword(self, e):\n        \"\"\"\n        Return decrypted password.\n        \"\"\"\n        SetFileContents(\"password.p7m\",\n                        \"MIME-Version: 1.0\\n\"\n                        + \"Content-Disposition: attachment; 
filename=\\\"password.p7m\\\"\\n\"\n                        + \"Content-Type: application/x-pkcs7-mime; name=\\\"password.p7m\\\"\\n\"\n                        + \"Content-Transfer-Encoding: base64\\n\\n\"\n                        + textwrap.fill(e, 64))\n        return RunGetOutput(Openssl + \" cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem\")[\n            1]\n\n    def ActivateResourceDisk(self):\n        return MyDistro.ActivateResourceDisk()\n\n    def Process(self):\n        \"\"\"\n        Execute ActivateResourceDisk in separate thread.\n        Create the user account.\n        Launch ConfigurationConsumer if specified in the config.\n        \"\"\"\n        no_thread = False\n        if DiskActivated == False:\n            for m in inspect.getmembers(MyDistro):\n                if 'ActivateResourceDiskNoThread' in m:\n                    no_thread = True\n                    break\n            if no_thread == True:\n                MyDistro.ActivateResourceDiskNoThread()\n            else:\n                diskThread = threading.Thread(target=self.ActivateResourceDisk)\n                diskThread.start()\n        User = None\n        Pass = None\n        Expiration = None\n        Thumbprint = None\n        for b in self.ApplicationSettings:\n            sname = b.getAttribute(\"name\")\n            svalue = b.getAttribute(\"value\")\n        if User != None and Pass != None:\n            if User != \"root\" and User != \"\" and Pass != \"\":\n                CreateAccount(User, Pass, Expiration, Thumbprint)\n            else:\n                Error(\"Not creating user account: \" + User)\n        for c in self.Certificates:\n            csha1 = c.getAttribute(\"certificateId\").split(':')[1].upper()\n            if os.path.isfile(csha1 + \".prv\"):\n                Log(\"Private key with thumbprint: \" + csha1 + \" was retrieved.\")\n            if os.path.isfile(csha1 + \".crt\"):\n                Log(\"Public cert with thumbprint: \" + csha1 + \" was retrieved.\")\n        program = Config.get(\"Role.ConfigurationConsumer\")\n        if program != None:\n            try:\n                Children.append(subprocess.Popen([program, LibDir + \"/HostingEnvironmentConfig.xml\"]))\n            except OSError as e:\n                ErrorWithPrefix('HostingEnvironmentConfig.Process',\n                                'Exception: ' + str(e) + ' occured launching ' + program)\n\n\n\nclass WALAEvent(object):\n    def __init__(self):\n\n        self.providerId = \"\"\n        self.eventId = 1\n\n        self.OpcodeName = \"\"\n        self.KeywordName = \"\"\n        self.TaskName = \"\"\n        self.TenantName = \"\"\n        self.RoleName = \"\"\n        self.RoleInstanceName = \"\"\n        self.ContainerId = \"\"\n        self.ExecutionMode = \"IAAS\"\n        self.OSVersion = \"\"\n        self.GAVersion = \"\"\n        self.RAM = 0\n        self.Processors = 0\n\n    def ToXml(self):\n        strEventid = u'<Event id=\"{0}\"/>'.format(self.eventId)\n        strProviderid = u'<Provider id=\"{0}\"/>'.format(self.providerId)\n        strRecordFormat = u'<Param Name=\"{0}\" Value=\"{1}\" T=\"{2}\" />'\n        strRecordNoQuoteFormat = u'<Param Name=\"{0}\" Value={1} T=\"{2}\" />'\n        strMtStr = u'mt:wstr'\n        strMtUInt64 = u'mt:uint64'\n        strMtBool = u'mt:bool'\n        strMtFloat = u'mt:float64'\n        strEventsData = u\"\"\n\n        for attName in self.__dict__:\n            if attName in [\"eventId\", \"filedCount\", 
\"providerId\"]:\n                continue\n\n            attValue = self.__dict__[attName]\n            if type(attValue) is int:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtUInt64)\n                continue\n            if type(attValue) is str:\n                attValue = xml.sax.saxutils.quoteattr(attValue)\n                strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr)\n                continue\n            if str(type(attValue)).count(\"'unicode'\") > 0:\n                attValue = xml.sax.saxutils.quoteattr(attValue)\n                strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr)\n                continue\n            if type(attValue) is bool:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtBool)\n                continue\n            if type(attValue) is float:\n                strEventsData += strRecordFormat.format(attName, attValue, strMtFloat)\n                continue\n\n            Log(\"Warning: property \" + attName + \":\" + str(type(attValue)) + \":type\" + str(\n                type(attValue)) + \"Can't convert to events data:\" + \":type not supported\")\n\n        return u\"<Data>{0}{1}{2}</Data>\".format(strProviderid, strEventid, strEventsData)\n\n    def Save(self):\n        eventfolder = LibDir + \"/events\"\n        if not os.path.exists(eventfolder):\n            os.mkdir(eventfolder)\n            os.chmod(eventfolder, 0o700)\n        if len(os.listdir(eventfolder)) > 1000:\n            raise Exception(\"WriteToFolder:Too many file under \" + eventfolder + \" exit\")\n\n        filename = os.path.join(eventfolder, str(int(time.time() * 1000000)))\n        with open(filename + \".tmp\", 'wb+') as hfile:\n            hfile.write(self.ToXml().encode(\"utf-8\"))\n        os.rename(filename + \".tmp\", filename + \".tld\")\n\n\nclass WALAEventOperation:\n    HeartBeat = \"HeartBeat\"\n    Provision = \"Provision\"\n    Install = \"Install\"\n    UnIsntall = \"UnInstall\"\n    Disable = \"Disable\"\n    Enable = \"Enable\"\n    Download = \"Download\"\n    Upgrade = \"Upgrade\"\n    Update = \"Update\"\n\n\ndef AddExtensionEvent(name, op, isSuccess, duration=0, version=\"1.0\", message=\"\", type=\"\", isInternal=False):\n    event = ExtensionEvent()\n    event.Name = name\n    event.Version = version\n    event.IsInternal = isInternal\n    event.Operation = op\n    event.OperationSuccess = isSuccess\n    event.Message = message\n    event.Duration = duration\n    event.ExtensionType = type\n    try:\n        event.Save()\n    except:\n        Error(\"Error \" + traceback.format_exc())\n\n\nclass ExtensionEvent(WALAEvent):\n    def __init__(self):\n        WALAEvent.__init__(self)\n        self.eventId = 1\n        self.providerId = \"69B669B9-4AF8-4C50-BDC4-6006FA76E975\"\n        self.Name = \"\"\n        self.Version = \"\"\n        self.IsInternal = False\n        self.Operation = \"\"\n        self.OperationSuccess = True\n        self.ExtensionType = \"\"\n        self.Message = \"\"\n        self.Duration = 0\n\n\nclass WALAEventMonitor(WALAEvent):\n    def __init__(self, postMethod):\n        WALAEvent.__init__(self)\n        self.post = postMethod\n        self.sysInfo = {}\n        self.eventdir = LibDir + \"/events\"\n        self.issysteminfoinitilized = False\n\n    def StartEventsLoop(self):\n        eventThread = threading.Thread(target=self.EventsLoop)\n        eventThread.setDaemon(True)\n        eventThread.start()\n\n    
def EventsLoop(self):\n        LastReportHeartBeatTime = datetime.datetime.min\n        try:\n            while True:\n                if (datetime.datetime.now() - LastReportHeartBeatTime) > \\\n                        datetime.timedelta(minutes=30):\n                    LastReportHeartBeatTime = datetime.datetime.now()\n                    AddExtensionEvent(op=WALAEventOperation.HeartBeat, name=\"WALA\", isSuccess=True)\n                self.postNumbersInOneLoop = 0\n                self.CollectAndSendWALAEvents()\n                time.sleep(60)\n        except:\n            Error(\"Exception in events loop:\" + traceback.format_exc())\n\n    def SendEvent(self, providerid, events):\n        dataFormat = u'<?xml version=\"1.0\"?><TelemetryData version=\"1.0\"><Provider id=\"{0}\">{1}' \\\n                     '</Provider></TelemetryData>'\n        data = dataFormat.format(providerid, events)\n        self.post(\"/machine/?comp=telemetrydata\", data)\n\n    def CollectAndSendWALAEvents(self):\n        if not os.path.exists(self.eventdir):\n            return\n        # Throttling: send at most 3 uploads per 15 seconds\n        eventSendNumber = 0\n        eventFiles = os.listdir(self.eventdir)\n        events = {}\n        for file in eventFiles:\n            if not file.endswith(\".tld\"):\n                continue\n            with open(os.path.join(self.eventdir, file), \"rb\") as hfile:\n                # if the file cannot be opened or deleted, let the exception propagate\n                xmlStr = hfile.read().decode(\"utf-8\", 'ignore')\n            os.remove(os.path.join(self.eventdir, file))\n            params = \"\"\n            eventid = \"\"\n            providerid = \"\"\n            # if an exception occurs while processing an event, catch it and continue\n            try:\n                xmlStr = self.AddSystemInfo(xmlStr)\n                for node in xml.dom.minidom.parseString(xmlStr.encode(\"utf-8\")).childNodes[0].childNodes:\n                    if node.tagName == \"Param\":\n                        params += node.toxml()\n                    if node.tagName == \"Event\":\n                        eventid = node.getAttribute(\"id\")\n                    if node.tagName == \"Provider\":\n                        providerid = node.getAttribute(\"id\")\n            except:\n                Error(traceback.format_exc())\n                continue\n            if len(params) == 0 or len(eventid) == 0 or len(providerid) == 0:\n                Error(\"Empty field in params:\" + params + \" event id:\" + eventid + \" provider id:\" + providerid)\n                continue\n\n            eventstr = u'<Event id=\"{0}\"><![CDATA[{1}]]></Event>'.format(eventid, params)\n            if not events.get(providerid):\n                events[providerid] = \"\"\n            if len(events[providerid]) > 0 and len(events.get(providerid) + eventstr) >= 63 * 1024:\n                eventSendNumber += 1\n                self.SendEvent(providerid, events.get(providerid))\n                if eventSendNumber % 3 == 0:\n                    time.sleep(15)\n                events[providerid] = \"\"\n            if len(eventstr) >= 63 * 1024:\n                Error(\"Single event too large, aborting: \" + eventstr[:300])\n                continue\n\n            events[providerid] = events.get(providerid) + eventstr\n\n        for key in events.keys():\n            if len(events[key]) > 0:\n                eventSendNumber += 1\n                self.SendEvent(key, events[key])\n                if eventSendNumber % 3 == 0:\n    
                time.sleep(15)\n\n    def AddSystemInfo(self, eventData):\n        if not self.issysteminfoinitilized:\n            self.issysteminfoinitilized = True\n            try:\n                self.sysInfo[\"OSVersion\"] = platform.system() + \":\" + \"-\".join(DistInfo(1)) + \":\" + platform.release()\n                self.sysInfo[\"GAVersion\"] = GuestAgentVersion\n                self.sysInfo[\"RAM\"] = MyDistro.getTotalMemory()\n                self.sysInfo[\"Processors\"] = MyDistro.getProcessorCores()\n                sharedConfig = xml.dom.minidom.parse(\"/var/lib/waagent/SharedConfig.xml\").childNodes[0]\n                hostEnvConfig = xml.dom.minidom.parse(\"/var/lib/waagent/HostingEnvironmentConfig.xml\").childNodes[0]\n                gfiles = RunGetOutput(\"ls -t /var/lib/waagent/GoalState.*.xml\")[1]\n                goalStateConfi = xml.dom.minidom.parse(gfiles.split(\"\\n\")[0]).childNodes[0]\n                self.sysInfo[\"TenantName\"] = hostEnvConfig.getElementsByTagName(\"Deployment\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleName\"] = hostEnvConfig.getElementsByTagName(\"Role\")[0].getAttribute(\"name\")\n                self.sysInfo[\"RoleInstanceName\"] = sharedConfig.getElementsByTagName(\"Instance\")[0].getAttribute(\"id\")\n                self.sysInfo[\"ContainerId\"] = goalStateConfi.getElementsByTagName(\"ContainerId\")[0].childNodes[\n                    0].nodeValue\n            except:\n                Error(traceback.format_exc())\n\n        eventObject = xml.dom.minidom.parseString(eventData.encode(\"utf-8\")).childNodes[0]\n        for node in eventObject.childNodes:\n            if node.tagName == \"Param\":\n                name = node.getAttribute(\"Name\")\n                if self.sysInfo.get(name):\n                    node.setAttribute(\"Value\", xml.sax.saxutils.escape(str(self.sysInfo[name])))\n\n        return eventObject.toxml()\n\n\n\nWaagentLogrotate = \"\"\"\\\n/var/log/waagent.log {\n    monthly\n    rotate 6\n    notifempty\n    missingok\n}\n\"\"\"\n\n\ndef GetMountPoint(mountlist, device):\n    \"\"\"\n    Example of mountlist:\n        /dev/sda1 on / type ext4 (rw)\n        proc on /proc type proc (rw)\n        sysfs on /sys type sysfs (rw)\n        devpts on /dev/pts type devpts (rw,gid=5,mode=620)\n        tmpfs on /dev/shm type tmpfs (rw,rootcontext=\"system_u:object_r:tmpfs_t:s0\")\n        none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n        /dev/sdb1 on /mnt/resource type ext4 (rw)\n    \"\"\"\n    if (mountlist and device):\n        for entry in mountlist.split('\\n'):\n            if (re.search(device, entry)):\n                tokens = entry.split()\n                # Return the 3rd column of this line\n                return tokens[2] if len(tokens) > 2 else None\n    return None\n\n\ndef FindInLinuxKernelCmdline(option):\n    \"\"\"\n    Return match object if 'option' is present in the kernel boot options\n    of the grub configuration.\n    \"\"\"\n    m = None\n    matchs = r'^.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?' 
+ option + r'.*$'\n    try:\n        m = FindStringInFile(MyDistro.grubKernelBootOptionsFile, matchs)\n    except IOError as e:\n        Error(\n            'FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(e))\n\n    return m\n\n\ndef AppendToLinuxKernelCmdline(option):\n    \"\"\"\n    Add 'option' to the kernel boot options of the grub configuration.\n    \"\"\"\n    if not FindInLinuxKernelCmdline(option):\n        src = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r')(.*?)(\"?)$'\n        rep = r'\\1\\2 ' + option + r'\\3'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep)\n        except IOError as e:\n            Error(\n                'AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(\n                    e))\n            return 1\n        Run(\"update-grub\", chk_err=False)\n    return 0\n\n\ndef RemoveFromLinuxKernelCmdline(option):\n    \"\"\"\n    Remove 'option' from the kernel boot options of the grub configuration.\n    \"\"\"\n    if FindInLinuxKernelCmdline(option):\n        src = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?)(' + option + r')(.*?)(\"?)$'\n        rep = r'\\1\\3\\4'\n        try:\n            ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep)\n        except IOError as e:\n            Error(\n                'RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + ' Exception: ' + str(\n                    e))\n            return 1\n        Run(\"update-grub\", chk_err=False)\n    return 0\n\n\ndef FindStringInFile(fname, matchs):\n    \"\"\"\n    Return match object if found in file.\n    \"\"\"\n    try:\n        ms = re.compile(matchs)\n        for l in (open(fname, 'r')).readlines():\n            m = re.search(ms, l)\n            if m:\n                return m\n    except:\n        raise\n\n    return None\n\n\ndef ReplaceStringInFile(fname, src, repl):\n    \"\"\"\n    Replace 'src' with 'repl' in file.\n    \"\"\"\n    try:\n        sr = re.compile(src)\n        if FindStringInFile(fname, src):\n            updated = ''\n            for l in (open(fname, 'r')).readlines():\n                n = re.sub(sr, repl, l)\n                updated += n\n            ReplaceFileContentsAtomic(fname, updated)\n    except:\n        raise\n    return\n\n\ndef ApplyVNUMAWorkaround():\n    \"\"\"\n    If kernel version has NUMA bug, add 'numa=off' to\n    kernel boot options.\n    \"\"\"\n    VersionParts = platform.release().replace('-', '.').split('.')\n    if int(VersionParts[0]) > 2:\n        return\n    if int(VersionParts[1]) > 6:\n        return\n    if int(VersionParts[2]) > 37:\n        return\n    if AppendToLinuxKernelCmdline(\"numa=off\") == 0:\n        Log(\"Your kernel version \" + platform.release() + \" has a NUMA-related bug: NUMA has been disabled.\")\n    else:\n        Error(\"Error adding 'numa=off'. NUMA has not been disabled.\")
\n\n\ndef RevertVNUMAWorkaround():\n    \"\"\"\n    Remove 'numa=off' from kernel boot options.\n    \"\"\"\n    if RemoveFromLinuxKernelCmdline(\"numa=off\") == 0:\n        Log('NUMA has been re-enabled')\n    else:\n        Log('NUMA has not been re-enabled')\n\n\ndef Install():\n    \"\"\"\n    Install the agent service.\n    Check dependencies.\n    Create /etc/waagent.conf and move old version to\n    /etc/waagent.conf.old\n    Copy RulesFiles to /var/lib/waagent\n    Create /etc/logrotate.d/waagent\n    Set /etc/ssh/sshd_config ClientAliveInterval to 180\n    Call ApplyVNUMAWorkaround()\n    \"\"\"\n    if MyDistro.checkDependencies():\n        return 1\n    os.chmod(sys.argv[0], 0o755)\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(a):\n            if os.path.isfile(GetLastPathElement(a)):\n                os.remove(GetLastPathElement(a))\n            shutil.move(a, \".\")\n            Warn(\"Moved \" + a + \" -> \" + LibDir + \"/\" + GetLastPathElement(a))\n    MyDistro.registerAgentService()\n    if os.path.isfile(\"/etc/waagent.conf\"):\n        try:\n            os.remove(\"/etc/waagent.conf.old\")\n        except:\n            pass\n        try:\n            os.rename(\"/etc/waagent.conf\", \"/etc/waagent.conf.old\")\n            Warn(\"Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old\")\n        except:\n            pass\n    SetFileContents(\"/etc/waagent.conf\", MyDistro.waagent_conf_file)\n    SetFileContents(\"/etc/logrotate.d/waagent\", WaagentLogrotate)\n    filepath = \"/etc/ssh/sshd_config\"\n    ReplaceFileContentsAtomic(filepath, \"\\n\".join(filter(lambda a: not\n    a.startswith(\"ClientAliveInterval\"),\n                                                         GetFileContents(filepath).split(\n                                                             '\\n'))) + \"\\nClientAliveInterval 180\\n\")\n    Log(\"Configured SSH client probing to keep connections alive.\")\n    ApplyVNUMAWorkaround()\n    return 0\n\n\ndef GetMyDistro(dist_class_name=''):\n    \"\"\"\n    Return MyDistro object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    if dist_class_name == '':\n        if 'Linux' in platform.system():\n            Distro = DistInfo()[0]\n        else:  # I know this is not Linux!\n            if 'FreeBSD' in platform.system():\n                Distro = platform.system()\n            if 'NS-BSD' in platform.system():\n                Distro = platform.system()\n                Distro = Distro.replace(\"-\", \"\")\n        Distro = Distro.strip('\"')\n        Distro = Distro.strip(' ')\n        dist_class_name = Distro + 'Distro'\n        if dist_class_name not in globals():\n            if ('SuSE'.lower() in Distro.lower()):\n                Distro = 'SuSE'\n            elif ('Ubuntu'.lower() in Distro.lower()):\n                Distro = 'Ubuntu'\n            elif ('centos'.lower() in Distro.lower() or 'big-ip'.lower() in Distro.lower()):\n                Distro = 'centos'\n            elif ('debian'.lower() in Distro.lower()):\n                Distro = 'debian'\n            elif ('oracle'.lower() in Distro.lower()):\n                Distro = 'oracle'\n            elif ('redhat'.lower() in Distro.lower()):\n                Distro = 'redhat'\n            elif ('Kali'.lower() in Distro.lower()):\n                Distro = 'Kali'\n            elif ('FreeBSD'.lower() in Distro.lower() or 'gaia'.lower() in Distro.lower() or 'panos'.lower() in Distro.lower()):\n              
  Distro = 'FreeBSD'\n            else:\n                Distro = 'Default'\n            dist_class_name = Distro + 'Distro'\n    else:\n        Distro = dist_class_name\n    if dist_class_name not in globals():\n        # print(Distro + ' is not a supported distribution.')\n        return None\n    return globals()[dist_class_name]()  # the distro class inside this module.\n\ndef DistInfo(fullname=0):\n    try:\n        if 'FreeBSD' in platform.system():\n            release = re.sub('\\\\-.*$', '', str(platform.release()))\n            distinfo = ['FreeBSD', release]\n            return distinfo\n        if 'NS-BSD' in platform.system():\n            release = re.sub('\\\\-.*$', '', str(platform.release()))\n            distinfo = ['NS-BSD', release]\n            return distinfo\n        if 'linux_distribution' in dir(platform):\n            distinfo = list(platform.linux_distribution(full_distribution_name=0))\n            # remove trailing whitespace in distro name\n            if(distinfo[0] == ''):\n                osfile = open(\"/etc/os-release\", \"r\")\n                for line in osfile:\n                    lists = str(line).split(\"=\")\n                    if(lists[0] == \"NAME\"):\n                        distname = lists[1].split(\"\\\"\")\n                        distinfo[0] = distname[1]\n                        if(distinfo[0].lower() == \"sles\"):\n                            distinfo[0] = \"SuSE\"\n                osfile.close()\n            distinfo[0] = distinfo[0].strip()\n            return distinfo\n        if 'Linux' in platform.system():\n            distinfo = [\"Default\"]\n            if \"ubuntu\" in platform.version().lower():\n                distinfo[0] = \"Ubuntu\"\n            elif 'suse' in platform.version().lower():\n                distinfo[0] = \"SuSE\"\n            elif 'centos' in platform.version().lower():\n                distinfo[0] = \"centos\"\n            elif 'debian' in platform.version().lower():\n                distinfo[0] = \"debian\"\n            elif 'oracle' in platform.version().lower():\n                distinfo[0] = \"oracle\"\n            elif 'redhat' in platform.version().lower() or 'rhel' in platform.version().lower():\n                distinfo[0] = \"redhat\"\n            elif 'kali' in platform.version().lower():\n                distinfo[0] = \"Kali\"\n            return distinfo\n        else:\n            return platform.dist()\n    except Exception as e:\n        errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n        Error(errMsg)\n        distinfo = ['Abstract', '1.0']\n        return distinfo\n\ndef PackagedInstall(buildroot):\n    \"\"\"\n    Called from setup.py for use by RPM.\n    Generic implementation: creates directories and\n    files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent,\n    /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot.\n    Copies the generated waagent.conf into place and exits.\n    \"\"\"\n    MyDistro = GetMyDistro()\n    if MyDistro == None:\n        sys.exit(1)\n    MyDistro.packagedInstall(buildroot)\n\n\ndef LibraryInstall(buildroot):\n    pass\n\n\ndef Uninstall():\n    \"\"\"\n    Uninstall the agent service.\n    Copy RulesFiles back to original locations.\n    Delete agent-related files.\n    Call RevertVNUMAWorkaround().\n    \"\"\"\n    SwitchCwd()\n    for a in RulesFiles:\n        if os.path.isfile(GetLastPathElement(a)):\n            try:\n                
shutil.move(GetLastPathElement(a), a)\n                Warn(\"Moved \" + LibDir + \"/\" + GetLastPathElement(a) + \" -> \" + a)\n            except:\n                pass\n    MyDistro.unregisterAgentService()\n    MyDistro.uninstallDeleteFiles()\n    RevertVNUMAWorkaround()\n    return 0\n\n\ndef Deprovision(force, deluser):\n    \"\"\"\n    Remove user accounts created by provisioning.\n    Disables root password if Provisioning.DeleteRootPassword = 'y'\n    Stop agent service.\n    Remove SSH host keys if they were generated by the provision.\n    Set hostname to 'localhost.localdomain'.\n    Delete cached system configuration files in /var/lib and /var/lib/waagent.\n    \"\"\"\n\n    # Append blank line at the end of file, so the ctime of this file is changed every time\n    Run(\"echo ''>>\" + MyDistro.getConfigurationPath())\n\n    SwitchCwd()\n\n\n    print(\"WARNING! The waagent service will be stopped.\")\n    print(\"WARNING! All SSH host key pairs will be deleted.\")\n    print(\"WARNING! Cached DHCP leases will be deleted.\")\n    MyDistro.deprovisionWarnUser()\n    delRootPass = Config.get(\"Provisioning.DeleteRootPassword\")\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        print(\"WARNING! root password will be disabled. You will not be able to login as root.\")\n\n    try:\n        input = raw_input\n    except NameError:\n        pass\n    if force == False and not input('Do you want to proceed (y/n)? ').startswith('y'):\n        return 1\n\n    MyDistro.stopAgentService()\n\n    # Remove SSH host keys\n    regenerateKeys = Config.get(\"Provisioning.RegenerateSshHostKeyPair\")\n    if regenerateKeys == None or regenerateKeys.lower().startswith(\"y\"):\n        Run(\"rm -f /etc/ssh/ssh_host_*key*\")\n\n    # Remove root password\n    if delRootPass != None and delRootPass.lower().startswith(\"y\"):\n        MyDistro.deleteRootPassword()\n    # Remove distribution specific networking configuration\n\n    MyDistro.publishHostname('localhost.localdomain')\n    MyDistro.deprovisionDeleteFiles()\n    return 0\n\n\ndef SwitchCwd():\n    \"\"\"\n    Switch to cwd to /var/lib/waagent.\n    Create if not present.\n    \"\"\"\n    CreateDir(LibDir, \"root\", 0o700)\n    os.chdir(LibDir)\n\n\ndef Usage():\n    \"\"\"\n    Print the arguments to waagent.\n    \"\"\"\n    print(\"usage: \" + sys.argv[\n        0] + \" [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]\")\n    return 0\n\n\ndef main():\n    \"\"\"\n    Instantiate MyDistro, exit if distro class is not defined.\n    Parse command-line arguments, exit with usage() on error.\n    Instantiate ConfigurationProvider.\n    Call appropriate non-daemon methods and exit.\n    If daemon mode, enter Agent.Run() loop.\n    \"\"\"\n    if GuestAgentVersion == \"\":\n        print(\"WARNING! 
This is a non-standard agent that does not include a valid version string.\")\n\n    if len(sys.argv) == 1:\n        sys.exit(Usage())\n\n    LoggerInit('/var/log/waagent.log', '/dev/console')\n    global LinuxDistro\n    LinuxDistro = DistInfo()[0]\n\n    global MyDistro\n    MyDistro = GetMyDistro()\n    if MyDistro == None:\n        sys.exit(1)\n    args = []\n    conf_file = None\n    global force\n    force = False\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(help|usage|\\\\?)\", a):\n            sys.exit(Usage())\n        elif re.match(\"^([-/]*)version\", a):\n            print(GuestAgentVersion + \" running on \" + LinuxDistro)\n            sys.exit(0)\n        elif re.match(\"^([-/]*)verbose\", a):\n            myLogger.verbose = True\n        elif re.match(\"^([-/]*)force\", a):\n            force = True\n        elif re.match(\"^(?:[-/]*)conf=.+\", a):\n            conf_file = re.match(\"^(?:[-/]*)conf=(.+)\", a).groups()[0]\n        elif re.match(\"^([-/]*)(setup|install)\", a):\n            sys.exit(MyDistro.Install())\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            sys.exit(Uninstall())\n        else:\n            args.append(a)\n    global Config\n    Config = ConfigurationProvider(conf_file)\n\n    logfile = Config.get(\"Logs.File\")\n    if logfile is not None:\n        myLogger.file_path = logfile\n    logconsole = Config.get(\"Logs.Console\")\n    if logconsole is not None and logconsole.lower().startswith(\"n\"):\n        myLogger.con_path = None\n    verbose = Config.get(\"Logs.Verbose\")\n    if verbose != None and verbose.lower().startswith(\"y\"):\n        myLogger.verbose = True\n    global daemon\n    daemon = False\n    for a in args:\n        if re.match(\"^([-/]*)deprovision\\\\+user\", a):\n            sys.exit(Deprovision(force, True))\n        elif re.match(\"^([-/]*)deprovision\", a):\n            sys.exit(Deprovision(force, False))\n        elif re.match(\"^([-/]*)daemon\", a):\n            daemon = True\n        elif re.match(\"^([-/]*)serialconsole\", a):\n            AppendToLinuxKernelCmdline(\"console=ttyS0 earlyprintk=ttyS0\")\n            Log(\"Configured kernel to use ttyS0 as the boot console.\")\n            sys.exit(0)\n        else:\n            print(\"Invalid command line parameter:\" + a)\n            sys.exit(1)\n\n    if daemon == False:\n        sys.exit(Usage())\n    global modloaded\n    modloaded = False\n\n    while True:\n        try:\n            SwitchCwd()\n            Log(GuestAgentLongName + \" Version: \" + GuestAgentVersion)\n            if IsLinux():\n                Log(\"Linux Distribution Detected      : \" + LinuxDistro)\n        except Exception as e:\n            Error(traceback.format_exc())\n            Error(\"Exception: \" + str(e))\n            Log(\"Restart agent in 15 seconds\")\n            time.sleep(15)\n\n\nif __name__ == '__main__':\n    main()"
  },
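The handler status aggregation near the top of the agent code above (heartbeat freshness check, then merging the extension's own status file) condenses to a small sketch. This is a minimal reconstruction under assumptions, not the agent's exact control flow: `aggregate_handler_status` and its parameter names are hypothetical, and the surrounding enable/disable checks are omitted.

```python
import json
import os
import time

def aggregate_handler_status(heartbeat_file, status_file, name, version, seq_no):
    # A heartbeat older than 600 s (10 min) marks the handler unresponsive.
    agg_state = 'Unresponsive'
    status_obj = None
    if os.path.exists(heartbeat_file):
        age = int(time.time() - os.stat(heartbeat_file).st_mtime)
        if age <= 600:
            with open(heartbeat_file) as f:
                agg_state = json.load(f)[0]["heartbeat"].get("status")
    if os.path.exists(status_file):
        with open(status_file) as f:
            status_obj = json.load(f)[0]   # raw status is a one-element array
        status_obj.pop("version", None)    # drop the redundant element
    agg = {"handlerName": name, "handlerVersion": version, "status": agg_state,
           "runtimeSettingsStatus": {"sequenceNumber": seq_no}}
    if status_obj:
        agg["runtimeSettingsStatus"]["settingsStatus"] = status_obj
    return json.dumps(agg)
```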
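WALAEvent.Save above hands events to the monitor thread through the filesystem: each event is serialized to a small XML fragment, written to a `*.tmp` file, then renamed to `*.tld` so a reader never sees a half-written file. A minimal sketch of that handoff follows; `save_event` is a hypothetical wrapper, and the per-type markers are collapsed for brevity.

```python
import os
import time
import xml.sax.saxutils

def save_event(provider_id, event_id, params, event_dir="/var/lib/waagent/events"):
    # Serialize to the <Data><Provider/><Event/><Param/>...</Data> fragment.
    body = u'<Provider id="{0}"/><Event id="{1}"/>'.format(provider_id, event_id)
    for name, value in params.items():
        if isinstance(value, str):
            # strings are attribute-quoted; quoteattr supplies the quotes itself
            body += u'<Param Name="{0}" Value={1} T="mt:wstr" />'.format(
                name, xml.sax.saxutils.quoteattr(value))
        else:
            # the agent distinguishes mt:uint64 / mt:bool / mt:float64;
            # collapsed to mt:uint64 here for brevity
            body += u'<Param Name="{0}" Value="{1}" T="mt:uint64" />'.format(name, value)
    data = u"<Data>{0}</Data>".format(body)
    if not os.path.exists(event_dir):
        os.mkdir(event_dir)
        os.chmod(event_dir, 0o700)
    fname = os.path.join(event_dir, str(int(time.time() * 1000000)))
    with open(fname + ".tmp", "wb") as f:
        f.write(data.encode("utf-8"))
    os.rename(fname + ".tmp", fname + ".tld")  # atomic publish for the monitor thread
```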
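CollectAndSendWALAEvents applies three batching rules: events for one provider are concatenated until the next event would push the batch past 63 KB, any single event at or above 63 KB is dropped, and the sender pauses 15 seconds after every third upload. The sketch below isolates that logic; `send` is a stand-in for the real wire call, and the flush/skip ordering is simplified slightly relative to the original loop.

```python
import time

BATCH_LIMIT = 63 * 1024  # per-upload size ceiling used by the agent

def flush_events(event_strings, send):
    batch, sent = "", 0
    for ev in event_strings:
        if len(ev) >= BATCH_LIMIT:
            continue                      # single event too large: skip it
        if batch and len(batch) + len(ev) >= BATCH_LIMIT:
            send(batch)
            sent += 1
            if sent % 3 == 0:
                time.sleep(15)            # throttle: at most 3 uploads per 15 s
            batch = ""
        batch += ev
    if batch:
        send(batch)                       # final partial batch
```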
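AppendToLinuxKernelCmdline above edits the grub options line with a capture-group substitution that slips the new option in front of a possible closing quote. Here is the same idea on a single line of text; the `GRUB_CMDLINE_LINUX_DEFAULT=` prefix is an assumption standing in for the distro-specific `grubKernelBootOptionsLine`.

```python
import re

def append_option(line, option, prefix="GRUB_CMDLINE_LINUX_DEFAULT="):
    if option in line:
        return line  # already present, nothing to do
    # group 1: everything through the prefix, group 2: existing options,
    # group 3: the optional closing quote that must stay last
    src = r'^(.*?' + re.escape(prefix) + r')(.*?)("?)$'
    return re.sub(src, r'\1\2 ' + option + r'\3', line)

print(append_option('GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"', "numa=off"))
# -> GRUB_CMDLINE_LINUX_DEFAULT="quiet splash numa=off"
```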
  {
    "path": "VMBackup/main/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"
  },
  {
    "path": "VMBackup/main/backuplogger.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os\nimport string\nimport time\nimport traceback\nfrom blobwriter import BlobWriter\nfrom Utils.WAAgentUtil import waagent\nimport sys\n\nclass Backuplogger(object):\n    def __init__(self, hutil):\n        self.msg = ''\n        self.con_path = '/dev/console'\n        self.enforced_local_flag_value = True\n        self.hutil = hutil\n        self.prev_log = ''\n        self.logging_off = False\n\n    def enforce_local_flag(self, enforced_local):\n        #Pause file logging during I/O freeze period by setting Enforced_local_flag_value to False\n        #Enforced_local_flag_value is turned to False from True when Freeze Starts\n        #Enforced_local_flag_value is turned to True from False when Freeze Ends\n        if (self.hutil.get_intvalue_from_configfile('LoggingOff', 0) == 1):\n            self.logging_off = True\n        if (self.enforced_local_flag_value != False and enforced_local == False and self.logging_off == True):\n            pass\n        elif (self.enforced_local_flag_value != False and enforced_local == False):\n            self.msg = self.msg + \"================== Logs during Freeze Start ==============\" + \"\\n\"\n        elif (self.enforced_local_flag_value == False and enforced_local == True):\n            self.msg = self.msg + \"================== Logs during Freeze End ==============\" + \"\\n\"\n            self.commit_to_local()\n        self.enforced_local_flag_value = enforced_local\n\n    \"\"\"description of class\"\"\"\n    def log(self, msg, local=False, level='Info'):\n        if(self.enforced_local_flag_value == False and self.logging_off == True):\n            return\n        WriteLog = self.hutil.get_strvalue_from_configfile('WriteLog','True')\n        if (WriteLog == None or WriteLog == 'True'):\n            log_msg = \"\"\n            if sys.version_info > (3,):\n                log_msg = self.log_to_con_py3(msg, level)\n            else:\n                log_msg = \"{0}  {1}  {2} \\n\".format(str(datetime.datetime.utcnow()) , level , msg)\n                if(self.enforced_local_flag_value != False):\n                    self.log_to_con(log_msg)\n            if(self.enforced_local_flag_value == False):\n                self.msg += log_msg\n            else:\n                self.hutil.log(str(msg),level)\n\n    def log_to_con(self, msg):\n        try:\n            with open(self.con_path, \"wb\") as C :\n                message = \"\".join(list(filter(lambda x : x in string.printable, msg)))\n                C.write(message.encode('ascii','ignore'))\n        except IOError as e:\n            pass\n        except Exception as e:\n            pass\n\n    def log_to_con_py3(self, msg, level='Info'):\n        log_msg = \"\"\n        try:\n            if type(msg) is not str:\n                msg = str(msg, errors=\"backslashreplace\")\n            time = 
datetime.datetime.utcnow().strftime(u'%Y/%m/%d %H:%M:%S.%f')\n            log_msg = u\"{0}  {1}  {2} \\n\".format(time , level , msg)\n            log_msg= str(log_msg.encode('ascii', \"backslashreplace\"), \n                         encoding=\"ascii\")\n            if(self.enforced_local_flag_value != False):\n                with open(self.con_path, \"w\") as C :\n                    C.write(log_msg)\n        except IOError:\n            pass\n        except Exception as e:\n            log_msg = \"###### Exception in log_to_con_py3\"\n        return log_msg\n\n    def commit(self, logbloburi):\n        #commit to local file system first, then commit to the network.\n        try:\n            self.hutil.log(self.msg)\n            self.msg = ''\n        except Exception as e:\n            pass \n        try:\n            self.commit_to_blob(logbloburi)\n        except Exception as e:\n            self.hutil.log('commit to blob failed')\n\n    def commit_to_local(self):\n        self.hutil.log(self.msg)\n        self.msg = ''\n\n    def commit_to_blob(self, logbloburi):\n        UploadStatusAndLog = self.hutil.get_strvalue_from_configfile('UploadStatusAndLog','True')\n        if (UploadStatusAndLog == None or UploadStatusAndLog == 'True'):\n            log_to_blob = \"\"\n            blobWriter = BlobWriter(self.hutil)\n            # append the wala log at the end.\n            try:\n                # distro information\n                if(self.hutil is not None and self.hutil.patching is not None and self.hutil.patching.distro_info is not None):\n                    distro_str = \"\"\n                    if(len(self.hutil.patching.distro_info)>1):\n                        distro_str = self.hutil.patching.distro_info[0] + \" \" + self.hutil.patching.distro_info[1]\n                    else:\n                        distro_str = self.hutil.patching.distro_info[0]\n                    self.msg = \"Distro Info:\" + distro_str + \"\\n\" + self.msg\n                self.msg = \"Guest Agent Version is :\" + waagent.GuestAgentVersion + \"\\n\" + self.msg\n                log_to_blob = str(self.hutil.fetch_log_message()) + \"Tail of shell script log:\" + str(self.hutil.get_shell_script_log())\n            except Exception as e:\n                errMsg = 'Failed to get the waagent log with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n                self.hutil.log(errMsg)\n            blobWriter.WriteBlob(log_to_blob, logbloburi)\n\n    def set_prev_log(self):\n        self.prev_log = self.hutil.get_prev_log()\n"
  },
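Backuplogger.enforce_local_flag above pauses file I/O during the filesystem freeze window: while the flag is lowered, messages accumulate in memory (`self.msg`) and the buffer is flushed in one write when the freeze ends. A stripped-down sketch of that pattern, with `Sink`/`sink.log` as a stand-in for `hutil`:

```python
class BufferedLogger(object):
    def __init__(self, sink):
        self.sink = sink          # any object with a log(msg) method
        self.frozen = False
        self.buffer = ""

    def set_frozen(self, frozen):
        if not self.frozen and frozen:
            self.buffer += "== Logs during Freeze Start ==\n"
        elif self.frozen and not frozen:
            self.buffer += "== Logs during Freeze End ==\n"
            self.sink.log(self.buffer)   # flush everything buffered at thaw
            self.buffer = ""
        self.frozen = frozen

    def log(self, msg):
        if self.frozen:
            self.buffer += msg + "\n"    # no file I/O while the fs is frozen
        else:
            self.sink.log(msg)
```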
  {
    "path": "VMBackup/main/blobwriter.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport datetime\nimport traceback\ntry:\n    import urlparse\nexcept ImportError:\n    import urllib.parse as urlparse\nfrom common import CommonVariables\nfrom HttpUtil import HttpUtil\nfrom Utils import HandlerUtil\n\nclass BlobProperties():\n    def __init__(self, blobType, contentLength):\n        self.blobType = blobType\n        self.contentLength = contentLength\n    def __str__(self):\n        return ' blobType: ' + str(self.blobType) + ' contentLength: ' + str(self.contentLength)\n\nclass BlobWriter(object):\n    blobEmptyDetails = {}\n    \"\"\"description of class\"\"\"\n    def __init__(self, hutil):\n        self.hutil = hutil\n    \"\"\"\n    network call should have retry.\n    \"\"\"\n    def WriteBlob(self,msg,blobUri):\n        try:\n            # get the blob type\n            if(blobUri is not None):\n                if (self.IsEmptyBlob(blobUri) == False):\n                    raise Exception(\"Cannot perform write operation on a non empty blob\")\n                \n                blobProperties = self.GetBlobProperties(blobUri)\n                blobType = \"pageblob\"\n\n                if(blobProperties is not None):\n                    blobType = blobProperties.blobType\n\n                if (str(blobType).lower() == \"pageblob\"):\n                    # Clear Page-Blob Contents\n                    self.ClearPageBlob(blobUri, blobProperties)\n                    # Write to Page-Blob\n                    self.WritePageBlob(msg, blobUri, blobProperties)\n                else:\n                    self.WriteBlockBlob(msg, blobUri)\n            else:\n                self.hutil.log(\"bloburi is None\")\n        except Exception as e:\n            self.hutil.log(\"Failed to committing the log with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n\n    def WriteBlockBlob(self,msg,blobUri):\n        retry_times = 3\n        while(retry_times > 0):\n            try:\n                # get the blob type\n                if(blobUri is not None):\n                    http_util = HttpUtil(self.hutil)\n                    sasuri_obj = urlparse.urlparse(blobUri)\n                    headers = {}\n                    headers[\"x-ms-blob-type\"] = 'BlockBlob'\n                    self.hutil.log(str(headers))\n                    result = http_util.Call(method = 'PUT', sasuri_obj = sasuri_obj, data = msg, headers = headers, fallback_to_curl = True)\n                    if(result == CommonVariables.success):\n                        self.hutil.log(\"blob written succesfully\")\n                        retry_times = 0\n                    else:\n                        self.hutil.log(\"blob failed to write\")\n                        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.statusBlobUploadError, \"true\")\n                else:\n                    
self.hutil.log(\"bloburi is None\")\n                    retry_times = 0\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.statusBlobUploadError, \"true\")\n            except Exception as e:\n                HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.statusBlobUploadError, \"true\")\n                self.hutil.log(\"Failed to committing the log with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n            self.hutil.log(\"retry times is \" + str(retry_times))\n            retry_times = retry_times - 1\n\n    def WritePageBlob(self, message, blobUri, blobProperties):\n        if(blobUri is not None):\n            retry_times = 3\n            while(retry_times > 0):\n                msg = message\n                try:\n                    PAGE_SIZE_BYTES = 512\n                    PAGE_UPLOAD_LIMIT_BYTES = 4194304 # 4 MB\n                    STATUS_BLOB_LIMIT_BYTES = 10485760 # 10 MB\n                    http_util = HttpUtil(self.hutil)\n                    sasuri_obj = urlparse.urlparse(blobUri + '&comp=page')\n                    # Get Blob-properties to know content-length\n                    blobContentLength = int(blobProperties.contentLength)\n                    self.hutil.log(\"WritePageBlob: contentLength:\"+str(blobContentLength))\n                    maxMsgLen = STATUS_BLOB_LIMIT_BYTES\n                    if (blobContentLength > STATUS_BLOB_LIMIT_BYTES):\n                        maxMsgLen = blobContentLength\n                    msgLen = len(msg)\n                    self.hutil.log(\"WritePageBlob: msg length:\"+str(msgLen))\n                    if(len(msg) > maxMsgLen):\n                        msg = msg[msgLen-maxMsgLen:msgLen]\n                        msgLen = len(msg)\n                        self.hutil.log(\"WritePageBlob: msg length after aligning to maxMsgLen:\"+str(msgLen))\n                    if((msgLen % PAGE_SIZE_BYTES) != 0):\n                        # Add padding to message to make its legth multiple of 512\n                        paddedLen = msgLen + (512 - (msgLen % PAGE_SIZE_BYTES))\n                        msg = msg.ljust(paddedLen)\n                        msgLen = len(msg)\n                        self.hutil.log(\"WritePageBlob: msg length after aligning to page-size(512):\"+str(msgLen))\n                    if(blobContentLength < msgLen):\n                        # Try to resize blob to increase its size\n                        isSuccessful = self.try_resize_page_blob(blobUri, msgLen)\n                        if(isSuccessful == True):\n                            self.hutil.log(\"WritePageBlob: page-blob resized successfully new size(blobContentLength):\"+str(msgLen))\n                            blobContentLength = msgLen\n                        else:\n                            self.hutil.log(\"WritePageBlob: page-blob resize failed\")\n                    if(msgLen > blobContentLength):\n                        msg = msg[msgLen-blobContentLength:msgLen]\n                        msgLen = len(msg)\n                        self.hutil.log(\"WritePageBlob: msg length after aligning to blobContentLength:\"+str(msgLen))\n                    # Write Pages\n                    result = CommonVariables.error\n                    bytes_sent = 0\n                    while (bytes_sent < msgLen):\n                        bytes_remaining = msgLen - bytes_sent\n                        pageContent = None\n                        if(bytes_remaining > PAGE_UPLOAD_LIMIT_BYTES): # more than 
4 MB\n                            pageContent = msg[bytes_sent:bytes_sent+PAGE_UPLOAD_LIMIT_BYTES]\n                        else:\n                            pageContent = msg[bytes_sent:msgLen]\n                        self.hutil.log(\"WritePageBlob: pageContentLen:\"+str(len(pageContent)))\n                        result = self.put_page_update(pageContent, blobUri, bytes_sent)\n                        if(result == CommonVariables.success):\n                            self.hutil.log(\"WritePageBlob: page written succesfully\")\n                        else:\n                            self.hutil.log(\"WritePageBlob: page failed to write\")\n                            break\n                        bytes_sent = bytes_sent + len(pageContent)                      \n                    if(result == CommonVariables.success):\n                        self.hutil.log(\"WritePageBlob: page-blob written succesfully\")\n                        retry_times = 0\n                    else:\n                        self.hutil.log(\"WritePageBlob: page-blob failed to write\")\n                        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.statusBlobUploadError, \"true\")\n                except Exception as e:\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.statusBlobUploadError, \"true\")\n                    self.hutil.log(\"WritePageBlob: Failed to write to page-blob with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n                self.hutil.log(\"WritePageBlob: retry times is \" + str(retry_times))\n                retry_times = retry_times - 1\n        else:\n            self.hutil.log(\"WritePageBlob: bloburi is None\")\n\n    def ClearPageBlob(self, blobUri, blobProperties):\n        if(blobUri is not None):\n            retry_times = 3\n            while(retry_times > 0):\n                try:\n                    http_util = HttpUtil(self.hutil)\n                    sasuri_obj = urlparse.urlparse(blobUri + '&comp=page')\n                    # Get Blob-properties to know content-length\n                    contentLength = int(blobProperties.contentLength)\n                    # Clear Pages\n                    if(contentLength > 0):\n                        result = self.put_page_clear(blobUri, 0, contentLength)\n                        if(result == CommonVariables.success):\n                            self.hutil.log(\"ClearPageBlob: page-blob cleared succesfully\")\n                            retry_times = 0\n                        else:\n                            self.hutil.log(\"ClearPageBlob: page-blob failed to clear\")\n                    else:\n                        self.hutil.log(\"ClearPageBlob: page-blob contentLength is 0\")\n                        retry_times = 0\n                except Exception as e:\n                    self.hutil.log(\"ClearPageBlob: Failed to clear to page-blob with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n                self.hutil.log(\"ClearPageBlob: retry times is \" + str(retry_times))\n                retry_times = retry_times - 1\n        else:\n            self.hutil.log(\"ClearPageBlob: bloburi is None\")\n\n    def GetBlobType(self, blobUri):\n        blobType = \"BlockBlob\"\n        if(blobUri is not None):\n            # Get Blob Properties\n            blobProperties = self.GetBlobProperties(blobUri)\n            if(blobProperties is not None):\n                blobType = blobProperties.blobType\n        
self.hutil.log(\"GetBlobType: Blob-Type :\"+str(blobType))\n        return blobType\n\n    def GetBlobProperties(self, blobUri):\n        blobProperties = None\n        if(blobUri is not None):\n            retry_times = 3\n            while(retry_times > 0):\n                try:\n                    http_util = HttpUtil(self.hutil)\n                    sasuri_obj = urlparse.urlparse(blobUri)\n                    headers = {}\n                    result, httpResp, errMsg = http_util.HttpCallGetResponse('GET', sasuri_obj, None, headers = headers)\n                    self.hutil.log(\"GetBlobProperties: HttpCallGetResponse : result :\" + str(result) + \", errMsg :\" + str(errMsg))\n                    blobProperties = self.httpresponse_get_blob_properties(httpResp)\n                    self.hutil.log(\"GetBlobProperties: blobProperties :\" + str(blobProperties))\n                    retry_times = 0\n                except Exception as e:\n                    self.hutil.log(\"GetBlobProperties: Failed to get blob properties with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n                    self.hutil.log(\"GetBlobProperties: retry times is \" + str(retry_times))\n                    retry_times = retry_times - 1\n        return blobProperties\n\n    def put_page_clear(self, blobUri, pageBlobIndex, clearLength):\n        http_util = HttpUtil(self.hutil)\n        sasuri_obj = urlparse.urlparse(blobUri + '&comp=page')\n        headers = {}\n        headers[\"x-ms-page-write\"] = 'clear'\n        headers[\"x-ms-range\"] = 'bytes={0}-{1}'.format(pageBlobIndex, pageBlobIndex + clearLength - 1)\n        headers[\"Content-Length\"] = 0\n        result = http_util.Call(method = 'PUT', sasuri_obj = sasuri_obj, data = None, headers = headers, fallback_to_curl = True)\n        return result\n\n    def put_page_update(self, pageContent, blobUri, pageBlobIndex):\n        http_util = HttpUtil(self.hutil)\n        sasuri_obj = urlparse.urlparse(blobUri + '&comp=page')\n        headers = {}\n        headers[\"x-ms-page-write\"] = 'update'\n        headers[\"x-ms-range\"] = 'bytes={0}-{1}'.format(pageBlobIndex, pageBlobIndex + len(pageContent) - 1)\n        headers[\"Content-Length\"] = len(str(pageContent))\n        result = http_util.Call(method = 'PUT', sasuri_obj = sasuri_obj, data = pageContent, headers = headers, fallback_to_curl = True)\n        return result\n    \n    def try_resize_page_blob(self, blobUri, size):\n        isSuccessful = False\n        if (size % 512 == 0):\n            try:\n                http_util = HttpUtil(self.hutil)\n                sasuri_obj = urlparse.urlparse(blobUri + '&comp=properties')\n                headers = {}\n                headers[\"x-ms-blob-content-length\"] = size\n                headers[\"Content-Length\"] = size\n                result = http_util.Call(method = 'PUT', sasuri_obj = sasuri_obj, data = None, headers = headers, fallback_to_curl = True)\n                if(result == CommonVariables.success):\n                    isSuccessful = True\n                else:\n                    self.hutil.log(\"try_resize_page_blob: page-blob resize failed, size :\"+str(size)+\", result :\"+str(result))\n            except Exception as e:\n                self.hutil.log(\"try_resize_page_blob: failed to resize page-blob with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n        else:\n            self.hutil.log(\"try_resize_page_blob: invalid size : \" + str(size))\n        return isSuccessful\n\n    def 
httpresponse_get_blob_properties(self, httpResp):\n        blobProperties = None\n        if(httpResp != None):\n            self.hutil.log(\"httpresponse_get_blob_properties: Blob-properties response status:\"+str(httpResp.status))\n            if(httpResp.status == 200):\n                resp_headers = httpResp.getheaders()\n                blobType = httpResp.getheader('x-ms-blob-type')\n                contentLength = httpResp.getheader('Content-Length')\n                blobProperties = BlobProperties(blobType, contentLength)\n        return blobProperties\n\n    def VerifyIfBlobIsEmpty(self, blobUri):\n        try:\n            if(blobUri is not None):\n                blobProperties = self.GetBlobProperties(blobUri)\n                if (str(blobProperties.blobType).lower() == \"pageblob\"):\n                    self.hutil.log(\"VerifyIfBlobIsEmpty: Skipping for page blob\")\n                    return True\n                    \n                self.hutil.log(\"VerifyIfBlobIsEmpty: Content Length of blob: \" + str(blobProperties.contentLength))\n                if(int(blobProperties.contentLength) == 0):\n                    return True\n                else:\n                    return False\n            else:\n                self.hutil.log(\"VerifyIfBlobIsEmpty: bloburi is None\")\n        except Exception as e:\n            self.hutil.log(\"VerifyIfBlobIsEmpty: Failed to get the blob content length with error: %s, stack trace: %s\" % (str(e), traceback.format_exc()))\n        return True\n\n    def IsEmptyBlob(self, blobUri):\n        if (bool(BlobWriter.blobEmptyDetails) == True):\n            if (blobUri in BlobWriter.blobEmptyDetails.keys()):\n                return BlobWriter.blobEmptyDetails[blobUri]\n        \n        isEmptyBlob = self.VerifyIfBlobIsEmpty(blobUri)\n        BlobWriter.blobEmptyDetails[blobUri] = isEmptyBlob\n        return isEmptyBlob\n\n"
  },
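WritePageBlob above has to respect page-blob geometry: every write must start and end on a 512-byte boundary, so the message is right-padded to the next multiple of 512 and, if it still exceeds the blob's content length, truncated to its tail. The arithmetic in isolation (the resize attempt and the 4 MB per-request chunking are omitted):

```python
PAGE_SIZE_BYTES = 512  # page blobs are addressed in 512-byte pages

def prepare_page_payload(msg, blob_content_length):
    if len(msg) % PAGE_SIZE_BYTES != 0:
        padded = len(msg) + (PAGE_SIZE_BYTES - len(msg) % PAGE_SIZE_BYTES)
        msg = msg.ljust(padded)                     # pad to a 512-byte boundary
    if len(msg) > blob_content_length:
        msg = msg[len(msg) - blob_content_length:]  # keep only the tail that fits
    return msg

# 700 bytes pads up to 1024, which fits a 1024-byte blob exactly
assert len(prepare_page_payload("x" * 700, 1024)) == 1024
```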
  {
    "path": "VMBackup/main/common.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nclass CommonVariables:\n    azure_path = 'main/azure'\n    utils_path_name = 'Utils'\n    snapshot_service_path_name = \"IaaSExtensionSnapshotService\"\n    extension_name = 'MyBackupTestLinuxInt'\n    extension_version = \"1.0.9120.0\"\n    extension_zip_version = \"1\"\n    extension_type = extension_name\n    extension_media_link = 'https://sopattna.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'\n    extension_label = 'Windows Azure VMBackup Extension for Linux IaaS'\n    extension_description = extension_label\n    object_str = 'objectStr'\n    logs_blob_uri = 'logsBlobUri'\n    status_blob_uri = 'statusBlobUri'\n    commandStartTimeUTCTicks = \"commandStartTimeUTCTicks\"\n    task_id = 'taskId'\n    command_to_execute = 'commandToExecute'\n    iaas_vmbackup_command = 'snapshot'\n    iaas_install_command = 'install'\n    locale = 'locale'\n    vmType = 'vmType'\n    VmTypeV1 = 'microsoft.classiccompute/virtualmachines'\n    VmTypeV2 = 'microsoft.compute/virtualmachines'\n    customSettings = 'customSettings'\n    statusBlobUploadError = 'statusBlobUploadError'\n    TempStatusFileName = 'tempStatusFile.status'\n    onlyLocalFilesystems = 'onlyLocalFilesystems'\n    \n    # -------------------- Dynamic Settings from CRP --------------------\n    isSnapshotTtlEnabled = 'isSnapshotTtlEnabled'\n    useMccfForLad = 'useMccfForLad'\n    useMccfToFetchDsasForAllDisks = 'useMccfToFetchDsasForAllDisks'\n    enableSnapshotExtensionPolling = \"EnableVMSnapshotExtensionPolling\"\n    isVmmdBlobIncluded = 'isVmmdBlobIncluded'\n    key = 'Key'\n    value = 'Value'\n    snapshotTtlHeader = 'x-ms-snapshot-ttl-expiry-hours'\n\n    snapshotTaskToken = 'snapshotTaskToken'\n    snapshotCreator = 'snapshotCreator'\n    hostStatusCodePreSnapshot = 'hostStatusCodePreSnapshot'\n    hostStatusCodeDoSnapshot = 'hostStatusCodeDoSnapshot'\n    guestExtension = 'guestExtension'\n    backupHostService = 'backupHostService'\n    includedDisks = 'includedDisks'\n    isAnyDiskExcluded = 'isAnyDiskExcluded'\n    dataDiskLunList = 'dataDiskLunList'\n    isOSDiskIncluded = 'isOSDiskIncluded'\n    isVmgsBlobIncluded = 'isVmgsBlobIncluded'\n    isVMADEEnabled = 'isVMADEEnabled'\n    isOsDiskADEEncrypted = 'isOsDiskADEEncrypted'\n    areDataDisksADEEncrypted = 'areDataDisksADEEncrypted'\n    diskEncryptionSettings = 'DiskEncryptionSettings'\n    isAnyWADiskIncluded = 'isAnyWADiskIncluded'\n    isAnyDirectDriveDiskIncluded = 'isAnyDirectDriveDiskIncluded'\n    diskEncryptionKey = \"x-ms-meta-DiskEncryptionSettings\"\n    instantAccessDurationMinutes = 'instantAccessDurationMinutes'\n\n    onlyGuest = 'onlyGuest'\n    firstGuestThenHost = 'firstGuestThenHost'\n    firstHostThenGuest = 'firstHostThenGuest'\n    onlyHost = 'onlyHost'\n\n    SnapshotMethod = 'SnapshotMethod'\n    IsAnySnapshotFailed = 'IsAnySnapshotFailed'\n   
 SnapshotRateExceededFailureCount = 'SnapshotRateExceededFailureCount'\n\n    status_transitioning = 'transitioning'\n    status_warning = 'warning'\n    status_success = 'success'\n    status_error = 'error'\n\n    unable_to_open_err_string= 'file open failed for some mount'\n\n    \"\"\"\n    error code definitions\n    \"\"\"\n    success_appconsistent = 0\n    success = 1\n    error = 2\n    SuccessAlreadyProcessedInput = 3\n    ExtensionTempTerminalState = 4\n\n    error_parameter = 11\n    error_12 = 12\n    error_wrong_time = 13\n    error_same_taskid = 14\n    error_http_failure = 15\n    FailedHandlerGuestAgentCertificateNotFound = 16\n    #error_upload_status_blob = 16\n\n    FailedRetryableSnapshotFailedNoNetwork = 76\n    FailedSnapshotLimitReached = 85\n    FailedRetryableSnapshotRateExceeded = 173\n    FailedFsFreezeFailed = 121\n    FailedFsFreezeTimeout = 122\n    FailedUnableToOpenMount = 123\n    FailedSafeFreezeBinaryNotFound = 124\n    FailedHostSnapshotRemoteServerError = 556\n    \"\"\"\n    Pre-Post Plugin error code definitions\n    \"\"\"\n\n    PrePost_PluginStatus_Success = 0\n    PrePost_ScriptStatus_Success = 0\n    PrePost_ScriptStatus_Error = 1\n    PrePost_ScriptStatus_Warning = 2\n    FailedInvalidDataDiskLunList = 17\n\n    FailedPrepostPreScriptFailed = 300\n    FailedPrepostPostScriptFailed = 301\n    FailedPrepostPreScriptNotFound = 302\n    FailedPrepostPostScriptNotFound = 303\n    FailedPrepostPluginhostConfigParsing = 304\n    FailedPrepostPluginConfigParsing = 305\n    FailedPrepostPreScriptPermissionError = 306\n    FailedPrepostPostScriptPermissionError = 307\n    FailedPrepostPreScriptTimeout = 308\n    FailedPrepostPostScriptTimeout = 309\n    FailedPrepostPluginhostPreTimeout = 310\n    FailedPrepostPluginhostPostTimeout = 311\n    FailedPrepostCheckSumMismatch = 312\n    FailedPrepostPluginhostConfigNotFound = 313\n    FailedPrepostPluginhostConfigPermissionError = 314\n    FailedPrepostPluginhostConfigOwnershipError = 315\n    FailedPrepostPluginConfigNotFound = 316\n    FailedPrepostPluginConfigPermissionError = 317\n    FailedPrepostPluginConfigOwnershipError = 318\n    FailedGuestAgentInvokedCommandTooLate = 402\n    \n    FailedWorkloadPreError = 500\n    FailedWorkloadConfParsingError = 501\n    FailedWorkloadInvalidRole = 502\n    FailedWorkloadInvalidWorkloadName = 503\n    FailedWorkloadPostError = 504\n    FailedWorkloadAuthorizationMissing = 505\n    FailedWorkloadConnectionError = 506\n    FailedWorkloadIPCDirectoryMissing = 507\n    FailedWorkloadDatabaseStatusChanged = 508\n    FailedWorkloadQuiescingError = 509\n    FailedWorkloadQuiescingTimeout = 510\n    FailedWorkloadDatabaseInNoArchiveLog = 511\n    FailedWorkloadLogModeChanged = 512\n\n    \"\"\"\n    Consistency-Types\n    \"\"\"\n    consistency_none = 'none'\n    consistency_crashConsistent = 'crashConsistent'\n    consistency_fileSystemConsistent = 'fileSystemConsistent'\n    consistency_applicationConsistent = 'applicationConsistent'\n\n    @staticmethod\n    def isTerminalStatus(status):\n        return (status==CommonVariables.status_success or status==CommonVariables.status_error)\n\nclass DeviceItem(object):\n    def __init__(self):\n        #NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL\n        self.name = None\n        self.type = None\n        self.file_system = None\n        self.mount_point = None\n        self.label = None\n        self.uuid = None\n        self.model = None\n        self.size = None\n    def __str__(self):\n        return \"name:\" + 
str(self.name) + \" type:\" + str(self.type) + \" fstype:\" + str(self.file_system) + \" mountpoint:\" + str(self.mount_point) + \" label:\" + str(self.label) + \" model:\" + str(self.model)\n"
  },
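  {
    "path": "VMBackup/main/examples/deviceitem_lsblk_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's illustrative sketch (hypothetical file, not shipped with the extension).\n# The DeviceItem fields in common.py above mirror lsblk columns\n# (NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL); this self-contained helper\n# shows how such a record could be parsed from `lsblk -P` KEY=\"value\" output.\n# The parser and sample line are assumptions for illustration only.\n\nimport shlex\n\n# one line as produced by: lsblk -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL\nSAMPLE = 'NAME=\"sda1\" TYPE=\"part\" FSTYPE=\"ext4\" MOUNTPOINT=\"/\" LABEL=\"rootfs\" UUID=\"1234-ABCD\" MODEL=\"\"'\n\ndef parse_lsblk_line(line):\n    # shlex strips the quotes around each KEY=\"value\" pair, yielding 'KEY=value' tokens\n    pairs = dict(token.split('=', 1) for token in shlex.split(line))\n    return {\n        'name': pairs.get('NAME'),\n        'type': pairs.get('TYPE'),\n        'file_system': pairs.get('FSTYPE'),\n        'mount_point': pairs.get('MOUNTPOINT'),\n        'label': pairs.get('LABEL'),\n        'uuid': pairs.get('UUID'),\n        'model': pairs.get('MODEL'),\n    }\n\nif __name__ == '__main__':\n    print(parse_lsblk_line(SAMPLE))\n"
  },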
  {
    "path": "VMBackup/main/dhcpHandler.py",
    "content": "# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+ and Openssl 1.0+\n\n\n# Output of running the script on Windows:\n# The scheduled events endpoint IP address will be available as part of the system environment variable.\n#\n# Output of running the script on Linux:\n# The scheduled events endpoint IP address will be available as part of the environment variable for all users.\n\nimport os\nimport socket\nimport struct\nimport array\nimport time\nfrom Utils import dhcpUtils\nimport sys\nif sys.platform == 'win32':\n    import _winreg as wreg\nfrom uuid import getnode as get_mac\n\n\"\"\"\nDefines dhcp exception\n\"\"\"\n\nclass BaseError(Exception):\n    \"\"\"\n    Base error class.\n    \"\"\"\n    def __init__(self, errno, msg, inner=None):\n        msg = u\"({0}){1}\".format(errno, msg)\n        if inner is not None:\n            msg = u\"{0} \\n  inner error: {1}\".format(msg, inner)\n        super(BaseError, self).__init__(msg)\n\nclass DhcpError(BaseError):\n    \"\"\"\n    Failed to handle dhcp response\n    \"\"\"\n    def __init__(self, msg=None, inner=None):\n        super(DhcpError, self).__init__('000006', msg, inner)\n\nclass DhcpHandler(object):\n\n    def __init__(self, logger):\n        self.osutil = dhcpUtils.DefaultOSUtil(logger)\n        self.endpoint = None\n        self.gateway = None\n        self.routes = None\n        self._request_broadcast = False\n        self.skip_cache = False\n        self.logger = logger\n\n    def getHostEndoint(self):\n        self.run()\n        return self.endpoint\n\n    def run(self):\n        \"\"\"\n        Send dhcp request\n        \"\"\"\n\n        self.send_dhcp_req()\n\n    def _send_dhcp_req(self, request):\n        __waiting_duration__ = [0, 10, 30, 60, 60]\n        for duration in __waiting_duration__:\n            try:\n                self.osutil.allow_dhcp_broadcast()\n                response = self.socket_send(request)\n                self.validate_dhcp_resp(request, response)\n                return response\n            except DhcpError as e:\n                self.logger.log(\"Failed to send DHCP request: \" + str(e))\n            time.sleep(duration)\n        return None\n\n    def send_dhcp_req(self):\n        \"\"\"\n        Build dhcp request with mac addr\n        Configure route to allow dhcp traffic\n        Stop dhcp service if necessary\n        \"\"\"\n        self.logger.log(\"Sending dhcp request\")\n        mac_addr = self.osutil.get_mac_in_bytes()\n\n\n        req = self.build_dhcp_request(mac_addr, self._request_broadcast)\n\n        resp = self._send_dhcp_req(req)\n\n        if resp is None:\n            raise DhcpError(\"Failed to receive dhcp response.\")\n        self.endpoint, self.gateway, self.routes = self.parse_dhcp_resp(resp)\n        self.logger.log('Scheduled Events endpoint IP address:' + self.endpoint)\n\n    def validate_dhcp_resp(self, request, response):\n        bytes_recv = len(response)\n        if 
bytes_recv < 0xF6:\n            self.logger.log(\"HandleDhcpResponse: Too few bytes received: \" + str(bytes_recv))\n            return False\n\n        self.logger.log(\"BytesReceived:{0}\" + str(hex(bytes_recv)))\n        #self.logger.log(\"DHCP response:{0}\" + dhcpUtils.hex_dump(response, bytes_recv))\n\n        # check transactionId, cookie, MAC address cookie should never mismatch\n        # transactionId and MAC address may mismatch if we see a response\n        # meant from another machine\n        if not dhcpUtils.compare_bytes(request, response, 0xEC, 4):\n            self.logger.log(\"Cookie not match:\\nsend={0},\\nreceive={1}\".format(dhcpUtils.hex_dump3(request, 0xEC, 4), dhcpUtils.hex_dump3(response, 0xEC, 4)))\n            raise DhcpError(\"Cookie in dhcp respones doesn't match the request\")\n\n        if not dhcpUtils.compare_bytes(request, response, 4, 4):\n            self.logger.log(\"TransactionID not match:\\nsend={0},\\nreceive={1}\".format(dhcpUtils.hex_dump3(request, 4, 4), dhcpUtils.hex_dump3(response, 4, 4)))\n            raise DhcpError(\"TransactionID in dhcp respones \"\n                            \"doesn't match the request\")\n\n        if not dhcpUtils.compare_bytes(request, response, 0x1C, 6):\n            self.logger.log(\"Mac Address not match:\\nsend={0},\\nreceive={1}\".format(dhcpUtils.hex_dump3(request, 0x1C, 6), dhcpUtils.hex_dump3(response, 0x1C, 6)))\n            raise DhcpError(\"Mac Addr in dhcp respones \"\n                            \"doesn't match the request\")\n\n\n    def parse_route(self, response, option, i, length, bytes_recv):\n        # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx\n        self.logger.log(\"Routes at offset: {0} with length:{1}\".format(hex(i), hex(length)))\n        routes = []\n        if length < 5:\n            self.logger.log(\"Data too small for option:{0}\" + str(option))\n        j = i + 2\n        while j < (i + length + 2):\n            mask_len_bits = dhcpUtils.str_to_ord(response[j])\n            mask_len_bytes = (((mask_len_bits + 7) & ~7) >> 3)\n            mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - mask_len_bits))\n            j += 1\n            net = dhcpUtils.unpack_big_endian(response, j, mask_len_bytes)\n            net <<= (32 - mask_len_bytes * 8)\n            net &= mask\n            j += mask_len_bytes\n            gateway = dhcpUtils.unpack_big_endian(response, j, 4)\n            j += 4\n            routes.append((net, mask, gateway))\n        if j != (i + length + 2):\n            self.logger.log(\"Unable to parse routes\")\n        return routes\n\n\n    def parse_ip_addr(self, response, option, i, length, bytes_recv):\n        if i + 5 < bytes_recv:\n            if length != 4:\n                self.logger.log(\"Endpoint or Default Gateway not 4 bytes\")\n                return None\n            addr = dhcpUtils.unpack_big_endian(response, i + 2, 4)\n            ip_addr = dhcpUtils.int_to_ip4_addr(addr)\n            return ip_addr\n        else:\n            self.logger.log(\"Data too small for option: \" + str(option))\n        return None\n\n\n    def parse_dhcp_resp(self, response):\n        \"\"\"\n        Parse DHCP response:\n        Returns endpoint server or None on error.\n        \"\"\"\n        self.logger.log('Parsing Dhcp response')\n        bytes_recv = len(response)\n        endpoint = None\n        gateway = None\n        routes = None\n\n        # Walk all the returned options, parsing out what we need, ignoring the\n        # others. 
We need the custom option 245 to find the endpoint we talk to;\n        # option 3 for the default gateway and option 249 for routes; 255 is end.\n\n        i = 0xF0  # offset to first option\n        while i < bytes_recv:\n            option = dhcpUtils.str_to_ord(response[i])\n            length = 0\n            if (i + 1) < bytes_recv:\n                length = dhcpUtils.str_to_ord(response[i + 1])\n                self.logger.log(\"DHCP option {0} at offset:{1} with length:{2}\".format(hex(option), hex(i), hex(length)))\n            if option == 255:\n                self.logger.log(\"DHCP packet ended at offset:{0}\".format(hex(i)))\n                break\n            elif option == 249:\n                routes = self.parse_route(response, option, i, length, bytes_recv)\n            elif option == 3:\n                gateway = self.parse_ip_addr(response, option, i, length, bytes_recv)\n                self.logger.log(\"Default gateway:{0}, at {1}\".format(gateway, hex(i)))\n            elif option == 245:\n                endpoint = self.parse_ip_addr(response, option, i, length, bytes_recv)\n                self.logger.log(\"Azure scheduled events endpoint IP:{0}, at {1}\".format(endpoint, hex(i)))\n            else:\n                self.logger.log(\"Skipping DHCP option:{0} at {1} with length {2}\".format(hex(option), hex(i), hex(length)))\n            i += length + 2\n        return endpoint, gateway, routes\n\n\n    def socket_send(self, request):\n        sock = None\n        try:\n            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,\n                                 socket.IPPROTO_UDP)\n            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n            sock.bind((\"0.0.0.0\", 68))\n            sock.sendto(request, (\"<broadcast>\", 67))\n            sock.settimeout(10)\n            self.logger.log(\"Send DHCP request: Setting socket.timeout=10, entering recv\")\n            response = sock.recv(1024)\n            return response\n        except IOError as e:\n            raise DhcpError(\"{0}\".format(e))\n        finally:\n            if sock is not None:\n                sock.close()\n\n\n    def build_dhcp_request(self, mac_addr, request_broadcast):\n        \"\"\"\n        Build DHCP request string.\n        \"\"\"\n        #\n        # typedef struct _DHCP {\n        #  UINT8   Opcode;                    /* op:    BOOTREQUEST or BOOTREPLY */\n        #  UINT8   HardwareAddressType;       /* htype: ethernet */\n        #  UINT8   HardwareAddressLength;     /* hlen:  6 (48 bit mac address) */\n        #  UINT8   Hops;                      /* hops:  0 */\n        #  UINT8   TransactionID[4];          /* xid:   random */\n        #  UINT8   Seconds[2];                /* secs:  0 */\n        #  UINT8   Flags[2];                  /* flags: 0 or 0x8000 for broadcast*/\n        #  UINT8   ClientIpAddress[4];        /* ciaddr: 0 */\n        #  UINT8   YourIpAddress[4];          /* yiaddr: 0 */\n        #  UINT8   ServerIpAddress[4];        /* siaddr: 0 */\n        #  UINT8   RelayAgentIpAddress[4];    /* giaddr: 0 */\n        #  UINT8   ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */\n        #  UINT8   ServerName[64];            /* sname:  0 */\n        #  UINT8   BootFileName[128];         /* file:   0  */\n        #  UINT8   MagicCookie[4];            /*   99  130   83   99 */\n        #                                        /* 0x63 0x82 0x53 0x63 */\n        #     
/* options -- hard code ours */\n        #\n        #     UINT8 MessageTypeCode;              /* 53 */\n        #     UINT8 MessageTypeLength;            /* 1 */\n        #     UINT8 MessageType;                  /* 1 for DISCOVER */\n        #     UINT8 End;                          /* 255 */\n        # } DHCP;\n        #\n\n        # list of 244 zeros\n        # (struct.pack_into would be good here, but requires Python 2.5)\n        request = [0] * 244\n\n        trans_id = self.gen_trans_id()\n\n        # Opcode = 1\n        # HardwareAddressType = 1 (ethernet/MAC)\n        # HardwareAddressLength = 6 (ethernet/MAC/48 bits)\n        for a in range(0, 3):\n            request[a] = [1, 1, 6][a]\n\n        # fill in transaction id (random number to ensure response matches request)\n        for a in range(0, 4):\n            request[4 + a] = dhcpUtils.str_to_ord(trans_id[a])\n\n        self.logger.log(\"BuildDhcpRequest: transactionId:{0},{1:04x}\".format(dhcpUtils.hex_dump2(trans_id), dhcpUtils.unpack_big_endian(request, 4, 4)))\n\n        if request_broadcast:\n            # set the broadcast flag to request that the dhcp server\n            # respond to a broadcast address;\n            # this is useful when the user's dhclient fails.\n            request[0x0A] = 0x80\n\n        # fill in ClientHardwareAddress\n        for a in range(0, 6):\n            request[0x1C + a] = dhcpUtils.str_to_ord(mac_addr[a])\n\n        # DHCP Magic Cookie: 99, 130, 83, 99\n        # MessageTypeCode = 53 DHCP Message Type\n        # MessageTypeLength = 1\n        # MessageType = DHCPDISCOVER\n        # End = 255 DHCP_END\n        for a in range(0, 8):\n            request[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a]\n        return array.array(\"B\", request)\n\n    def gen_trans_id(self):\n        return os.urandom(4)\n\n"
  },
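  {
    "path": "VMBackup/main/examples/dhcp_option245_walk_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's illustrative sketch (hypothetical file, not shipped with the extension).\n# It mirrors the option walk in DhcpHandler.parse_dhcp_resp above: DHCP options\n# start at offset 0xF0 and are TLV records (one byte of option code, one byte of\n# length, then the payload), so the cursor advances by length + 2. Custom option\n# 245 carries the Azure wire server / scheduled events endpoint IP. Python 3\n# only; the synthetic packet below is made-up test data.\n\ndef walk_options(packet):\n    endpoint = None\n    i = 0xF0  # offset of the first option\n    while i < len(packet):\n        option = packet[i]\n        if option == 255:  # end-of-options marker\n            break\n        length = packet[i + 1]\n        if option == 245 and length == 4:\n            payload = packet[i + 2:i + 6]\n            endpoint = '.'.join(str(b) for b in payload)\n        i += length + 2  # skip option code, length byte and payload\n    return endpoint\n\nif __name__ == '__main__':\n    pkt = bytearray(0xF0)  # zeroed header region up to the options area\n    pkt += bytes([3, 4, 10, 0, 0, 1])  # option 3: default gateway 10.0.0.1\n    pkt += bytes([245, 4, 168, 63, 129, 16])  # option 245: wire server IP\n    pkt += bytes([255])  # end of options\n    print(walk_options(pkt))  # prints 168.63.129.16\n"
  },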
  {
    "path": "VMBackup/main/freezesnapshotter.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\ntry:\n    import urlparse as urlparser\nexcept ImportError:\n    import urllib.parse as urlparser\nimport traceback\nimport datetime\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport multiprocessing as mp\nimport time\nimport json\nfrom common import CommonVariables\nfrom HttpUtil import HttpUtil\nfrom Utils import Status\nfrom Utils import HandlerUtil\nfrom fsfreezer import FsFreezer\nfrom guestsnapshotter import GuestSnapshotter\nfrom hostsnapshotter import HostSnapshotter\nfrom Utils import HostSnapshotObjects\nimport ExtensionErrorCodeHelper\n# need to be implemented in next release\n#from dhcpHandler import DhcpHandler\n\nclass FreezeSnapshotter(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger, hutil , freezer, g_fsfreeze_on, para_parser, takeCrashConsistentSnapshot):\n        self.logger = logger\n        self.configfile = '/etc/azure/vmbackup.conf'\n        self.hutil = hutil\n        self.freezer = freezer\n        self.g_fsfreeze_on = g_fsfreeze_on\n        self.para_parser = para_parser\n        if(para_parser.snapshotTaskToken == None):\n            para_parser.snapshotTaskToken = '' #making snapshot string empty when snapshotTaskToken is null\n        self.logger.log('snapshotTaskToken : ' + str(para_parser.snapshotTaskToken))\n        self.takeSnapshotFrom = CommonVariables.firstHostThenGuest\n        self.isManaged = False\n        self.taskId = self.para_parser.taskId\n        self.hostIp = '168.63.129.16'\n        self.additional_headers = []\n        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success\n        self.takeCrashConsistentSnapshot = takeCrashConsistentSnapshot\n        self.logger.log('FreezeSnapshotter : takeCrashConsistentSnapshot = ' + str(self.takeCrashConsistentSnapshot))\n        \n        #implement in next release\n        '''\n        # fetching wireserver IP from DHCP\n        self.dhcpHandlerObj = None\n        try:\n            self.dhcpHandlerObj = DhcpHandler(self.logger)\n            self.hostIp = self.dhcpHandlerObj.getHostEndoint()\n        except Exception as e:\n            errorMsg = \"Failed to get hostIp from DHCP with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg, True, 'Error')\n            self.hostIp = '168.63.129.16'\n        '''\n\n        self.logger.log( \"hostIp : \" + self.hostIp)\n\n        try:\n            if(para_parser.customSettings != None and para_parser.customSettings != ''):\n                self.logger.log('customSettings : ' + str(para_parser.customSettings))\n                customSettings = json.loads(para_parser.customSettings)\n                snapshotMethodConfigValue = 
self.hutil.get_strvalue_from_configfile(CommonVariables.SnapshotMethod, customSettings['takeSnapshotFrom'])\n                self.logger.log('snapshotMethodConfigValue : ' + str(snapshotMethodConfigValue))\n                if snapshotMethodConfigValue != None and snapshotMethodConfigValue != '':\n                    self.takeSnapshotFrom = snapshotMethodConfigValue\n                else:\n                    self.takeSnapshotFrom = customSettings['takeSnapshotFrom']\n\n                self.isManaged = customSettings['isManagedVm']\n                if(\"backupTaskId\" in customSettings.keys()):\n                    self.taskId = customSettings[\"backupTaskId\"]\n\n                waDiskLunList = []\n\n                if \"waDiskLunList\" in customSettings.keys() and customSettings['waDiskLunList'] != None:\n                    waDiskLunList = customSettings['waDiskLunList']\n                    self.logger.log('WA Disk Lun List ' + str(waDiskLunList))\n\n                if waDiskLunList != None and len(waDiskLunList) != 0 and para_parser.includeLunList != None and len(para_parser.includeLunList) != 0:\n                    for crpLunNo in para_parser.includeLunList:\n                        if crpLunNo in waDiskLunList:\n                            self.logger.log('WA disk is present on the VM. Setting the snapshot mode to onlyHost.')\n                            self.takeSnapshotFrom = CommonVariables.onlyHost\n                            break\n            else:\n                self.logger.log('CustomSettings is null in extension input.')\n                snapshotMethodConfigValue = self.hutil.get_strvalue_from_configfile(CommonVariables.SnapshotMethod, CommonVariables.firstHostThenGuest)\n                self.logger.log('snapshotMethodConfigValue : ' + str(snapshotMethodConfigValue))\n                if snapshotMethodConfigValue != None and snapshotMethodConfigValue != '':\n                    self.takeSnapshotFrom = snapshotMethodConfigValue\n        except Exception as e:\n            errMsg = 'Failed to parse customSettings with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            self.isManaged = True\n\n        try:\n            if(para_parser.includedDisks != None and CommonVariables.isAnyWADiskIncluded in para_parser.includedDisks.keys()):\n                if (para_parser.includedDisks[CommonVariables.isAnyWADiskIncluded] == True):\n                    self.logger.log('WA disk is included. Setting the snapshot mode to onlyHost.')\n                    self.takeSnapshotFrom = CommonVariables.onlyHost\n\n            if(para_parser.includedDisks != None and CommonVariables.isVmgsBlobIncluded in para_parser.includedDisks.keys()):\n                if (para_parser.includedDisks[CommonVariables.isVmgsBlobIncluded] == True):\n                    self.logger.log('Vmgs Blob is included. Setting the snapshot mode to onlyHost.')\n                    self.takeSnapshotFrom = CommonVariables.onlyHost\n\n            if(para_parser.includedDisks != None and CommonVariables.isAnyDirectDriveDiskIncluded in para_parser.includedDisks.keys()):\n                if (para_parser.includedDisks[CommonVariables.isAnyDirectDriveDiskIncluded] == True):\n                    self.logger.log('DirectDrive Disk is included. 
Setting the snapshot mode to onlyHost.')\n                    self.takeSnapshotFrom = CommonVariables.onlyHost\n\n            if(para_parser.includedDisks != None and CommonVariables.isAnyDiskExcluded in para_parser.includedDisks):\n                # IsAnyDiskExcluded is true, but the included LUN list is empty in the extension input\n                if (para_parser.includedDisks[CommonVariables.isAnyDiskExcluded] == True and (para_parser.includeLunList == None or len(para_parser.includeLunList) == 0)):\n                    # A DirectDrive disk is among the included disks, so fail the extension: the snapshot can't be taken via the guest\n                    if(CommonVariables.isAnyDirectDriveDiskIncluded in para_parser.includedDisks and para_parser.includedDisks[CommonVariables.isAnyDirectDriveDiskIncluded] == True):\n                        errMsg = 'DirectDrive disk is included, so the host must create the snapshot. IsAnyDiskExcluded is true, but the included LUN list is empty in the extension input, '\\\n                                    'which is not allowed for host DoSnapshot. Thus, failing the extension run.'\n                        self.logger.log(errMsg, True, 'Error')\n                        self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList)\n                    # The VmgsBlob is among the included disks, so fail the extension: the snapshot can't be taken via the guest\n                    elif(CommonVariables.isVmgsBlobIncluded in para_parser.includedDisks and para_parser.includedDisks[CommonVariables.isVmgsBlobIncluded] == True):\n                        errMsg = 'VmgsBlob is included, so the host must create the snapshot. IsAnyDiskExcluded is true, but the included LUN list is empty in the extension input, '\\\n                                    'which is not allowed for host DoSnapshot. Thus, failing the extension run.'\n                        self.logger.log(errMsg, True, 'Error')\n                        self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList)\n                    # A WADisk is among the included disks, so fail the extension: the snapshot can't be taken via the guest\n                    elif(CommonVariables.isAnyWADiskIncluded in para_parser.includedDisks and para_parser.includedDisks[CommonVariables.isAnyWADiskIncluded] == True):\n                        errMsg = 'WADisk is included, so the host must create the snapshot. IsAnyDiskExcluded is true, but the included LUN list is empty in the extension input, '\\\n                                    'which is not allowed for host DoSnapshot. Thus, failing the extension run.'\n                        self.logger.log(errMsg, True, 'Error')\n                        self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList)\n                    else:\n                        self.logger.log('Some disks are excluded from backup and LUN list is not present. 
Setting the snapshot mode to onlyGuest.')\n                        self.takeSnapshotFrom = CommonVariables.onlyGuest\n\n            # Check if the snapshot uris have special characters\n            if self.hutil.UriHasSpecialCharacters(self.para_parser.blobs):\n                self.logger.log('Some disk blob Uris have special characters.')\n        except Exception as e:\n            errMsg = 'Failed to process flags in includedDisks with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n\n        self.logger.log('[FreezeSnapshotter] isManaged flag : ' + str(self.isManaged))\n\n    def doFreezeSnapshot(self):\n        run_result = CommonVariables.success\n        run_status = 'success'\n        all_failed = False\n        unable_to_sleep = False\n\n        \"\"\" Do not remove the HttpUtil object creation below; it ensures the HttpUtil singleton is created before the freeze. \"\"\"\n        http_util = HttpUtil(self.logger)\n\n        if(self.takeSnapshotFrom == CommonVariables.onlyGuest):\n            run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()\n        elif(self.takeSnapshotFrom == CommonVariables.firstGuestThenHost):\n            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstGuestThenHost()\n        elif(self.takeSnapshotFrom == CommonVariables.firstHostThenGuest):\n            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()\n        elif(self.takeSnapshotFrom == CommonVariables.onlyHost):\n            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()\n        else:\n            self.logger.log('Snapshot method did not match any listed type, taking firstHostThenGuest as default')\n            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()\n\n        self.logger.log('doFreezeSnapshot : run_result - {0} run_status - {1} all_failed - {2} unable_to_sleep - {3} is_inconsistent - {4} values post snapshot'.format(str(run_result), str(run_status), str(all_failed), str(unable_to_sleep), str(is_inconsistent)))\n\n        if (run_result == CommonVariables.success):\n            run_result, run_status = self.updateErrorCode(blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent)\n\n        snapshot_info_array = self.update_snapshotinfoarray(blob_snapshot_info_array)\n\n        if not (run_result == CommonVariables.success):\n            self.hutil.SetExtErrorCode(self.extensionErrorCode)\n\n        return run_result, run_status, snapshot_info_array\n\n    def update_snapshotinfoarray(self, blob_snapshot_info_array):\n        snapshot_info_array = []\n\n        self.logger.log('updating snapshot info array from blob snapshot info')\n        if blob_snapshot_info_array != None and blob_snapshot_info_array != []:\n            for blob_snapshot_info in blob_snapshot_info_array:\n                if blob_snapshot_info != None:\n                    self.logger.log(\"IsSuccessful:{0}, SnapshotUri:{1}, ErrorMessage:{2}\".format(blob_snapshot_info.isSuccessful, blob_snapshot_info.snapshotUri, blob_snapshot_info.errorMessage))\n\n                    # Sample SnapshotBlobUri Format\n              
      # UltraDisk:     https://md-dd-e470ba041280442aabc964b73060460b.z48.disk.storage.azure.net/disks/e470ba04-1280-442a-abc9-64b73060460b/snapshots?snapshotId=C8E4AC08-8BA6-46B6-973A-BD6C0BD22CD7\n                    # Standard Disk: https://md-pbhlk3l5mb1q.z27.blob.storage.azure.net:443/zzvgfnxr4fgw/abcd?snapshot=2021-07-31T10:07:37.6596865Z\n\n                    blobUri = blob_snapshot_info.snapshotUri\n                    if(blob_snapshot_info.snapshotUri):\n                        endIndexOfBlobUri = blob_snapshot_info.snapshotUri.find('?')\n                        if(blob_snapshot_info.ddSnapshotIdentifier != None):\n                            endIndexOfBlobUri = blob_snapshot_info.snapshotUri.find(\"/snapshots\")\n                        if(endIndexOfBlobUri != -1):\n                            blobUri = blobUri[0:endIndexOfBlobUri]\n                        else:\n                            self.logger.log(\"Unable to find end index of blobUri in snapshotUri. Assigning default snapshotUri to blobUri. This {0} a DirectDrive disk\".format(\"is\" if(blob_snapshot_info.ddSnapshotIdentifier != None) else \"is not\"))\n                    self.logger.log(\"blobUri : {0}\".format(blobUri))\n                        \n                    ddSnapshotIdentifierInfo = None\n                    if(blob_snapshot_info.ddSnapshotIdentifier != None):\n                        # snapshotUri is None for DD Disks. It is populated only for XStore disks\n                        blob_snapshot_info.snapshotUri = None\n                        creationTimeStr = '\\\\/Date(' + blob_snapshot_info.ddSnapshotIdentifier.creationTime + ')\\\\/'\n                        creationTimeObj = Status.CreationTime(creationTimeStr, 0)\n                        ddSnapshotIdentifierInfo = Status.DirectDriveSnapshotIdentifier(creationTimeObj, blob_snapshot_info.ddSnapshotIdentifier.id, blob_snapshot_info.ddSnapshotIdentifier.token, blob_snapshot_info.ddSnapshotIdentifier.instantAccessDurationMinutes)\n                        self.logger.log(\"DDSnapshotIdentifier Information to CRP- creationTime : {0}, id : {1}, token : {2}, instantAccessDurationMinutes : {3}\".format(\n                                        ddSnapshotIdentifierInfo.creationTime.DateTime, ddSnapshotIdentifierInfo.id, ddSnapshotIdentifierInfo.token,\n                                        ddSnapshotIdentifierInfo.instantAccessDurationMinutes if ddSnapshotIdentifierInfo.instantAccessDurationMinutes is not None else 'Not Set'))\n                    else:\n                        self.logger.log(\"No DD Snapshot Identifier Found. 
Hence directDriveSnapshotIdentifier will be Null\")\n\n                    snapshot_info_array.append(Status.SnapshotInfoObj(blob_snapshot_info.isSuccessful, blob_snapshot_info.snapshotUri, blob_snapshot_info.errorMessage, blobUri, ddSnapshotIdentifierInfo))\n\n        return snapshot_info_array\n\n    def updateErrorCode(self, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent):\n        run_result = CommonVariables.success\n        any_failed = False\n        run_status = 'success'\n\n        if unable_to_sleep:\n            run_result = CommonVariables.error\n            run_status = 'error'\n            error_msg = 'T:S Machine unable to sleep'\n            self.logger.log(error_msg, True, 'Error')\n        elif is_inconsistent == True:\n            run_result = CommonVariables.error\n            run_status = 'error'\n            error_msg = 'Snapshots are inconsistent'\n            self.logger.log(error_msg, True, 'Error')\n        elif blob_snapshot_info_array != None:\n            for blob_snapshot_info in blob_snapshot_info_array:\n                if blob_snapshot_info != None and blob_snapshot_info.errorMessage != None:\n                    if 'The rate of snapshot blob calls is exceeded' in blob_snapshot_info.errorMessage:\n                        run_result = CommonVariables.FailedRetryableSnapshotRateExceeded\n                        run_status = 'error'\n                        error_msg = 'Snapshot failed with SnapshotRateExceeded; marking the run retryable'\n                        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotRateExceeded\n                        self.logger.log(error_msg, True, 'Error')\n                        break\n                    elif 'The snapshot count against this blob has been exceeded' in blob_snapshot_info.errorMessage:\n                        run_result = CommonVariables.FailedSnapshotLimitReached\n                        run_status = 'error'\n                        error_msg = 'T:S Enable failed with FailedSnapshotLimitReached error'\n                        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedSnapshotLimitReached\n                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n                        self.logger.log(error_msg, True, 'Error')\n                        break\n                    elif blob_snapshot_info.isSuccessful == False and not all_failed:\n                        any_failed = True\n                elif blob_snapshot_info != None and blob_snapshot_info.isSuccessful == False:\n                    any_failed = True\n\n        if all_failed:\n            doSnapshot_status = HandlerUtil.HandlerUtility.get_telemetry_data(CommonVariables.hostStatusCodeDoSnapshot)\n            preSnapshot_status = HandlerUtil.HandlerUtility.get_telemetry_data(CommonVariables.hostStatusCodePreSnapshot)\n\n            if run_result == CommonVariables.success and doSnapshot_status == \"556\" and preSnapshot_status == \"200\":\n                run_result = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHostSnapshotRemoteServerError\n                error_msg = 'T:S Enable failed with FailedHostSnapshotRemoteServerError error'\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHostSnapshotRemoteServerError\n            else:\n                run_result = 
ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork\n                error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedNoNetwork error'\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork\n            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n            self.logger.log(error_msg, True, 'Error')\n        elif run_result == CommonVariables.success and any_failed:\n            run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork\n            error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedRestrictedNetwork error'\n            self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork\n            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n            run_status = 'error'\n            self.logger.log(error_msg, True, 'Error')\n\n        return run_result, run_status\n\n    def freeze(self):\n        try:\n            timeout = self.hutil.get_intvalue_from_configfile('timeout', 60)\n            self.logger.log('T:S freeze, timeout value ' + str(timeout))\n            time_before_freeze = datetime.datetime.now()\n            freeze_result, timedout = self.freezer.freeze_safe(timeout)\n            time_after_freeze = datetime.datetime.now()\n            freezeTimeTaken = time_after_freeze - time_before_freeze\n            self.logger.log('T:S ***** freeze, time_before_freeze=' + str(time_before_freeze) + \", time_after_freeze=\" + str(time_after_freeze) + \", freezeTimeTaken=\" + str(freezeTimeTaken))\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"FreezeTime\", str(time_after_freeze - time_before_freeze - datetime.timedelta(seconds=5)))\n            run_result = CommonVariables.success\n            run_status = 'success'\n            all_failed = False\n            is_inconsistent = False\n            self.logger.log('T:S freeze result ' + str(freeze_result) + ', timedout :' + str(timedout))\n            if (timedout == True):\n                run_result = CommonVariables.FailedFsFreezeTimeout\n                run_status = 'error'\n                error_msg = 'T:S ###### Enable failed with error: freeze took longer than timeout'\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeTimeout\n                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n                self.logger.log(error_msg, True, 'Error')\n            elif(freeze_result is not None and len(freeze_result.errors) > 0 and CommonVariables.unable_to_open_err_string in str(freeze_result)):\n                run_result = CommonVariables.FailedUnableToOpenMount\n                run_status = 'error'\n                error_msg = 'T:S Enable failed with error: ' + str(freeze_result)\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableUnableToOpenMount\n                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n                self.logger.log(error_msg, True, 'Warning')\n            elif(freeze_result is not None and len(freeze_result.errors) > 0):\n                run_result = CommonVariables.FailedFsFreezeFailed\n                run_status = 'error'\n                error_msg = 'T:S Enable failed with error: ' + str(freeze_result)\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed\n                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)\n                self.logger.log(error_msg, True, 'Warning')\n        except Exception as e:\n            errMsg = 'Failed to do the freeze with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            run_result = CommonVariables.error\n            run_status = 'error'\n\n        return run_result, run_status\n\n    def takeSnapshotFromGuest(self):\n        run_result = CommonVariables.success\n        run_status = 'success'\n\n        all_failed = False\n        is_inconsistent = False\n        unable_to_sleep = False\n        blob_snapshot_info_array = None\n        all_snapshots_failed = False\n        try:\n            if(self.para_parser.blobs == None or len(self.para_parser.blobs) == 0):\n                run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork\n                run_status = 'error'\n                error_msg = 'T:S taking snapshot failed as blobs are empty or none'\n                self.logger.log(error_msg, True, 'Error')\n                all_failed = True\n                all_snapshots_failed = True\n                return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent\n\n            if(self.para_parser.isVMADEEnabled == True and self.para_parser.blobs != None):\n                # fetch the disk encryption details\n                self.fetchDiskBlobMetadata()\n\n            if self.g_fsfreeze_on:\n                run_result, run_status = self.freeze()\n\n            if(self.para_parser is not None and self.is_command_timedout(self.para_parser) == True):\n                self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)\n                run_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate\n                run_status = 'error'\n                all_failed = True\n                all_snapshots_failed = True\n                self.logger.log('T:S takeSnapshotFromGuest : Thawing as failing due to CRP timeout', True, 'Error')\n                self.freezer.thaw_safe()\n            elif(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):\n                HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.guestExtension)\n                snap_shotter = GuestSnapshotter(self.logger, self.hutil)\n                self.logger.log('T:S doing snapshot now...')\n                time_before_snapshot = datetime.datetime.now()\n                snapshot_result, blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on)\n                time_after_snapshot = datetime.datetime.now()\n                snapshotTimeTaken = time_after_snapshot - time_before_snapshot\n                self.logger.log('T:S ***** takeSnapshotFromGuest, time_before_snapshot=' + str(time_before_snapshot) + \", time_after_snapshot=\" + str(time_after_snapshot) + \", 
snapshotTimeTaken=\" + str(snapshotTimeTaken))\n                HandlerUtil.HandlerUtility.add_to_telemetery_data(\"snapshotTimeTaken\", str(snapshotTimeTaken))\n                self.logger.log('T:S snapshotall ends...', True)\n\n        except Exception as e:\n            errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg, True, 'Error')\n            run_result = CommonVariables.error\n            run_status = 'error'\n\n        return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent\n\n    def takeSnapshotFromFirstGuestThenHost(self):\n        run_result = CommonVariables.success\n        run_status = 'success'\n\n        all_failed= False\n        is_inconsistent =  False\n        unable_to_sleep = False\n        blob_snapshot_info_array = None\n        all_snapshots_failed = False\n\n        run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent  = self.takeSnapshotFromGuest()\n\n        if(all_snapshots_failed):\n            try:\n                #to make sure binary is thawed\n                self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Thawing again post the guest snapshotting failure')\n                self.freezer.thaw_safe()\n            except Exception as e:\n                self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Exception in Thaw %s, stack trace: %s' % (str(e), traceback.format_exc()))\n\n            run_result, run_status, blob_snapshot_info_array,all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()\n\n        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent\n\n    def takeSnapshotFromFirstHostThenGuest(self):\n\n        run_result = CommonVariables.success\n        run_status = 'success'\n\n        all_failed= False\n        is_inconsistent =  False\n        unable_to_sleep = False\n        blob_snapshot_info_array = None\n        snap_shotter = HostSnapshotter(self.logger, self.hostIp)\n        pre_snapshot_statuscode, responseBody = snap_shotter.pre_snapshot(self.para_parser, self.taskId, True)\n\n        if(pre_snapshot_statuscode == 200 or pre_snapshot_statuscode == 201):\n            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()\n        else:\n            run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent  = self.takeSnapshotFromGuest()\n\n            if all_snapshots_failed and run_result != CommonVariables.success:\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork\n            elif run_result != CommonVariables.success :\n                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork\n\n        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent\n\n    def takeSnapshotFromOnlyHost(self):\n        run_result = CommonVariables.success\n        run_status = 'success'\n        all_failed= False\n        is_inconsistent =  False\n        unable_to_sleep = False\n        blob_snapshot_info_array = None\n        self.logger.log('Taking Snapshot through Host')\n        
HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.backupHostService)\n\n        if self.g_fsfreeze_on :\n            run_result, run_status = self.freeze()\n\n        if(self.para_parser is not None and self.is_command_timedout(self.para_parser) == True):\n            self.hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)\n            run_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate\n            run_status = 'error'\n            all_failed = True\n            self.logger.log('T:S takeSnapshotFromOnlyHost : Thawing as failing due to CRP timeout', True, 'Error')\n            self.freezer.thaw_safe()\n        elif(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):\n            snap_shotter = HostSnapshotter(self.logger, self.hostIp)\n            self.logger.log('T:S doing snapshot now...')\n            time_before_snapshot = datetime.datetime.now()\n            blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep  = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on, self.taskId)\n            time_after_snapshot = datetime.datetime.now()\n            snapshotTimeTaken = time_after_snapshot-time_before_snapshot\n            self.logger.log('T:S takeSnapshotFromHost, time_before_snapshot=' + str(time_before_snapshot) + \", time_after_snapshot=\" + str(time_after_snapshot) + \", snapshotTimeTaken=\" + str(snapshotTimeTaken))\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"snapshotTimeTaken\", str(snapshotTimeTaken))\n            self.logger.log('T:S snapshotall ends...', True)\n\n        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent\n\n    def is_command_timedout(self, para_parser):\n        result = False\n        dateTimeNow = datetime.datetime.utcnow()\n        try:\n            try:\n                snap_shotter = HostSnapshotter(self.logger, self.hostIp)\n                pre_snapshot_statuscode,responseBody = snap_shotter.pre_snapshot(self.para_parser, self.taskId)\n                \n                if(int(pre_snapshot_statuscode) == 200 or int(pre_snapshot_statuscode) == 201) and (responseBody != None and responseBody != \"\") :\n                    response = json.loads(responseBody)\n                    dateTimeNow = datetime.datetime(response['responseTime']['year'], response['responseTime']['month'], response['responseTime']['day'], response['responseTime']['hour'], response['responseTime']['minute'], response['responseTime']['second'])\n                    self.logger.log('Date and time extracted from pre-snapshot request: '+ str(dateTimeNow))\n            except Exception as e:\n                self.logger.log('Error in getting Host time falling back to using system time. 
Exception %s, stack trace: %s' % (str(e), traceback.format_exc()))\n\n            if(para_parser is not None and para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != \"\"):\n                utcTicksLong = int(para_parser.commandStartTimeUTCTicks)\n                self.logger.log('utcTicks in long format: ' + str(utcTicksLong))\n                commandStartTime = self.convert_time(utcTicksLong)\n                self.logger.log('command start time is ' + str(commandStartTime) + \" and utcNow is \" + str(dateTimeNow))\n                timespan = dateTimeNow - commandStartTime\n                MAX_TIMESPAN = 140 * 60  # in seconds\n                total_span_in_seconds = self.timedelta_total_seconds(timespan)\n                self.logger.log('timespan: ' + str(timespan) + ', total_span_in_seconds: ' + str(total_span_in_seconds) + ', MAX_TIMESPAN: ' + str(MAX_TIMESPAN))\n\n                if total_span_in_seconds > MAX_TIMESPAN:\n                    self.logger.log('CRP timeout limit has been reached, should abort.')\n                    result = True\n        except Exception as e:\n            self.logger.log('T:S is_command_timedout : Exception %s, stack trace: %s' % (str(e), traceback.format_exc()))\n\n        return result\n\n    def convert_time(self, utcTicks):\n        # utcTicks is a .NET tick count: 100-nanosecond intervals since 0001-01-01,\n        # so integer-dividing by 10 converts ticks to microseconds\n        return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = utcTicks // 10)\n\n    def timedelta_total_seconds(self, delta):\n        # fallback for Python versions without timedelta.total_seconds (ignores microseconds)\n        if not hasattr(datetime.timedelta, 'total_seconds'):\n            return delta.days * 86400 + delta.seconds\n        else:\n            return delta.total_seconds()\n\n    def fetchDiskBlobMetadata(self):\n        headers = self.generate_headers()\n        http_util = HttpUtil(self.logger)\n        for blob in self.para_parser.blobs:\n            sasuri_obj = urlparser.urlparse(blob + '&comp=metadata')\n            result, httpResp, errMsg = http_util.HttpCallGetResponse('GET', sasuri_obj, None, headers = headers)\n            if(result == CommonVariables.success and httpResp != None):\n                resp_headers = httpResp.getheaders()\n                key = CommonVariables.diskEncryptionKey\n                value = \"\"\n                for k, v in resp_headers:\n                    if key == k:\n                        value = str(v)\n                        break\n                self.additional_headers.append((key, value))\n        self.para_parser.disk_encryption_details = self.additional_headers\n\n    def generate_headers(self):\n        \"\"\"Generates the x-ms-date and x-ms-version headers for the request (the SAS token is carried in the blob URI).\"\"\"\n        headers = {\n            \"x-ms-date\": datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'),\n            \"x-ms-version\": \"2018-03-28\"\n            }\n        return headers\n\n"
  },
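  {
    "path": "VMBackup/main/examples/dotnet_ticks_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's illustrative sketch (hypothetical file, not shipped with the extension).\n# It shows the tick arithmetic used by FreezeSnapshotter.convert_time and\n# is_command_timedout above: commandStartTimeUTCTicks is a .NET tick count\n# (100-nanosecond intervals since 0001-01-01 00:00:00 UTC), so integer-dividing\n# by 10 yields microseconds. The sample tick value is made up.\n\nimport datetime\n\ndef ticks_to_datetime(utc_ticks):\n    # 1 tick = 100 ns, hence 10 ticks = 1 microsecond; floor division keeps it exact\n    return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds=utc_ticks // 10)\n\nif __name__ == '__main__':\n    ticks = 638500000000000000  # hypothetical commandStartTimeUTCTicks value\n    command_start = ticks_to_datetime(ticks)\n    elapsed = datetime.datetime.utcnow() - command_start\n    # the extension treats the command as timed out past MAX_TIMESPAN = 140 * 60 seconds\n    print(command_start, elapsed.total_seconds())\n"
  },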
  {
    "path": "VMBackup/main/fsfreezer.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom mounts import Mounts\nimport datetime\nimport threading\nimport os\nimport platform\nimport time\nimport sys\nimport signal\nimport traceback\nimport threading\nimport fcntl\nfrom common import CommonVariables\nfrom Utils.ResourceDiskUtil import ResourceDiskUtil\n\ndef thread_for_binary(self,args):\n    self.logger.log(\"Thread for binary is called\",True)\n    time.sleep(3)\n    self.logger.log(\"Waited in thread for 3 seconds\",True)\n    self.logger.log(\"****** 1. Starting Freeze Binary \",True)\n    self.child = subprocess.Popen(args,stdout=subprocess.PIPE)\n    self.logger.log(\"Binary subprocess Created\",True)\n\nclass FreezeError(object):\n    def __init__(self):\n        self.errorcode = None\n        self.fstype = None\n        self.path = None\n    def __str__(self):\n        return \"errorcode:\" + str(self.errorcode) + \" fstype:\" + str(self.fstype) + \" path\" + str(self.path)\n\nclass FreezeResult(object):\n    def __init__(self):\n        self.errors = []\n    def __str__(self):\n        error_str = \"\"\n        for error in self.errors:\n            error_str+=(str(error)) + \"\\n\"\n        return error_str\n\nclass FreezeHandler(object):\n    def __init__(self,logger,hutil):\n        # sig_handle valid values(0:nothing done,1: freezed successfully, 2:freeze failed)\n        self.sig_handle = 0\n        self.child= None\n        self.logger=logger\n        self.hutil = hutil\n\n    def sigusr1_handler(self,signal,frame):\n        self.logger.log('freezed',False)\n        self.logger.log(\"****** 4. Freeze Completed (Signal=1 received)\",False)\n        self.sig_handle=1\n\n    def sigchld_handler(self,signal,frame):\n        self.logger.log('some child process terminated')\n        if(self.child is not None and self.child.poll() is not None):\n            self.logger.log(\"binary child terminated\",True)\n            self.logger.log(\"****** 9. 
Binary Process completed (Signal=2 received)\",True)\n            self.sig_handle=2\n\n    def reset_signals(self):\n        self.sig_handle = 0\n        self.child= None\n\n\n    def startproc(self,args):\n        binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])\n        binary_thread.start()\n\n        SafeFreezeWaitInSecondsDefault = 66\n\n        proc_sleep_time = self.hutil.get_intvalue_from_configfile('SafeFreezeWaitInSeconds',SafeFreezeWaitInSecondsDefault)\n        \n        for i in range(0,(int(proc_sleep_time/2))):\n            if(self.sig_handle==0):\n                self.logger.log(\"inside while with sig_handle \"+str(self.sig_handle))\n                time.sleep(2)\n            else:\n                break\n        self.logger.log(\"Binary output for signal handled: \"+str(self.sig_handle))\n        return self.sig_handle\n\n    def signal_receiver(self):\n        signal.signal(signal.SIGUSR1,self.sigusr1_handler)\n        signal.signal(signal.SIGCHLD,self.sigchld_handler)\n\nclass FsFreezer:\n    def __init__(self, patching, logger, hutil):\n        \"\"\"\n        \"\"\"\n        self.patching = patching\n        self.logger = logger\n        self.hutil = hutil\n        self.safeFreezeFolderPath = \"safefreeze/bin/safefreeze\"\n        self.isArm64Machine = False\n        self.file_exists = True # Flag to indiacte safeFreeze Binary presence\n        \n        try:\n            platformMachine = platform.machine()\n            architectureFromUname = os.uname()[-1]\n            self.logger.log(\"platformMachine : \" + str(platformMachine) + \" architectureFromUname : \" + str(architectureFromUname))\n            if((platformMachine != None and (platformMachine.startswith(\"aarch64\") or platformMachine.startswith(\"arm64\"))) or (architectureFromUname != None and (architectureFromUname.startswith(\"aarch64\") or architectureFromUname.startswith(\"arm64\")))):\n                self.isArm64Machine = True\n        except Exception as e:\n            errorMsg = \"Unable to fetch machine processor architecture, error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg, 'Error')\n\n        if(self.isArm64Machine == True):\n            self.logger.log(\"isArm64Machine : \" + str(self.isArm64Machine) + \" Using ARM64 safefreeze binary\")\n            self.safeFreezeFolderPath = \"safefreezeArm64/bin/safefreeze\"\n        else:\n            self.logger.log(\"isArm64Machine : \" + str(self.isArm64Machine) + \" Using x64 safefreeze binary\")\n            self.safeFreezeFolderPath = \"safefreeze/bin/safefreeze\"\n        \n        self.logger.log(\"Checking for the safefreeze binary\")\n        self.check_if_file_exists(self.safeFreezeFolderPath)\n        \n        try:\n            self.mounts = Mounts(patching = self.patching, logger = self.logger)\n        except Exception as e:\n            errMsg='Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg,True,'Warning')\n            self.logger.log(str(e), True)\n            self.mounts = None\n        self.frozen_items = set()\n        self.unfrozen_items = set()\n        self.freeze_handler = FreezeHandler(self.logger, self.hutil)\n        self.mount_open_failed = False\n        resource_disk = ResourceDiskUtil(patching = patching, logger = logger)\n        self.resource_disk_mount_point = resource_disk.get_resource_disk_mount_point()\n        self.skip_freeze = True\n        
self.isAquireLockSucceeded = True\n        self.getLockRetry = 0\n        self.maxGetLockRetry = 5\n        self.safeFreezelockFile = None\n    \n    def check_if_file_exists(self, relative_path):\n        full_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), relative_path)\n        self.logger.log(\"path of the file \" + str(full_path))\n        self.file_exists = os.path.exists(full_path)\n        self.logger.log(\"file path exists \" + str(self.file_exists))\n    \n    def should_skip(self, mount):\n        if(self.resource_disk_mount_point is not None and mount.mount_point == self.resource_disk_mount_point):\n            return True\n        elif((mount.fstype == 'ext3' or mount.fstype == 'ext4' or mount.fstype == 'xfs' or mount.fstype == 'btrfs') and mount.type != 'loop' ):\n            return False\n        else:\n            return True\n    \n    def freeze_safe(self,timeout):\n        self.root_seen = False\n        error_msg=''\n        timedout = False\n        self.skip_freeze = True \n        mounts_to_skip = None\n        mounts_list_to_skip = []\n        try:\n            mounts_to_skip = self.hutil.get_strvalue_from_configfile('MountsToSkip','')\n            self.logger.log(\"skipped mount :\" + str(mounts_to_skip), True)\n            mounts_list_to_skip = mounts_to_skip.split(',')\n        except Exception as e:\n            errMsg='Failed to read from config, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())\n            self.logger.log(errMsg,True,'Warning')\n        try:\n            freeze_result = FreezeResult()\n            freezebin=os.path.join(os.getcwd(),os.path.dirname(__file__),self.safeFreezeFolderPath)\n            args=[freezebin,str(timeout)]\n            no_mount_found = True\n            for mount in self.mounts.mounts:\n                self.logger.log(\"fsfreeze mount :\" + str(mount.mount_point), True)\n                if(mount.mount_point == '/'):\n                    self.root_seen = True\n                    self.root_mount = mount\n                elif(mount.mount_point not in mounts_list_to_skip and not self.should_skip(mount)):\n                    if(self.skip_freeze == True):\n                        self.skip_freeze = False\n                    args.append(str(mount.mount_point))\n            if(self.root_seen and not self.should_skip(self.root_mount)):\n                if(self.skip_freeze == True):\n                    self.skip_freeze = False\n                args.append('/')\n            self.logger.log(\"skip freeze is : \" + str(self.skip_freeze), True)\n            if(self.skip_freeze == True):\n                return freeze_result,timedout\n            self.logger.log(\"arg : \" + str(args),True)\n            self.freeze_handler.reset_signals()\n            self.freeze_handler.signal_receiver()\n            self.logger.log(\"proceeded for accepting signals\", True)\n            if(mounts_to_skip == '/'): # keep logging locally to avoid out-of-memory issue\n                self.logger.enforce_local_flag(True)\n            else:\n                self.logger.enforce_local_flag(False) \n\n            start_time = datetime.datetime.utcnow()\n\n            while self.getLockRetry < self.maxGetLockRetry:\n                try:\n                    if not os.path.isdir('/etc/azure'):\n                        os.mkdir('/etc/azure')\n                    if not os.path.isdir('/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock'):\n                        os.mkdir('/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock')\n                    self.safeFreezelockFile = 
open(\"/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile\",\"w\")\n                    self.logger.log(\"/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile file opened Sucessfully\",True)\n                    try:\n                        #isAquiredLockSucceeded lock will only be false if there is a issue in taking lock.\n                        #For all other issue like faliure in creating file, not enough space in disk it will be true. so that we can proceed with the backup\n                        self.isAquireLockSucceeded = False  \n                        fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_EX | fcntl.LOCK_NB)\n                        self.logger.log(\"Aquiring lock succeeded\",True)\n                        self.isAquireLockSucceeded = True\n                        break\n                    except Exception as ex:\n                        self.safeFreezelockFile.close()\n                        self.logger.log(\"Failed to aquire lock: %s, stack trace: %s\" % (str(ex), traceback.format_exc()),True)\n                        raise ex\n                except Exception as e:\n                    self.logger.log(\"Failed to open file or aquire lock: %s, stack trace: %s\" % (str(e), traceback.format_exc()),True)\n                    self.getLockRetry= self.getLockRetry + 1\n                    time.sleep(1)\n                    if(self.getLockRetry == self.maxGetLockRetry - 1):\n                        time.sleep(30)\n                self.logger.log(\"Retry to aquire lock count: \"+ str(self.getLockRetry),True)\n\n            end_time = datetime.datetime.utcnow()\n            self.logger.log(\"Wait time to aquire lock \"+ str(end_time - start_time),True)\n\n            # sig_handle = None\n            if (self.isAquireLockSucceeded == True):\n                self.logger.log(\"Aquired Lock Successful\")\n            sig_handle=self.freeze_handler.startproc(args)\n\n            self.logger.log(\"freeze_safe after returning from startproc : sig_handle=\"+str(sig_handle))\n            if(sig_handle != 1):\n                if (self.freeze_handler.child is not None):\n                    self.log_binary_output()\n                if (sig_handle == 0):\n                    timedout = True\n                    error_msg=\"freeze timed-out\"\n                    freeze_result.errors.append(error_msg)\n                    self.logger.log(error_msg, True, 'Error')\n                elif (self.mount_open_failed == True):\n                    error_msg=CommonVariables.unable_to_open_err_string\n                    freeze_result.errors.append(error_msg)\n                    self.logger.log(error_msg, True, 'Error')\n                elif (self.isAquireLockSucceeded == False):\n                    error_msg=\"Mount Points already freezed by some other processor\"\n                    freeze_result.errors.append(error_msg)\n                    self.logger.log(error_msg,True,'Error')\n                else:\n                    error_msg=\"freeze failed for some mount\"\n                    freeze_result.errors.append(error_msg)\n                    self.logger.log(error_msg, True, 'Error')\n        except Exception as e:\n            self.logger.enforce_local_flag(True)\n            error_msg='freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())\n            freeze_result.errors.append(error_msg)\n            self.logger.log(error_msg, True, 'Error')\n        return freeze_result,timedout\n\n    def 
releaseFileLock(self):\n        if (self.isAquireLockSucceeded == True):\n            try:\n                fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_UN)\n                self.safeFreezelockFile.close()\n            except Exception as e:\n                self.logger.log(\"Failed to unlock: %s, stack trace: %s\" % (str(e), traceback.format_exc()),True)\n        try:\n            os.remove(\"/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile\")\n        except Exception as e:\n            self.logger.log(\"Failed to delete /etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile file: %s, stack trace: %s\" % (str(e), traceback.format_exc()),True)\n\n    def thaw_safe(self):\n        thaw_result = None\n        unable_to_sleep = False\n        try:\n            thaw_result = FreezeResult()\n            if(self.skip_freeze == True):\n                return thaw_result, unable_to_sleep\n            if(self.freeze_handler.child is None):\n                self.logger.log(\"child already completed\", True)\n                self.logger.log(\"****** 7. Error - Binary Process Already Completed\", True)\n                error_msg = 'snapshot result inconsistent'\n                thaw_result.errors.append(error_msg)\n            elif(self.freeze_handler.child.poll() is None):\n                self.logger.log(\"child process still running\")\n                self.logger.log(\"****** 7. Sending Thaw Signal to Binary\")\n                self.freeze_handler.child.send_signal(signal.SIGUSR1)\n                for i in range(0,30):\n                    if(self.freeze_handler.child.poll() is None):\n                        self.logger.log(\"child still running after SIGUSR1 sent\")\n                        time.sleep(1)\n                    else:\n                        break\n                self.logger.enforce_local_flag(True)\n                self.log_binary_output()\n                if(self.freeze_handler.child.returncode!=0):\n                    error_msg = 'snapshot result inconsistent as child returned failure'\n                    thaw_result.errors.append(error_msg)\n                    self.logger.log(error_msg, True, 'Error')\n            else:\n                self.logger.log(\"Binary output after process end when no thaw sent: \", True)\n                if(self.freeze_handler.child.returncode==2):\n                    error_msg = 'Unable to execute sleep'\n                    thaw_result.errors.append(error_msg)\n                    unable_to_sleep = True\n                else:\n                    error_msg = 'snapshot result inconsistent'\n                    thaw_result.errors.append(error_msg)\n                self.logger.enforce_local_flag(True)\n                self.log_binary_output()\n                self.logger.log(error_msg, True, 'Error')\n            self.logger.enforce_local_flag(True) \n        finally:\n            self.releaseFileLock()\n        return thaw_result, unable_to_sleep\n\n\n    def log_binary_output(self):\n        self.logger.log(\"============== Binary output traces start ================= \", True)\n        while True:\n            line=self.freeze_handler.child.stdout.readline()\n            if sys.version_info > (3,):\n                line = str(line, encoding='utf-8', errors=\"backslashreplace\")\n            else:\n                line = str(line)\n            if(\"Failed to open:\" in line):\n                self.mount_open_failed = True\n            if(line != ''):\n                self.logger.log(line.rstrip(), True)\n            
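# An empty string from readline() means the binary's stdout reached EOF,\n            # i.e. the safefreeze process exited and there is nothing left to read.\n            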
else:\n                break\n        self.logger.log(\"============== Binary output traces end ================= \", True)\n\n"
  },
  {
    "path": "VMBackup/main/guestsnapshotter.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\ntry:\n    import urlparse as urlparser\nexcept ImportError:\n    import urllib.parse as urlparser\nimport traceback\nimport datetime\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport multiprocessing as mp\nfrom common import CommonVariables\nfrom HttpUtil import HttpUtil\nfrom Utils import Status\nfrom Utils import HandlerUtil\nfrom fsfreezer import FsFreezer\nfrom Utils import HostSnapshotObjects\n\nclass SnapshotInfoIndexerObj():\n    def __init__(self, index, isSuccessful, snapshotTs, errorMessage):\n        self.index = index\n        self.isSuccessful = isSuccessful\n        self.snapshotTs = snapshotTs\n        self.errorMessage = errorMessage\n        self.statusCode = 500\n    def __str__(self):\n        return 'index: ' + str(self.index) + ' isSuccessful: ' + str(self.isSuccessful) + ' snapshotTs: ' + str(self.snapshotTs) + ' errorMessage: ' + str(self.errorMessage) + ' statusCode: ' + str(self.statusCode)\n\nclass SnapshotError(object):\n    def __init__(self):\n        self.errorcode = CommonVariables.success\n        self.sasuri = None\n    def __str__(self):\n        return 'errorcode: ' + str(self.errorcode)\n\nclass SnapshotResult(object):\n    def __init__(self):\n        self.errors = []\n\n    def __str__(self):\n        error_str = \"\"\n        for error in self.errors:\n            error_str+=(str(error)) + \"\\n\"\n        return error_str\n\nclass GuestSnapshotter(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger, hutil):\n        self.logger = logger\n        self.configfile='/etc/azure/vmbackup.conf'\n        self.hutil = hutil\n\n    def snapshot(self, sasuri, sasuri_index, settings, meta_data, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger, disk_encryption_details = None):\n        temp_logger=''\n        error_logger=''\n        snapshot_error = SnapshotError()\n        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)\n        if(sasuri is None):\n            error_logger = error_logger + str(datetime.datetime.utcnow()) + \" Failed to do the snapshot because sasuri is none \"\n            snapshot_error.errorcode = CommonVariables.error\n            snapshot_error.sasuri = sasuri\n        try:\n            sasuri_obj = urlparser.urlparse(sasuri)\n            if(sasuri_obj is None or sasuri_obj.hostname is None):\n                error_logger = error_logger + str(datetime.datetime.utcnow()) + \" Failed to parse the sasuri \"\n                snapshot_error.errorcode = CommonVariables.error\n                snapshot_error.sasuri = sasuri\n            else:\n                start_time = datetime.datetime.utcnow()\n                body_content = ''\n                headers = {}\n                headers[\"Content-Length\"] = '0'\n   
             if(meta_data is not None): \n                    for meta in meta_data:\n                        key = meta['Key']\n                        value = meta['Value']\n                        headers[\"x-ms-meta-\" + key] = value\n                temp_logger = temp_logger + str(headers)\n                if(disk_encryption_details is not None and \n                   len(disk_encryption_details) >= 2 and disk_encryption_details[0] and disk_encryption_details[1]):\n                    headers[disk_encryption_details[0]] = disk_encryption_details[1]\n                    self.logger.log(\"appending disk_encryption_details as part of headers while taking a snapshot\")\n                if(CommonVariables.isSnapshotTtlEnabled in settings and settings[CommonVariables.isSnapshotTtlEnabled]):\n                    self.logger.log(\"Not passing the TTL header via Guest path though it is enabled\")\n                http_util = HttpUtil(self.logger)\n                sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')\n                temp_logger = temp_logger + str(datetime.datetime.utcnow()) + ' start calling the snapshot rest api. '\n                # initiate http call for blob-snapshot and get http response\n                result, httpResp, errMsg, responseBody  = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)\n                temp_logger = temp_logger + str(\"responseBody: \" + responseBody)\n                if(result == CommonVariables.success and httpResp != None):\n                    # retrieve snapshot information from http response\n                    snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri, responseBody)\n                    temp_logger = temp_logger + str(datetime.datetime.utcnow()) + ' httpresponse_get_snapshot_info message: ' + str(message)\n                else:\n                    # HttpCall failed\n                    error_logger = error_logger + str(datetime.datetime.utcnow()) + \" snapshot HttpCallGetResponse failed \"\n                    error_logger = error_logger + str(datetime.datetime.utcnow()) + str(errMsg)\n                    snapshot_error.errorcode = CommonVariables.error\n                    snapshot_error.sasuri = sasuri\n                end_time = datetime.datetime.utcnow()\n                time_taken=end_time-start_time\n                temp_logger = temp_logger + str(datetime.datetime.utcnow()) + ' time taken for snapshot ' + str(time_taken)\n        except Exception as e:\n            errorMsg = \" Failed to do the snapshot with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            error_logger = error_logger + str(datetime.datetime.utcnow()) + errorMsg\n            snapshot_error.errorcode = CommonVariables.error\n            snapshot_error.sasuri = sasuri\n        temp_logger=temp_logger + str(datetime.datetime.utcnow()) + ' snapshot ends..'\n        global_logger.put(temp_logger)\n        global_error_logger.put(error_logger)\n        snapshot_result_error.put(snapshot_error)\n        snapshot_info_indexer_queue.put(snapshot_info_indexer)\n\n    def snapshot_seq(self, sasuri, sasuri_index, settings, meta_data, disk_encryption_metadata = None):\n        result = None\n        snapshot_error = SnapshotError()\n        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)\n        if(sasuri is None):\n            self.logger.log(\"Failed to do the snapshot 
because sasuri is none\",False,'Error')\n            snapshot_error.errorcode = CommonVariables.error\n            snapshot_error.sasuri = sasuri\n        try:\n            sasuri_obj = urlparser.urlparse(sasuri)\n            if(sasuri_obj is None or sasuri_obj.hostname is None):\n                self.logger.log(\"Failed to parse the sasuri\",False,'Error')\n                snapshot_error.errorcode = CommonVariables.error\n                snapshot_error.sasuri = sasuri\n            else:\n                body_content = ''\n                headers = {}\n                headers[\"Content-Length\"] = '0'\n                if(meta_data is not None):\n                    for meta in meta_data:\n                        key = meta['Key']\n                        value = meta['Value']\n                        headers[\"x-ms-meta-\" + key] = value\n                if(disk_encryption_metadata is not None and len(disk_encryption_metadata) >= 2 and\n                  disk_encryption_metadata[0] and disk_encryption_metadata[1]):\n                    headers[disk_encryption_metadata[0]] = disk_encryption_metadata[1]\n                    self.logger.log(\"appending disk_encryption_details as part of headers while taking a snapshot\")\n                if(CommonVariables.isSnapshotTtlEnabled in settings and settings[CommonVariables.isSnapshotTtlEnabled]):\n                    self.logger.log(\"Not passing the TTL header via Guest path though it is enabled\")\n                http_util = HttpUtil(self.logger)\n                sasuri_obj = urlparser.urlparse(sasuri + '&comp=snapshot')\n                self.logger.log(\"start calling the snapshot rest api\")\n                # initiate http call for blob-snapshot and get http response\n                result, httpResp, errMsg, responseBody  = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers, responseBodyRequired = True)\n                self.logger.log(\"responseBody: \" + responseBody)\n                if(result == CommonVariables.success and httpResp != None):\n                    # retrieve snapshot information from http response\n                    snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri, responseBody)\n                    self.logger.log(' httpresponse_get_snapshot_info message: ' + str(message))\n                else:\n                    # HttpCall failed\n                    self.logger.log(\" snapshot HttpCallGetResponse failed \")\n                    self.logger.log(str(errMsg))\n                    snapshot_error.errorcode = CommonVariables.error\n                    snapshot_error.sasuri = sasuri\n        except Exception as e:\n            errorMsg = \"Failed to do the snapshot with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg, False, 'Error')\n            snapshot_error.errorcode = CommonVariables.error\n            snapshot_error.sasuri = sasuri\n        return snapshot_error, snapshot_info_indexer\n\n    def snapshotall_parallel(self, paras, freezer, thaw_done, g_fsfreeze_on):\n        self.logger.log(\"doing snapshotall now in parallel...\")\n        snapshot_result = SnapshotResult()\n        blob_snapshot_info_array = []\n        all_failed = True\n        exceptOccurred = False\n        is_inconsistent = False\n        thaw_done_local = thaw_done\n        unable_to_sleep = False\n        all_snapshots_failed = False\n        set_next_backup_to_seq = False\n        try:\n       
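     # Fan-out: one mp.Process per blob runs self.snapshot, and results come back\n            # through four mp.Queue objects (logs, error logs, per-blob snapshot errors,\n            # and snapshot info); the parent joins every job before draining the queues.\n       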
     self.logger.log(\"before start of multiprocessing queues..\")\n            mp_jobs = []\n            queue_creation_starttime = datetime.datetime.now()\n            global_logger = mp.Queue()\n            global_error_logger = mp.Queue()\n            snapshot_result_error = mp.Queue()\n            snapshot_info_indexer_queue = mp.Queue()\n            time_before_snapshot_start = datetime.datetime.utcnow()\n            blobs = paras.blobs\n\n            if blobs is not None:\n                # initialize blob_snapshot_info_array\n                mp_jobs = []\n                blob_index = 0\n                self.logger.log('****** 5. Snaphotting (Guest-parallel) Started')\n                for blob in blobs:\n                    blobUri = blob.split(\"?\")[0]\n                    self.logger.log(\"index: \" + str(blob_index) + \" blobUri: \" + str(blobUri))\n                    blob_snapshot_info_array.append(HostSnapshotObjects.BlobSnapshotInfo(False, blobUri, None, 500))\n                    try:\n                        if(paras.isVMADEEnabled and len(paras.disk_encryption_details) > blob_index):\n                            mp_jobs.append(mp.Process(target=self.snapshot,args=(blob, blob_index, paras.wellKnownSettingFlags, paras.backup_metadata, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger, paras.disk_encryption_details[blob_index])))\n                        else:\n                            mp_jobs.append(mp.Process(target=self.snapshot,args=(blob, blob_index, paras.wellKnownSettingFlags, paras.backup_metadata, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger)))\n                    except Exception as e:\n                        self.logger.log(\"multiprocess queue creation failed\")\n                        all_snapshots_failed = True\n                        raise Exception(\"Exception while creating multiprocess queue\")\n\n                    blob_index = blob_index + 1\n\n                counter = 0\n                for job in mp_jobs:\n                    job.start()\n                    if(counter == 0):\n                        queue_creation_endtime = datetime.datetime.now()\n                        timediff = queue_creation_endtime - queue_creation_starttime\n                        if(timediff.seconds >= 10):\n                            self.logger.log(\"mp queue creation took more than 10 secs. Setting next backup to sequential\")\n                            set_next_backup_to_seq = True\n                    counter = counter + 1\n\n                for job in mp_jobs:\n                    job.join()\n                self.logger.log('****** 6. 
Snapshotting (Guest-parallel) Completed')\n                thaw_result = None\n                if g_fsfreeze_on and thaw_done_local == False:\n                    time_before_thaw = datetime.datetime.now()\n                    thaw_result, unable_to_sleep = freezer.thaw_safe()\n                    time_after_thaw = datetime.datetime.now()\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(\"ThawTime\", str(time_after_thaw-time_before_thaw))\n                    thaw_done_local = True\n                    if(set_next_backup_to_seq == True):\n                        self.logger.log(\"Setting to sequential snapshot\")\n                        self.hutil.set_value_to_configfile('seqsnapshot', '1')\n                    self.logger.log('T:S thaw result ' + str(thaw_result))\n                    if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):\n                        is_inconsistent = True\n                        snapshot_result.errors.append(thaw_result.errors)\n                        return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n                self.logger.log('end of snapshot process')\n                logging = [global_logger.get() for job in mp_jobs]\n                self.logger.log(str(logging))\n                error_logging = [global_error_logger.get() for job in mp_jobs]\n                self.logger.log(str(error_logging),False,'Error')\n                if not snapshot_result_error.empty():\n                    results = [snapshot_result_error.get() for job in mp_jobs]\n                    for result in results:\n                        if(result.errorcode != CommonVariables.success):\n                            snapshot_result.errors.append(result)\n                if not snapshot_info_indexer_queue.empty():\n                    snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]\n                    for snapshot_info_indexer in snapshot_info_indexers:\n                        # update blob_snapshot_info_array element properties from snapshot_info_indexer object\n                        self.get_snapshot_info(snapshot_info_indexer, blob_snapshot_info_array[snapshot_info_indexer.index])\n                        if (blob_snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):\n                            all_failed = False\n                        self.logger.log(\"index: \" + str(snapshot_info_indexer.index) + \" blobSnapshotUri: \" + str(blob_snapshot_info_array[snapshot_info_indexer.index].snapshotUri))\n\n                    all_snapshots_failed = all_failed\n                    self.logger.log(\"Setting all_snapshots_failed to \" + str(all_snapshots_failed))\n\n                return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n            else:\n                self.logger.log(\"the blobs are None\")\n                return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n        except Exception as e:\n            errorMsg = \" Unable to perform parallel snapshot with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg)\n            exceptOccurred = True\n            return snapshot_result, blob_snapshot_info_array, 
all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n\n\n    def snapshotall_seq(self, paras, freezer, thaw_done, g_fsfreeze_on):\n        exceptOccurred = False\n        self.logger.log(\"doing snapshotall now in sequence...\")\n        snapshot_result = SnapshotResult()\n        blob_snapshot_info_array = []\n        all_failed = True\n        is_inconsistent = False\n        thaw_done_local = thaw_done\n        unable_to_sleep = False\n        all_snapshots_failed = False\n        try:\n            blobs = paras.blobs\n            if blobs is not None:\n                blob_index = 0\n                self.logger.log('****** 5. Snapshotting (Guest-seq) Started')\n                for blob in blobs:\n                    blobUri = blob.split(\"?\")[0]\n                    self.logger.log(\"index: \" + str(blob_index) + \" blobUri: \" + str(blobUri))\n                    blob_snapshot_info_array.append(HostSnapshotObjects.BlobSnapshotInfo(False, blobUri, None, 500))\n                    if(paras.isVMADEEnabled == True and len(paras.disk_encryption_details) > blob_index):\n                        snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.wellKnownSettingFlags, paras.backup_metadata, paras.disk_encryption_details[blob_index])\n                    else:\n                        snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.wellKnownSettingFlags, paras.backup_metadata)\n                    if(snapshotError.errorcode != CommonVariables.success):\n                        snapshot_result.errors.append(snapshotError)\n                    # update blob_snapshot_info_array element properties from snapshot_info_indexer object\n                    self.get_snapshot_info(snapshot_info_indexer, blob_snapshot_info_array[blob_index])\n                    if (blob_snapshot_info_array[blob_index].isSuccessful == True):\n                        all_failed = False\n                    blob_index = blob_index + 1\n\n                self.logger.log('****** 6. 
Snapshotting (Guest-seq) Completed')\n                all_snapshots_failed = all_failed\n                self.logger.log(\"Setting all_snapshots_failed to \" + str(all_snapshots_failed))\n\n                thaw_result = None\n                if g_fsfreeze_on and thaw_done_local == False:\n                    time_before_thaw = datetime.datetime.now()\n                    thaw_result, unable_to_sleep = freezer.thaw_safe()\n                    time_after_thaw = datetime.datetime.now()\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(\"ThawTime\", str(time_after_thaw-time_before_thaw))\n                    thaw_done_local = True\n                    self.logger.log('T:S thaw result ' + str(thaw_result))\n                    if(thaw_result is not None and len(thaw_result.errors) > 0 and (snapshot_result is None or len(snapshot_result.errors) == 0)):\n                        snapshot_result.errors.append(thaw_result.errors)\n                        is_inconsistent = True\n                return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n            else:\n                self.logger.log(\"the blobs are None\")\n                return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n        except Exception as e:\n            errorMsg = \" Unable to perform sequential snapshot with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg)\n            exceptOccurred = True\n            return snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed\n\n    def snapshotall(self, paras, freezer, g_fsfreeze_on):\n        thaw_done = False\n        if (self.hutil.get_intvalue_from_configfile('seqsnapshot',0) == 1 or self.hutil.get_intvalue_from_configfile('seqsnapshot',0) == 2 or (len(paras.blobs) <= 4)):\n            snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)\n        else:\n            snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_parallel(paras, freezer, thaw_done, g_fsfreeze_on)\n            self.logger.log(\"exceptOccurred : \" + str(exceptOccurred) + \" thaw_done : \" + str(thaw_done) + \" all_snapshots_failed : \" + str(all_snapshots_failed))\n            if exceptOccurred and thaw_done == False and all_snapshots_failed:\n                self.logger.log(\"Trying sequential snapshotting as parallel snapshotting failed\")\n                snapshot_result, blob_snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep, all_snapshots_failed = self.snapshotall_seq(paras, freezer, thaw_done, g_fsfreeze_on)\n        return snapshot_result, blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed\n\n    def httpresponse_get_snapshot_info(self, resp, sasuri_index, sasuri, responseBody):\n        snapshot_error = SnapshotError()\n        snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)\n        result = CommonVariables.error_http_failure\n        message = \"\"\n        if(resp != None):\n            message = message + 
str(datetime.datetime.utcnow()) + \" snapshot resp status: \" + str(resp.status) + \" \"\n            resp_headers = resp.getheaders()\n            message = message + str(datetime.datetime.utcnow()) + \" snapshot resp-header: \" + str(resp_headers) + \" \"\n\n            if(resp.status == 200 or resp.status == 201):\n                result = CommonVariables.success\n                snapshot_info_indexer.isSuccessful = True\n                snapshot_info_indexer.snapshotTs = resp.getheader('x-ms-snapshot')\n            else:\n                result = resp.status\n            snapshot_info_indexer.errorMessage = responseBody\n            snapshot_info_indexer.statusCode = resp.status\n        else:\n            message = message + str(datetime.datetime.utcnow()) + \" snapshot Http connection response is None\" + \" \"\n\n        message = message + str(datetime.datetime.utcnow()) + ' snapshot api returned: {0} '.format(result) + \" \"\n        if(result != CommonVariables.success):\n            snapshot_error.errorcode = result\n            snapshot_error.sasuri = sasuri\n\n        return snapshot_info_indexer, snapshot_error, message\n\n    def get_snapshot_info(self, snapshot_info_indexer, snapshot_info):\n        if (snapshot_info_indexer != None):\n            self.logger.log(\"snapshot_info_indexer: \" + str(snapshot_info_indexer))\n            snapshot_info.isSuccessful = snapshot_info_indexer.isSuccessful\n            if (snapshot_info.isSuccessful == True):\n                snapshot_info.snapshotUri = snapshot_info.snapshotUri + \"?snapshot=\" + str(snapshot_info_indexer.snapshotTs)\n            else:\n                snapshot_info.snapshotUri = None\n            snapshot_info.errorMessage = snapshot_info_indexer.errorMessage\n            snapshot_info.statusCode = snapshot_info_indexer.statusCode\n        else:\n            snapshot_info.isSuccessful = False\n            snapshot_info.snapshotUri = None\n"
  },
  {
    "path": "VMBackup/main/handle.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport array\nimport base64\nimport os\nimport os.path\nimport re\nimport json\nimport string\nimport subprocess\nimport sys\nimport time\nimport shlex\nimport traceback\nimport datetime\nimport random\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nfrom threading import Thread\nfrom time import sleep\nfrom os.path import join\nfrom mounts import Mounts\nfrom mounts import Mount\nfrom patch import *\nfrom fsfreezer import FsFreezer\nfrom common import CommonVariables\nfrom parameterparser import ParameterParser\nfrom Utils import HandlerUtil\nfrom Utils.EventLoggerUtil import EventLogger\nfrom Utils import SizeCalculation\nfrom Utils import Status\nfrom freezesnapshotter import FreezeSnapshotter\nfrom backuplogger import Backuplogger\nfrom blobwriter import BlobWriter\nfrom taskidentity import TaskIdentity\nfrom MachineIdentity import MachineIdentity\nimport ExtensionErrorCodeHelper\nfrom PluginHost import PluginHost\nfrom PluginHost import PluginHostResult\nimport platform\nfrom workloadPatch import WorkloadPatch\nfrom signal import SIGTERM;\n\n#Main function is the only entrence to this extension handler\n\ndef main():\n    global MyPatching,backup_logger,hutil,run_result,run_status,error_msg,freezer,freeze_result,snapshot_info_array,total_used_size,size_calculation_failed, patch_class_name, orig_distro, configSeqNo, eventlogger, disable_event_logging\n    try:\n        run_result = CommonVariables.success\n        run_status = 'success'\n        error_msg = ''\n        freeze_result = None\n        snapshot_info_array = None\n        total_used_size = 0\n        size_calculation_failed = False\n        eventlogger = None\n        HandlerUtil.waagent.LoggerInit('/dev/console','/dev/stdout')\n        hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)\n        backup_logger = Backuplogger(hutil)\n        MyPatching, patch_class_name, orig_distro = GetMyPatching(backup_logger)\n        hutil.patching = MyPatching\n        configSeqNo = -1\n        hutil.try_parse_context(configSeqNo)\n        disable_event_logging = hutil.get_intvalue_from_configfile(\"disable_logging\", 0)\n        use_async_event_logging = hutil.get_intvalue_from_configfile(\"async_event_logging \", 0)\n        if disable_event_logging == 0 or hutil.event_dir is not None :\n            eventlogger = EventLogger.GetInstance(backup_logger, hutil.event_dir, hutil.severity_level, use_async_event_logging)\n        else:\n            eventlogger = None\n        hutil.set_event_logger(eventlogger)\n        for a in sys.argv[1:]:\n            if re.match(\"^([-/]*)(disable)\", a):\n                disable()\n            elif re.match(\"^([-/]*)(uninstall)\", a):\n                uninstall()\n            elif re.match(\"^([-/]*)(install)\", 
a):\n                install()\n            elif re.match(\"^([-/]*)(enable)\", a):\n                enable()\n            elif re.match(\"^([-/]*)(update)\", a):\n                update()\n            elif re.match(\"^([-/]*)(daemon)\", a):\n                daemon()\n            elif re.match(\"^([-/]*)(seqNo:)\", a):\n                try:\n                    configSeqNo = int(a.split(':')[1])\n                except Exception:\n                    configSeqNo = -1\n    except Exception as e:\n        if(eventlogger != None):\n            eventlogger.dispose()\n        sys.exit(0)\n\ndef install():\n    global hutil,configSeqNo\n    hutil.do_parse_context('Install', configSeqNo)\n    hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')\n\ndef status_report_to_file(file_report_msg):\n    global backup_logger,hutil\n    hutil.write_to_status_file(file_report_msg)\n    backup_logger.log(\"file status report message:\",True)\n    backup_logger.log(file_report_msg,True)\n\ndef status_report_to_blob(blob_report_msg):\n    global backup_logger,hutil,para_parser\n    UploadStatusAndLog = hutil.get_strvalue_from_configfile('UploadStatusAndLog','True')\n    if(UploadStatusAndLog == None or UploadStatusAndLog == 'True'):\n        try:\n            if(para_parser is not None and para_parser.statusBlobUri is not None and para_parser.statusBlobUri != \"\"):\n                blobWriter = BlobWriter(hutil)\n                if(blob_report_msg is not None):\n                    blobWriter.WriteBlob(blob_report_msg,para_parser.statusBlobUri)\n                    backup_logger.log(\"blob status report message:\",True)\n                    backup_logger.log(blob_report_msg,True)\n                else:\n                    backup_logger.log(\"blob_report_msg is none\",True)\n        except Exception as e:\n            err_msg='cannot write status to the status blob' + traceback.format_exc()\n            backup_logger.log(err_msg, True, 'Warning')\n\ndef get_status_to_report(status, status_code, message, snapshot_info = None):\n    global MyPatching,backup_logger,hutil,para_parser,total_used_size,size_calculation_failed\n    blob_report_msg = None\n    file_report_msg = None\n    try:\n        if total_used_size == -1:\n            sizeCalculation = SizeCalculation.SizeCalculation(patching = MyPatching, hutil = hutil, logger = backup_logger, para_parser = para_parser)\n            total_used_size,size_calculation_failed = sizeCalculation.get_total_used_size()\n            number_of_blobs = len(para_parser.includeLunList)\n            maximum_possible_size = number_of_blobs * 1099511627776 # 1 TiB per blob\n            if(total_used_size>maximum_possible_size and number_of_blobs != 0):\n                total_used_size = maximum_possible_size\n            backup_logger.log(\"Assertion Check, total size : {0} ,maximum_possible_size : {1}\".format(total_used_size,maximum_possible_size),True)\n        if(para_parser is not None):\n            blob_report_msg, file_report_msg = hutil.do_status_report(operation='Enable',status=status,\\\n                    status_code=str(status_code),\\\n                    message=message,\\\n                    taskId=para_parser.taskId,\\\n                    commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\\\n                    snapshot_info=snapshot_info,\\\n                    total_size = total_used_size,\\\n                    failure_flag = size_calculation_failed)\n    except Exception as e:\n        err_msg='cannot get status report parameters, Exception %s, 
stack trace: %s' % (str(e), traceback.format_exc())\n        backup_logger.log(err_msg, True, 'Warning')\n    return blob_report_msg, file_report_msg\n\ndef exit_with_commit_log(status,result,error_msg, para_parser):\n    global backup_logger\n    backup_logger.log(error_msg, True, 'Error')\n    if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != \"\"):\n        backup_logger.commit(para_parser.logsBlobUri)\n    blob_report_msg, file_report_msg = get_status_to_report(status, result, error_msg, None)\n    status_report_to_file(file_report_msg)\n    status_report_to_blob(blob_report_msg)\n    if(eventlogger is not None):\n        eventlogger.dispose()\n    sys.exit(0)\n\ndef exit_if_same_taskId(taskId):\n    global backup_logger,hutil,para_parser\n    trans_report_msg = None\n    taskIdentity = TaskIdentity()\n    last_taskId = taskIdentity.stored_identity()\n    if(taskId == last_taskId):\n        backup_logger.log(\"TaskId is same as last, so skip with Processed Status, current:\" + str(taskId) + \" == last:\" + str(last_taskId), True)\n        status=CommonVariables.status_success\n        hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.SuccessAlreadyProcessedInput)\n        status_code=CommonVariables.SuccessAlreadyProcessedInput\n        message='TaskId AlreadyProcessed nothing to do'\n        backup_logger.log(message, True)\n        if(eventlogger is not None):\n            eventlogger.dispose()\n        sys.exit(0)\n\ndef freeze_snapshot(timeout):\n    try:\n        global hutil,backup_logger,run_result,run_status,error_msg,freezer,freeze_result,para_parser,snapshot_info_array,g_fsfreeze_on, workload_patch\n        canTakeCrashConsistentSnapshot = can_take_crash_consistent_snapshot(para_parser)\n        freeze_snap_shotter = FreezeSnapshotter(backup_logger, hutil, freezer, g_fsfreeze_on, para_parser, canTakeCrashConsistentSnapshot)\n        if (hutil.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList):\n            temp_result = CommonVariables.FailedInvalidDataDiskLunList\n            temp_status = 'error'\n            error_msg = 'Invalid Input. IsAnyDiskExcluded is marked as true but input LUN list received from CRP is empty, 
'\\\n               'which is not allowed if VM has Direct Drives or if VM has Write Accelerated disks or if VM is a TVM/CVM.'\n            exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)\n        backup_logger.log(\"Calling do snapshot method\", True, 'Info')\n        run_result, run_status, snapshot_info_array = freeze_snap_shotter.doFreezeSnapshot()\n        if (canTakeCrashConsistentSnapshot == True and run_result != CommonVariables.success and run_result != CommonVariables.success_appconsistent):\n            if (snapshot_info_array is not None and snapshot_info_array !=[] and check_snapshot_array_fail() == False and len(snapshot_info_array) == 1):\n                run_status = CommonVariables.status_success\n                run_result = CommonVariables.success\n                hutil.SetSnapshotConsistencyType(Status.SnapshotConsistencyType.crashConsistent)\n    except Exception as e:\n        errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n        backup_logger.log(errMsg, True, 'Error')\n        run_result = CommonVariables.error\n        run_status = 'error'\n        error_msg = 'Enable failed with exception in safe freeze or snapshot ' \n        hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)\n    #snapshot_done = True\n\ndef check_snapshot_array_fail():\n    global snapshot_info_array, backup_logger\n    snapshot_array_fail = False\n    if snapshot_info_array is not None and snapshot_info_array !=[]:\n        for snapshot_index in range(len(snapshot_info_array)):\n            if(snapshot_info_array[snapshot_index].isSuccessful == False):\n                backup_logger.log('T:S  snapshot failed at index ' + str(snapshot_index), True)\n                snapshot_array_fail = True\n                break\n    return snapshot_array_fail\n\ndef get_key_value(jsonObj, key):\n    value = None\n    if(key in jsonObj.keys()):\n        value = jsonObj[key]\n    return value\n\ndef can_take_crash_consistent_snapshot(para_parser):\n    global backup_logger\n    takeCrashConsistentSnapshot = False\n    if(para_parser != None and para_parser.customSettings != None and para_parser.customSettings != ''):\n        customSettings = json.loads(para_parser.customSettings)\n        isManagedVm = get_key_value(customSettings, 'isManagedVm')\n        canTakeCrashConsistentSnapshot = get_key_value(customSettings, 'canTakeCrashConsistentSnapshot')\n        backupRetryCount = get_key_value(customSettings, 'backupRetryCount')\n        numberOfDisks = 0\n        if (para_parser.includeLunList is not None):\n            numberOfDisks = len(para_parser.includeLunList)\n        isAnyNone = (isManagedVm is None or canTakeCrashConsistentSnapshot is None or backupRetryCount is None)\n        if (isAnyNone == False and isManagedVm == True and canTakeCrashConsistentSnapshot == True and backupRetryCount > 0 and numberOfDisks == 1):\n            takeCrashConsistentSnapshot = True\n        backup_logger.log(\"isManagedVm=\" + str(isManagedVm) + \", canTakeCrashConsistentSnapshot=\" + str(canTakeCrashConsistentSnapshot) + \", backupRetryCount=\" + str(backupRetryCount) + \", numberOfDisks=\" + str(numberOfDisks) + \", takeCrashConsistentSnapshot=\" + str(takeCrashConsistentSnapshot), True, 'Info')\n    return takeCrashConsistentSnapshot\n\ndef spawn_monitor(location = \"\", strace_pid = 0):\n    d = location\n    if d == \"\":\n        d = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        d = 
os.path.join(d, \"debughelper\")\n    bd = os.path.join(d, \"msft_snap_monit\")\n    try:\n        args = [bd, \"--wd\", d]\n        if (strace_pid > 0):\n            args = [bd, \"--wd\", d, \"--strace\", \"--tracepid\", str(strace_pid)]\n\n        backup_logger.log(\"[spawn_monitor] -> command: %s\" % (\" \".join(args)))\n        p = subprocess.Popen(args)\n        backup_logger.log(\"[spawn_monitor] -> monitoring started\")\n        return p\n    except Exception as e:\n        backup_logger.log(\"[spawn_monitor] -> subprocess Popen failed: %s\" % (e));\n    return None\n\ndef daemon():\n    global MyPatching, backup_logger, hutil, run_result, run_status, error_msg, freezer, para_parser, snapshot_done, snapshot_info_array, g_fsfreeze_on, total_used_size, patch_class_name, orig_distro, workload_patch, configSeqNo, eventlogger\n    try:\n        #this is using the most recent file timestamp.\n        hutil.do_parse_context('Executing', configSeqNo)\n\n        try:\n            backup_logger.log('starting daemon initially', True, \"Warning\")\n            backup_logger.log(\"patch_class_name: \"+str(patch_class_name)+\" and orig_distro: \"+str(orig_distro),True)\n            # handle the restoring scenario.\n            mi = MachineIdentity()\n            stored_identity = mi.stored_identity()\n            if(stored_identity is None):\n                mi.save_identity()\n            else:\n                current_identity = mi.current_identity()\n                if(current_identity != stored_identity):\n                    current_seq_no = -1\n                    backup_logger.log(\"machine identity not same, set current_seq_no to \" + str(current_seq_no) + \" \" + str(stored_identity) + \" \" + str(current_identity), True)\n                    hutil.set_last_seq(current_seq_no)\n                    mi.save_identity()\n        except Exception as e:\n            errMsg = 'Failed to validate sequence number with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            backup_logger.log(errMsg, True, 'Error')\n\n        freezer = FsFreezer(patching= MyPatching, logger = backup_logger, hutil = hutil)\n        backup_logger.log(\"safeFreezeBinary exists \" + str(freezer.file_exists), True, 'Info')\n\n        global_error_result = None\n        # precheck\n        freeze_called = False\n        configfile='/etc/azure/vmbackup.conf'\n        thread_timeout=str(60)\n        OnAppFailureDoFsFreeze = True\n        OnAppSuccessDoFsFreeze = True\n        MonitorRun = False\n        MonitorEnableStrace = False\n        MonitorLocation = \"\"\n        #Adding python version to the telemetry\n        try:\n            python_version_info = sys.version_info\n            python_version = str(sys.version_info[0])+ '.'  + str(sys.version_info[1]) + '.'  
+ str(sys.version_info[2])\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"pythonVersion\", python_version)\n        except Exception as e:\n            errMsg = 'Failed to retrieve python version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            backup_logger.log(errMsg, True, 'Error')\n\n        #fetching platform architecture\n        try:\n            architecture = platform.architecture()[0]\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"platformArchitecture\", architecture)\n        except Exception as e:\n            errMsg = 'Failed to retrieve \"platform architecture\" with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            backup_logger.log(errMsg, True, 'Error')\n\n        try:\n            if(freezer.mounts is not None):\n                hutil.partitioncount = len(freezer.mounts.mounts)\n            backup_logger.log(\" configfile \" + str(configfile), True)\n            config = ConfigParsers.ConfigParser()\n            config.read(configfile)\n            if config.has_option('SnapshotThread','timeout'):\n                thread_timeout = config.get('SnapshotThread','timeout')\n            if config.has_option('SnapshotThread','OnAppFailureDoFsFreeze'):\n                # getboolean so the value compares correctly against True/False below\n                OnAppFailureDoFsFreeze = config.getboolean('SnapshotThread','OnAppFailureDoFsFreeze')\n            if config.has_option('SnapshotThread','OnAppSuccessDoFsFreeze'):\n                OnAppSuccessDoFsFreeze = config.getboolean('SnapshotThread','OnAppSuccessDoFsFreeze')\n            if config.has_option(\"Monitor\", \"Run\"):\n                MonitorRun = config.getboolean(\"Monitor\", \"Run\")\n            if config.has_option(\"Monitor\", \"Strace\"):\n                MonitorEnableStrace = config.getboolean(\"Monitor\", \"Strace\")\n            if config.has_option(\"Monitor\", \"Location\"):\n                MonitorLocation = config.get(\"Monitor\", \"Location\")\n        except Exception as e:\n            errMsg='cannot read config file or file not present'\n            backup_logger.log(errMsg, True, 'Warning')\n        backup_logger.log(\"final thread timeout \" + thread_timeout, True)\n    \n        # Start the monitor process if enabled\n        monitor_process = None\n        if MonitorRun:\n            if MonitorEnableStrace:\n                monitor_process = spawn_monitor(location = MonitorLocation, strace_pid=os.getpid())\n            else:\n                monitor_process = spawn_monitor(location = MonitorLocation)\n\n        snapshot_info_array = None\n        try:\n            # we need to freeze the file system first\n            backup_logger.log('starting daemon for freezing the file system', True)\n            \"\"\"\n            protectedSettings is the privateConfig passed from PowerShell.\n            NOTE: _context._config uses the most recent settings file timestamp;\n            if time sync is alive, this should be right.\n            \"\"\"\n            protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings', {})\n            public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')\n            para_parser = ParameterParser(protected_settings, public_settings, backup_logger)\n            hutil.update_settings_file()\n\n            if(para_parser.taskId is not None and para_parser.taskId != \"\" and eventlogger is not None):\n                eventlogger.update_properties(para_parser.taskId)\n\n    
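        # Prechecks, in order: handler settings present (certificate check), the\n            # safefreeze binary on disk, and the CRP command not already timed out;\n            # each failed check exits early with a committed error status.\n    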
        if(not public_settings and not protected_settings):\n                error_msg = \"unable to load certificate\"\n                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedHandlerGuestAgentCertificateNotFound)\n                temp_result = CommonVariables.FailedHandlerGuestAgentCertificateNotFound\n                temp_status = 'error'\n                exit_with_commit_log(temp_status, temp_result, error_msg, para_parser)\n            \n            if(freezer.file_exists == False):\n                file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), freezer.safeFreezeFolderPath)\n                error_msg = \"safefreeze binary is missing in the following path \" + str(file_path)\n                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedSafeFreezeBinaryNotFound)\n                temp_result = CommonVariables.FailedSafeFreezeBinaryNotFound\n                temp_status = 'error'\n                backup_logger.log(\"exiting with commit\",True,\"Info\")\n                exit_with_commit_log(temp_status, temp_result, error_msg, para_parser)\n\n            if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != \"\"):\n                canTakeCrashConsistentSnapshot = can_take_crash_consistent_snapshot(para_parser)\n                temp_g_fsfreeze_on = True\n                freeze_snap_shotter = FreezeSnapshotter(backup_logger, hutil, freezer, temp_g_fsfreeze_on, para_parser, canTakeCrashConsistentSnapshot)\n                if (hutil.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedInvalidDataDiskLunList):\n                    temp_result = CommonVariables.FailedInvalidDataDiskLunList\n                    temp_status = 'error'\n                    error_msg = 'Invalid Input. IsAnyDiskExcluded is marked as true but input LUN list received from CRP is empty, 
'\\\n                   'which is not allowed if VM has Direct Drives or if VM has Write Accelerated disks or if VM is a TVM/CVM.'\n                    exit_with_commit_log(temp_status, temp_result, error_msg, para_parser)\n                if freeze_snap_shotter.is_command_timedout(para_parser):\n                    error_msg = \"CRP timeout limit has been reached, will not take snapshot.\"\n                    errMsg = error_msg\n                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedGuestAgentInvokedCommandTooLate)\n                    temp_result = CommonVariables.FailedGuestAgentInvokedCommandTooLate\n                    temp_status = 'error'\n                    exit_with_commit_log(temp_status, temp_result, error_msg, para_parser)\n\n            hutil.save_seq()\n\n            commandToExecute = para_parser.commandToExecute\n            #validate all the required parameters here\n            backup_logger.log('The command ' + commandToExecute + ' is being validated',True)\n            if(CommonVariables.iaas_install_command in commandToExecute.lower()):\n                backup_logger.log('install succeeded.',True)\n                run_status = 'success'\n                error_msg = 'Install Succeeded'\n                run_result = CommonVariables.success\n                backup_logger.log(error_msg)\n            elif(CommonVariables.iaas_vmbackup_command in commandToExecute.lower()):\n                if(para_parser.backup_metadata is None or para_parser.public_config_obj is None):\n                    run_result = CommonVariables.error_parameter\n                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)\n                    run_status = 'error'\n                    error_msg = 'required field empty or not correct'\n                    backup_logger.log(error_msg, True, 'Error')\n                else:\n                    backup_logger.log('commandToExecute for backup is ' + commandToExecute, True)\n                    \"\"\"\n                    make sure logging is not running while the file system is frozen.\n                    \"\"\"\n                    temp_status = 'success'\n                    temp_result = CommonVariables.ExtensionTempTerminalState\n                    temp_msg = 'Transitioning state in extension'\n                    blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)\n                    status_report_to_file(file_report_msg)\n                    status_report_to_blob(blob_report_msg)\n                    #partial logging before freeze\n                    if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != \"\"):\n                        backup_logger.commit_to_blob(para_parser.logsBlobUri)\n                    else:\n                        backup_logger.log(\"the logs blob uri is not there, so do not upload log.\")\n                    backup_logger.log('commandToExecute after committing the blob is ' + commandToExecute, True)\n                \n                    workload_patch = WorkloadPatch.WorkloadPatch(backup_logger)\n                    #new flow only if workload name is present in workload.conf\n                    if workload_patch.name != None and workload_patch.name != \"\":\n                        backup_logger.log(\"workload backup enabled for workload: \" + workload_patch.name, True)\n                        hutil.set_pre_post_enabled()\n                        pre_skipped = 
                        if len(workload_patch.error_details) > 0:\n                            backup_logger.log(\"skip pre and post\")\n                            pre_skipped = True\n                        else:\n                            workload_patch.pre()\n                        if len(workload_patch.error_details) > 0:\n                            backup_logger.log(\"file system consistent backup only\")\n                        #TODO: error handling\n                        if len(workload_patch.error_details) > 0 and OnAppFailureDoFsFreeze == True: # app step failed; fall back to fs freeze\n                            g_fsfreeze_on = True\n                        elif len(workload_patch.error_details) > 0 and OnAppFailureDoFsFreeze == False: # fs freeze only allowed when the app step succeeds; fail the backup\n                            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)\n                            error_msg= 'Failing backup as OnAppFailureDoFsFreeze is set to false'\n                            temp_result=CommonVariables.error\n                            temp_status= 'error'\n                            exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)\n                        elif len(workload_patch.error_details) == 0 and OnAppSuccessDoFsFreeze == False: # app-consistent only; skip fs freeze\n                            g_fsfreeze_on = False\n                        elif len(workload_patch.error_details) == 0 and OnAppSuccessDoFsFreeze == True: # app & fs consistency\n                            g_fsfreeze_on = True\n                        else:\n                            g_fsfreeze_on = True\n
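                        # Illustrative summary of the decision chain above (added note; the\n                        # OnAppFailureDoFsFreeze / OnAppSuccessDoFsFreeze flags come from config):\n                        #   app pre failed, OnAppFailureDoFsFreeze=True  -> freeze (fs-consistent)\n                        #   app pre failed, OnAppFailureDoFsFreeze=False -> fail the backup\n                        #   app pre passed, OnAppSuccessDoFsFreeze=False -> no freeze (app-consistent only)\n                        #   app pre passed, OnAppSuccessDoFsFreeze=True  -> freeze (app & fs consistent)\n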
                        freeze_snapshot(thread_timeout)\n                        if pre_skipped == False:\n                            workload_patch.post()\n                        workload_error = workload_patch.populateErrors()\n                        if workload_error != None and g_fsfreeze_on == False:\n                            run_status = 'error'\n                            run_result = workload_error.errorCode\n                            hutil.SetExtErrorCode(workload_error.errorCode)\n                            error_msg = 'Workload Patch failed with error message: ' +  workload_error.errorMsg\n                            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)\n                            backup_logger.log(error_msg, True)\n                        elif workload_error != None and g_fsfreeze_on == True:\n                            hutil.SetExtErrorCode(workload_error.errorCode)\n                            error_msg = 'Workload Patch failed with warning message: ' +  workload_error.errorMsg\n                            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)\n                            backup_logger.log(error_msg, True)\n                        else:\n                            if(run_status == CommonVariables.status_success):\n                                run_status = 'success'\n                                run_result = CommonVariables.success_appconsistent\n                                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)\n                                error_msg = 'Enable Succeeded with App Consistent Snapshot'\n                                backup_logger.log(error_msg, True)\n                            else:\n                                error_msg = 'Enable failed in fsfreeze snapshot flow'\n                                backup_logger.log(error_msg, True)\n                    else:\n                        PluginHostObj = PluginHost(logger=backup_logger)\n                        PluginHostErrorCode,dobackup,g_fsfreeze_on = PluginHostObj.pre_check()\n                        doFsConsistentbackup = False\n                        appconsistentBackup = False\n\n                        if not (PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigParsing or\n                                PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigParsing or\n                                PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigNotFound or\n                                PluginHostErrorCode == CommonVariables.FailedPrepostPluginhostConfigPermissionError or\n                                PluginHostErrorCode == CommonVariables.FailedPrepostPluginConfigNotFound):\n                            backup_logger.log('App Consistent Backup Enabled', True)\n                            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"isPrePostEnabled\", \"true\")\n                            appconsistentBackup = True\n\n                        if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):\n                            backup_logger.log('Triggering File System Consistent Backup because of error code ' + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(PluginHostErrorCode), True)\n                            doFsConsistentbackup = True\n\n                        preResult = PluginHostResult()\n                        postResult = PluginHostResult()\n\n                        if not doFsConsistentbackup:\n                            preResult = PluginHostObj.pre_script()\n                            dobackup = preResult.continueBackup\n\n                            if(g_fsfreeze_on == False and preResult.anyScriptFailed):\n                                dobackup = False\n\n                        if dobackup:\n                            freeze_snapshot(thread_timeout)\n\n                        if not doFsConsistentbackup:\n                            postResult = PluginHostObj.post_script()\n                            if not postResult.continueBackup:\n                                dobackup = False\n\n                            if(g_fsfreeze_on == False and postResult.anyScriptFailed):\n                                dobackup = False\n\n                        if not dobackup:\n                            if run_result == CommonVariables.success and PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success:\n                                run_status = 'error'\n                                run_result = PluginHostErrorCode\n                                hutil.SetExtErrorCode(PluginHostErrorCode)\n                                error_msg = 'Plugin Host Precheck Failed'\n                                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)\n                                backup_logger.log(error_msg, True)\n\n                            if run_result == CommonVariables.success:\n                                pre_plugin_errors = preResult.errors\n                                for error in pre_plugin_errors:\n                                    if error.errorCode !=
CommonVariables.PrePost_PluginStatus_Success:\n                                        run_status = 'error'\n                                        run_result = error.errorCode\n                                        hutil.SetExtErrorCode(error.errorCode)\n                                        error_msg = 'PreScript failed for the plugin ' +  error.pluginName\n                                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)\n                                        backup_logger.log(error_msg, True)\n                                        break\n\n                            if run_result == CommonVariables.success:\n                                post_plugin_errors = postResult.errors\n                                for error in post_plugin_errors:\n                                    if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:\n                                        run_status = 'error'\n                                        run_result = error.errorCode\n                                        hutil.SetExtErrorCode(error.errorCode)\n                                        error_msg = 'PostScript failed for the plugin ' +  error.pluginName\n                                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(hutil.ExtErrorCode)\n                                        backup_logger.log(error_msg, True)\n                                        break\n\n                        if appconsistentBackup:\n                            if(PluginHostErrorCode != CommonVariables.PrePost_PluginStatus_Success):\n                                hutil.SetExtErrorCode(PluginHostErrorCode)\n                            pre_plugin_errors = preResult.errors\n                            for error in pre_plugin_errors:\n                                if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:\n                                    hutil.SetExtErrorCode(error.errorCode)\n                            post_plugin_errors = postResult.errors\n                            for error in post_plugin_errors:\n                                if error.errorCode != CommonVariables.PrePost_PluginStatus_Success:\n                                    hutil.SetExtErrorCode(error.errorCode)\n\n                        if run_result == CommonVariables.success and not doFsConsistentbackup and not (preResult.anyScriptFailed or postResult.anyScriptFailed):\n                            run_status = 'success'\n                            run_result = CommonVariables.success_appconsistent\n                            hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success_appconsistent)\n                            error_msg = 'Enable Succeeded with App Consistent Snapshot'\n                            backup_logger.log(error_msg, True)\n\n            else:\n                run_status = 'error'\n                run_result = CommonVariables.error_parameter\n                hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)\n                error_msg = 'command is not correct'\n                backup_logger.log(error_msg, True, 'Error')\n        except Exception as e:\n            hutil.update_settings_file()\n            errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            backup_logger.log(errMsg, 
True, 'Error')\n            global_error_result = e\n\n        if monitor_process is not None:\n            monitor_process.terminate()\n\n        \"\"\"\n        we do the final status report here so that we avoid the complexity of logging while the file system is frozen.\n        \"\"\"\n        try:\n            if(global_error_result is not None):\n                if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):\n                    run_result = CommonVariables.error_12\n                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_12)\n                elif(para_parser is None):\n                    run_result = CommonVariables.error_parameter\n                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error_parameter)\n                else:\n                    run_result = CommonVariables.error\n                    hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)\n                run_status = 'error'\n                error_msg += ('Enable failed.' + str(global_error_result))\n            status_report_msg = None\n            hutil.SetExtErrorCode(run_result) #set the extension error code at the end in case it was missed somewhere\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(\"extErrorCode\", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeNameDict[hutil.ExtErrorCode]))\n            total_used_size = -1\n            blob_report_msg, file_report_msg = get_status_to_report(run_status,run_result,error_msg, snapshot_info_array)\n            if(hutil.is_status_file_exists()):\n                status_report_to_file(file_report_msg)\n            status_report_to_blob(blob_report_msg)\n        except Exception as e:\n            errMsg = 'Failed to log status in extension'\n            errMsg += str(e)\n            backup_logger.log(errMsg, True, 'Error')\n        if(para_parser is not None and para_parser.logsBlobUri is not None and para_parser.logsBlobUri != \"\"):\n            backup_logger.commit(para_parser.logsBlobUri)\n        else:\n            backup_logger.log(\"the logs blob URI is not present, so the log will not be uploaded.\")\n            backup_logger.commit_to_local()\n        if(eventlogger is not None):\n            eventlogger.dispose()\n    except Exception as e:\n        backup_logger.log(str(e), True, 'Error')\n        if(eventlogger is not None):\n            eventlogger.dispose()\n    if monitor_process is not None:\n        monitor_process.terminate()\n    sys.exit(0)\n\ndef uninstall():\n    global configSeqNo\n    hutil.do_parse_context('Uninstall', configSeqNo)\n    hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')\n\ndef disable():\n    global configSeqNo\n    hutil.do_parse_context('Disable', configSeqNo)\n    hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')\n\ndef update():\n    global configSeqNo\n    hutil.do_parse_context('Update', configSeqNo)\n    hutil.do_exit(0,'Update','success','0', 'Update Succeeded')\n
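\n# Note (descriptive comment, added for clarity): enable() below only validates\n# the request, reports a transitional status, and relaunches this handler via\n# main/handle.sh in \"daemon\" mode (see start_daemon()); the freeze-and-snapshot\n# work itself runs in the daemon code path above.\n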
turned off\")\n        hutil.exit_if_same_seq()\n\n        hutil.save_seq()\n\n        protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings', {})\n        public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')\n        para_parser = ParameterParser(protected_settings, public_settings, backup_logger)\n\n        try:\n            if CommonVariables.enableSnapshotExtensionPolling in para_parser.wellKnownSettingFlags and para_parser.wellKnownSettingFlags[CommonVariables.enableSnapshotExtensionPolling]:\n                create_host_based_service()\n        except Exception as e:\n            backup_logger.log(\"error starting new host based daemon: {}\".format(e), True, \"Error\")\n\n        if(para_parser.taskId is not None and para_parser.taskId != \"\"):\n            if(eventlogger is not None):\n                eventlogger.update_properties(para_parser.taskId)\n            backup_logger.log('taskId: ' + str(para_parser.taskId), True)\n            randomSleepTime = random.randint(500, 5000)\n            backup_logger.log('Sleeping for milliseconds: ' + str(randomSleepTime), True)\n            time.sleep(randomSleepTime / 1000)\n            exit_if_same_taskId(para_parser.taskId)\n            taskIdentity = TaskIdentity()\n            taskIdentity.save_identity(para_parser.taskId)       \n        temp_status= 'success'\n        temp_result=CommonVariables.ExtensionTempTerminalState\n        temp_msg='Transitioning state in extension'\n        blob_report_msg, file_report_msg = get_status_to_report(temp_status, temp_result, temp_msg, None)\n\n        status_report_to_file(file_report_msg)\n        if(eventlogger is not None):\n            eventlogger.dispose()\n        start_daemon()\n        sys.exit(0)\n    except Exception as e:\n        hutil.update_settings_file()\n        errMsg = 'Failed to call the daemon with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n        backup_logger.log(errMsg, True, 'Error')\n        global_error_result = e\n        temp_status= 'error'\n        temp_result=CommonVariables.error\n        hutil.SetExtErrorCode(ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.error)\n        error_msg = 'Failed to call the daemon'\n        exit_with_commit_log(temp_status, temp_result,error_msg, para_parser)\n\ndef thread_for_log_upload():\n    global para_parser,backup_logger\n    backup_logger.commit(para_parser.logsBlobUri)\n\ndef start_daemon():\n    args = [os.path.join(os.getcwd(), \"main/handle.sh\"), \"daemon\"]\n    #This process will start a new background process by calling\n    #    handle.py -daemon\n    #to run the script and will exit itself immediatelly.\n\n    #Redirect stdout and stderr to /dev/null.  
    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n\ndef can_use_systemd():\n    try:\n        pso = subprocess.check_output([\"systemctl\", \"is-system-running\"])\n        return pso[0:7].decode(\"utf-8\") == \"running\"\n    except Exception as e:\n        backup_logger.log(\"error running `systemctl is-system-running`: {}\".format(e), True, 'Warning')\n\n    try:\n        pso = subprocess.check_output([\"ps\", \"--no-headers\", \"-o\", \"comm\", \"1\"])\n        return pso[0:7].decode(\"utf-8\") == \"systemd\"\n    except Exception as e:\n        backup_logger.log(\"error running `ps --no-headers -o comm 1`: {}\".format(e), True, \"Warning\")\n    return False\n\ndef create_host_based_systemd_service():\n    ## Creates /etc/systemd/system/directsnapshot.service with content like:\n    ## [Unit]\n    ##     Description=Snapshot service for Microsoft Azure Restore Points\n    ##     After=multi-user.target\n    ## [Service]\n    ##     Type=simple\n    ##     Restart=always\n    ##     WorkingDirectory=<extension directory>\n    ##     ExecStart=<python executable> main/handle_host_daemon.py\n    ## [Install]\n    ##     WantedBy=multi-user.target\n    systemd_service_file = \"/etc/systemd/system/directsnapshot.service\"\n    script_dir = os.path.dirname(os.path.realpath(__file__))\n    work_dir = os.path.dirname(script_dir)\n    script_path = os.path.join(script_dir, \"handle_host_daemon.py\")\n    sys_script_path = os.path.join(\"main\", \"handle_host_daemon.py\")\n    exec_path = \"\"\n    try:\n        exec_path = sys.executable\n    except Exception as e:\n        backup_logger.log(\"error fetching python executable path: {}\".format(e), True, \"Error\")\n        return\n    if exec_path == \"\" or exec_path is None:\n        backup_logger.log(\"empty python executable path\", True, \"Error\")\n        return\n    if os.path.isfile(systemd_service_file):\n        try:\n            subprocess.check_output([\"systemctl\", \"stop\", \"directsnapshot.service\"])\n            os.remove(systemd_service_file)\n        except Exception as e:\n            backup_logger.log(\"error removing existing systemd service: {}\".format(e), True, \"Error\")\n            return\n    with open(systemd_service_file, \"w\", encoding=\"utf-8\") as f:\n        f.write(\"[Unit]\\n\")\n        f.write(\"\\tDescription=Snapshot service for Microsoft Azure Restore Points\\n\")\n        f.write(\"\\tAfter=multi-user.target\\n\")\n        f.write(\"[Service]\\n\")\n        f.write(\"\\tType=simple\\n\")\n        f.write(\"\\tRestart=always\\n\")\n        f.write(\"\\tWorkingDirectory={}\\n\".format(work_dir))\n        f.write(\"\\tExecStart={} {}\\n\".format(exec_path, sys_script_path))\n        f.write(\"[Install]\\n\")\n        f.write(\"\\tWantedBy=multi-user.target\\n\")\n\n    # Check if a pid file exists and kill the old daemon process if it is still running\n    pidfile=os.path.join(work_dir, \"directsnapshot.pid\")\n    if os.path.isfile(pidfile):\n        try:\n            opid = None\n            with open(pidfile, \"r\", encoding=\"utf-8\") as f:\n                opid = f.read().strip()\n            if opid is not None and os.path.isdir(os.path.join(\"/proc\", opid)):\n                backup_logger.log(\"process exists, killing\", True, \"Warning\")\n
killing\", True, \"Warning\")\n                subprocess.check_output([\"kill\", \"-9\", opid])\n                backup_logger.log(\"process killed\")\n        except Exception as e:\n            backup_logger.log(\"error checking for and killing daemon process: {}\".format(e), True, \"Error\")\n    \n    # Daemon reload, enable and run\n    try:\n        subprocess.check_output([\"systemctl\", \"daemon-reload\"])\n        subprocess.check_output([\"systemctl\", \"enable\", \"--now\", \"directsnapshot.service\"])\n    except Exception as e:\n        backup_logger.log(\"error running systemd service: {}\".format(e), True, \"Error\")\n\ndef create_host_based_process():\n    script_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n    subprocess.Popen(\n        [\"./main/handle_host_daemon.sh\"],\n        cwd = script_dir,\n        shell = True\n    )\n\ndef create_host_based_service():\n    try:\n        if can_use_systemd():\n            create_host_based_systemd_service()\n        else:\n            create_host_based_process()\n    except Exception as e:\n        backup_logger.log(\"error creating service for host based snapshots: {}\".format(e), True, \"Error\")\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "VMBackup/main/handle.sh",
    "content": "#!/usr/bin/env sh\npwdcommand=`pwd`\npwdstr=\"$pwdcommand\"\noutput=`cat $pwdstr'/HandlerEnvironment.json'`\noutputstr=\"$output\"\npoststr=${outputstr#*logFolder\\\"}\npostsubstr=${poststr#*\\\"}\npostsubstr1=${postsubstr#*\\\"}\nresultstrlen=`expr ${#postsubstr} - 1 - ${#postsubstr1}`\nlogfolder=$(echo $postsubstr | cut -b 1-$resultstrlen)\nlogfile=$logfolder'/shell.log'\n\nrc=3\narc=0\n\nif [ \"$1\" = \"install\" ]\nthen\n    if [ -f \"/etc/azure/workload.conf\" ]\n    then\n\t\tWorkloadConfEdited=`awk '/(workload_name)([ ]*[=])([ ]*[(^|\\\")a-zA-Z(^|\\\")])/' /etc/azure/workload.conf`\n\t\tif [ \"$WorkloadConfEdited\" != \"\" ]\n\t\t\tthen\n\t\t\t\t#Workload.conf is edited\n\t\t\t\techo \"`date -u`- The command is $1, exiting without conf file copy\" >> $logfile\n\t\t\telse\n\t\t\t\t#workload.conf is not edited\n\t\t\t\tcp main/workloadPatch/WorkloadUtils/workload.conf /etc/azure/workload.conf\n\t\t\t\techo \"`date -u`- The command is $1, exiting with conf file copy\" >> $logfile\t\n\t\tfi\n        exit $arc\n    else\n        mkdir -p /etc/azure\n        cp main/workloadPatch/WorkloadUtils/workload.conf /etc/azure/workload.conf\n        echo \"`date -u`- The command is $1, exiting with conf file copy\" >> $logfile\n        exit $arc\n    fi\nelif [ \"$1\" != \"enable\"  ] && [ \"$1\" != \"daemon\" ]\nthen\n    echo \"`date -u`- The command is $1, exiting\" >> $logfile\n    exit $arc\nfi\n\nconfigSeqNo=\"$(echo `printenv ConfigSequenceNumber`)\"\nif [ -z ${configSeqNo} ]\nthen\n\tconfigSeqNo='seqNo:-1'\n\techo \"`date -u`- ConfigSequenceNumber not found in environment variable ${configSeqNo}\" >> $logfile\nelse\n\tconfigSeqNo='seqNo:'$configSeqNo\n\techo \"`date -u`- ConfigSequenceNumber from environment variable ${configSeqNo}\" >> $logfile\nfi\n\npythonVersionList=\"python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 python3 python2.7 python2.6 python2 python\"\n\nfor pythonVersion in ${pythonVersionList};\ndo\n\tcmnd=\"/usr/bin/${pythonVersion}\"\n\tif [ ! -f \"${cmnd}\" ]\n\tthen\n\t\tcmnd=\"/usr/local/bin/${pythonVersion}\"\n\tfi\n\tif [ -f \"${cmnd}\" ]\n\tthen\n\t\techo \"`date -u`- ${pythonVersion} path exists\" >> $logfile\n\t\t$cmnd main/handle.py -$configSeqNo -$1\n\t\trc=$?\n\tfi\n\tif [ $rc -eq 0 ]\n\tthen\n\t\tbreak\n\tfi\ndone\n\npythonProcess=$(ps -ef | grep waagent | grep python)\npythonPath=$(echo \"${pythonProcess}\" | head -n1 | awk '{print $8;}')\n\nif [ $rc -ne 0 ] && [ -f \"`which python`\" ]\nthen\n\techo \"`date -u`- python path exists\" >> $logfile\n\t/usr/bin/env python main/handle.py -$configSeqNo -$1\n\trc=$?\nfi\n\nif [ $rc -ne 0 ] && [ -f \"${pythonPath}\" ]\nthen\n\techo \"`date -u`- python path exists\" >> $logfile\n\t$pythonPath main/handle.py -$configSeqNo -$1\n\trc=$?\nfi\n\t\nif [ $rc -eq 3 ]\nthen\n\techo \"`date -u`- python version unknown\" >> $logfile\nfi\n\necho \"`date -u`- $rc returned from handle.py\" >> $logfile\n\nexit $rc\n"
  },
  {
    "path": "VMBackup/main/handle_host_daemon.py",
    "content": "#!/usr/bin/env python\n\nimport time\nimport os\nimport threading\nimport signal\nimport sys\nimport json\nfrom Utils.WAAgentUtil import waagent\nfrom Utils import HandlerUtil\nimport datetime\nfrom common import CommonVariables\nimport subprocess\nimport traceback\nfrom datetime import datetime\n\nIS_PYTHON3 = sys.version_info[0] == 3\nif IS_PYTHON3:\n    import configparser as ConfigParsers\nelse:\n    import ConfigParser as ConfigParsers\n\nif IS_PYTHON3:\n    from urllib import request\nelse:\n    import urllib2 as request\n\nif IS_PYTHON3:\n    from urllib.error import HTTPError\nelse:\n    from urllib2 import HTTPError\n\nif IS_PYTHON3:\n    import urllib.parse as urllib\nelse:\n    import urllib\n\n\nSCRIPT_DIR=os.path.dirname(os.path.realpath(__file__))\nBASE_URI=\"http://168.63.129.16\"\nSTORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/'\nGEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb'\n# LOCK_FILE_DIR=\"/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock\"\n# LOCK_FILE=\"/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile\"\n# LOCK_FILE_NAME=\"SafeFreezeLockFile\"\n\nSNAPSHOT_INPROGRESS = False\n    \nclass HandlerContext:\n    def __init__(self,name):\n        self._name = name\n        self._version = '0.0'\n        return\n\ndef read_file(filepath):\n    \"\"\"\n    Read and return contents of 'filepath'.\n    \"\"\"\n    mode = 'rb'\n    with open(filepath, mode) as in_file:\n        data = in_file.read().decode('utf-8')\n        return data\n\nclass Handler:\n    _log = None\n    _error = None\n    def __init__(self, log, error, short_name):\n        self._context = HandlerContext(short_name)\n        self._log = log\n        self._error = error\n        self.eventlogger = None\n        self.log_message = \"\"\n        handler_env_file = './HandlerEnvironment.json'\n        if not os.path.isfile(handler_env_file):\n            self.error(\"[handle_host_daemon.py] -> Unable to locate \" + handler_env_file)\n            return None\n        ctxt = waagent.GetFileContents(handler_env_file)\n        if ctxt == None :\n            self.error(\"[handle_host_daemon] -> Unable to read \" + handler_env_file)\n        try:\n            handler_env = json.loads(ctxt)\n        except:\n            pass\n        if handler_env == None :\n            self.log(\"JSON error processing \" + handler_env_file)\n            return None\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n        self._context._name = handler_env['name']\n        self._context._version = str(handler_env['version'])\n        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n        self._context.log_dir = handler_env['handlerEnvironment']['logFolder']\n        self._context.log_file = os.path.join(self._context.log_dir,'host_based_extension.log')\n        self.logging_file=self._context.log_file\n\n    def _get_log_prefix(self):\n        return '[%s-%s]' % (self._context._name, self._context._version)\n\n    def get_value_from_configfile(self, key):\n        value = None\n        configfile = '/etc/azure/vmbackup.conf'\n        try :\n            if os.path.exists(configfile):\n                config = ConfigParsers.ConfigParser()\n                config.read(configfile)\n                if config.has_option('SnapshotThread',key):\n                    value = config.get('SnapshotThread',key)\n        except Exception as e:\n            pass\n        return value\n\n    def get_strvalue_from_configfile(self, key, default):\n    
\n    def _get_log_prefix(self):\n        return '[%s-%s]' % (self._context._name, self._context._version)\n\n    def get_value_from_configfile(self, key):\n        value = None\n        configfile = '/etc/azure/vmbackup.conf'\n        try :\n            if os.path.exists(configfile):\n                config = ConfigParsers.ConfigParser()\n                config.read(configfile)\n                if config.has_option('SnapshotThread',key):\n                    value = config.get('SnapshotThread',key)\n        except Exception as e:\n            pass\n        return value\n\n    def get_strvalue_from_configfile(self, key, default):\n        value = self.get_value_from_configfile(key)\n        if value == None or value == '':\n            value = default\n        try :\n            value_str = str(value)\n        except ValueError :\n            self.log('Not able to parse the read value as string, falling back to default value', 'Warning')\n            value = default\n        return value\n\n    def get_intvalue_from_configfile(self, key, default):\n        value = default\n        value = self.get_value_from_configfile(key)\n        if value == None or value == '':\n            value = default\n        try :\n            value_int = int(value)\n        except ValueError :\n            self.log('Not able to parse the read value as int, falling back to default value', 'Warning')\n            value = default\n\n        return int(value)\n\n    def log(self, message, level='Info'):\n        print(\"[Handler.log] -> Level: {} -> {}\".format(level, message))\n        try:\n            self.log_with_no_try_except(message, level)\n        except IOError:\n            pass\n        except Exception as e:\n            try:\n                errMsg = str(e) + 'Exception in hutil.log'\n                self.log_with_no_try_except(errMsg, 'Warning')\n            except Exception as e:\n                pass\n\n    def log_with_no_try_except(self, message, level='Info'):\n        WriteLog = self.get_strvalue_from_configfile('WriteLog','True')\n        if (WriteLog == None or WriteLog == 'True'):\n            if sys.version_info > (3,):\n                if self.logging_file is not None:\n                    self.log_py3(message)\n                    if self.eventlogger != None:\n                        self.eventlogger.trace_message(level, message)\n                else:\n                    pass\n            else:\n                self._log(self._get_log_prefix() + message)\n                if self.eventlogger != None:\n                    self.eventlogger.trace_message(level, message)\n            message = \"{0}  {1}  {2} \\n\".format(str(datetime.utcnow()) , level , message)\n        self.log_message = self.log_message + message\n\n    def log_py3(self, msg):\n        if type(msg) is not str:\n            msg = str(msg, errors=\"backslashreplace\")\n        msg = str(datetime.utcnow()) + \" \" + str(self._get_log_prefix()) + msg + \"\\n\"\n        try:\n            with open(self.logging_file, \"a+\") as C :\n                C.write(msg)\n        except IOError:\n            pass\n\n    def error(self, message):\n        self._error(self._get_log_prefix() + message)\n\n\nclass InvalidSnapshotRequestInitError(Exception):\n    def __init__(self):\n        super(InvalidSnapshotRequestInitError, self).__init__(\"Snapshot request object initialized incorrectly\")\n\n# class AcquireSnapshotLockError(Exception):\n#     def __init__(self):\n#         super(AcquireSnapshotLockError, self).__init__(\"Failed to acquire snapshot lock\")\n\nclass GetMountsError(Exception):\n    def __init__(self, message = \"\"):\n        super(GetMountsError, self).__init__(\"[SnapshotRequest.get_mounts] -> failed: {}\".format(message))\n\ndef print_from_thread(msg):\n    os.write(sys.stdout.fileno(), msg.encode(\"utf-8\"))\n\ndef thread_for_binary(self,args):\n    print_from_thread(\"[FreezeHandler.thread_for_binary] -> Thread for binary is called: {}\\n\".format(args))\n    time.sleep(3)\n    print_from_thread(\"[FreezeHandler.thread_for_binary] -> Waited in thread for 3 seconds\\n\")\n    print_from_thread(\"[FreezeHandler.thread_for_binary] -> ****** 1. Starting Freeze Binary \\n\")\n    self.child = subprocess.Popen(args,stdout=subprocess.PIPE)\n    print_from_thread(\"Binary subprocess Created\\n\")\n
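\n# Signal protocol between this daemon and the safefreeze binary (descriptive\n# note added for clarity): the binary freezes the listed mounts and then sends\n# SIGUSR1 to this process (sig_handle=1); if the binary exits first, SIGCHLD\n# fires instead (sig_handle=2); sig_handle=0 means we timed out waiting.\n# thaw_safe() later sends SIGUSR1 back to the binary to request the thaw.\n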
class FreezeHandler(object):\n    def __init__(self,handler):\n        # sig_handle values: 0 = nothing done yet, 1 = froze successfully, 2 = freeze failed / binary exited\n        self.sig_handle = 0\n        self.child = None\n        self.handler = handler\n\n    def sigusr1_handler(self, signal, frame):\n        print_from_thread('[FreezeHandler.sigusr1_handler] -> frozen\\n')\n        print_from_thread(\"[FreezeHandler.sigusr1_handler] -> ****** 4. Freeze Completed (Signal=1 received)\\n\")\n        self.sig_handle=1\n\n    def sigchld_handler(self, signal, frame):\n        print_from_thread('[FreezeHandler.sigchld_handler] -> some child process terminated\\n')\n        if(self.child is not None and self.child.poll() is not None):\n            print_from_thread(\"[FreezeHandler.sigchld_handler] -> binary child terminated\\n\")\n            print_from_thread(\"[FreezeHandler.sigchld_handler] -> ****** 9. Binary Process completed (Signal=2 received)\\n\")\n            self.sig_handle=2\n\n    def reset_signals(self):\n        self.sig_handle = 0\n        self.child = None\n\n    def startproc(self,args):\n        binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])\n        binary_thread.start()\n\n        SafeFreezeWaitInSecondsDefault = 66\n\n        proc_sleep_time = self.handler.get_intvalue_from_configfile('SafeFreezeWaitInSeconds',SafeFreezeWaitInSecondsDefault)\n\n        for i in range(0,(int(proc_sleep_time/2))):\n            if(self.sig_handle==0):\n                print(\"[FreezeHandler.startproc] -> inside loop with sig_handle \"+str(self.sig_handle))\n                time.sleep(2)\n            else:\n                break\n        print(\"[FreezeHandler.startproc] -> Binary output for signal handled: \"+str(self.sig_handle))\n        return self.sig_handle\n\n    def signal_receiver(self):\n        signal.signal(signal.SIGUSR1,self.sigusr1_handler)\n        signal.signal(signal.SIGCHLD,self.sigchld_handler)\n\nclass SnapshotRequest:\n    def __init__(self, handler, data):\n        global SNAPSHOT_INPROGRESS, BASE_URI, GEN2_DEVICE_ID\n        self.freeze_handler = FreezeHandler(handler)\n        self.freeze_start = datetime.utcnow()\n        self.freeze_safe_active = False\n        if isinstance(handler, Handler):\n            self.handler = handler\n            # MY_PATCHING, PATCH_CLASS_NAME, ORIG_DISTRO = GetMyPatching(handler)\n        else:\n            raise InvalidSnapshotRequestInitError\n        if \"snapshotId\" in data and isinstance(data[\"snapshotId\"], str):\n            self.snapshotId = data[\"snapshotId\"]\n        else:\n            raise InvalidSnapshotRequestInitError\n\n        if \"luns\" in data and isinstance(data[\"luns\"], list):\n            self.luns = data[\"luns\"]\n        # else:\n        #     raise InvalidSnapshotRequestInitError\n\n        if \"extensionSettings\" in data and isinstance(data[\"extensionSettings\"], dict):\n            self.extensionSettings = {}\n            es = data[\"extensionSettings\"]\n            if \"public\" in es and isinstance(es[\"public\"], dict):\n                self.extensionSettings[\"public\"] = es[\"public\"]\n            else:\n                self.extensionSettings[\"public\"] = {}\n            if \"protected\" in es and isinstance(es[\"protected\"], dict):\n                self.extensionSettings[\"protected\"] = {}\n
             pro = es[\"protected\"]\n                if \"loggingBlobSasUri\" in pro and isinstance(pro[\"loggingBlobSasUri\"], str):\n                    self.extensionSettings.protected[\"loggingBlobSasUri\"] = pro[\"loggingBlobSasUri\"]\n                # else:\n                #     raise InvalidSnapshotRequestInitError\n                if \"statusBlobSasUri\" in pro and isinstance(pro[\"statusBlobSasUri\"], str):\n                    self.extensionSettings.protected[\"statusBlobSasUri\"] = pro[\"statusBlobSasUri\"]\n                # else:\n                #     raise InvalidSnapshotRequestInitError\n            # else:\n            #     raise InvalidSnapshotRequestInitError\n            \n            if \"ProtectedSettingsCertThumbprint\" in data and isinstance(data[\"ProtectedSettingsCertThumbprint\"], str):\n                self.ProtectedSettingsCertThumbprint = data[\"ProtectedSettingsCertThumbprint\"]\n            # else:\n            #     raise InvalidSnapshotRequestInitError\n        self.__data = data\n\n    # def acquire_snapshot_lock(self):\n    #     try:\n    #         if not os.path.isdir('/etc/azure'):\n    #             os.mkdir('/etc/azure')\n    #         if not os.path.isdir(LOCK_FILE_DIR):\n    #             if not os.path.isfile(LOCK_FILE_DIR):\n    #                 os.mkdir(LOCK_FILE_DIR)\n    #             else:\n    #                 os.remove(LOCK_FILE_DIR)\n    #                 os.mkdir(LOCK_FILE_DIR)\n    #         self.safeFreezelockFile = open(LOCK_FILE,\"w\")\n    #         try:\n    #             fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_EX | fcntl.LOCK_NB)\n    #             self.isAcquiredLock = True\n    #             return True\n    #         except Exception as e:\n    #             self.handler.log(\"[lock_snapshot_file] -> fcntl.lockf has failed: \", e)\n    #             self.safeFreezelockFile.close()\n    #     except Exception as e:\n    #         self.handler.log(\"[lock_snapshot_file] -> Unexpected error occured: \", e)\n    #     return False\n\n    # def release_snapshot_lock(self):\n    #     try:\n    #         if (self.isAquireLock == True):\n    #             try:\n    #                 fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_UN)\n    #                 self.safeFreezelockFile.close()\n    #             except Exception as e:\n    #                 self.handler.log(\"Failed to unlock: %s, stack trace: %s\" % (str(e), traceback.format_exc()),True)\n    #         try:\n    #             os.remove(LOCK_FILE)\n    #         except Exception as e:\n    #             self.handler.log(\n    #                 \"Failed to delete %s file:\\nException:\\n%s\\nStack Trace:\\n%s\" %\n    #                   LOCK_FILE, str(e), traceback.format_exc())\n    #     except Exception as e:\n    #         self.handler.log(\"[release_snapshot_lock] -> unexpected error occurred: \", e)\n    #     return False\n\n    # Ignores usb devices\n    # TODO: suppport lvm setup\n    def get_block_devices(self):\n        p1 = subprocess.Popen([\"lsblk\", \"-dnl\", \"-o\", \"NAME\"], stdout=subprocess.PIPE)\n        p2 = subprocess.check_output([\"grep\", \"-E\", \"(sd|nvme)\"], stdin=p1.stdout).decode(\"utf-8\")\n        p1.stdout.close()\n        disks = []\n        for x in p2.split(\"\\n\"):\n            # print(\"device: {}\".format(x))\n            if not x.strip():\n                continue\n            if not self.is_usb(\"/dev/{}\".format(x)):\n                disks.append(x)\n        return disks\n    \n    def is_usb(self, device):\n        
    def is_usb(self, device):\n        # lsblk -dnl -o NAME | grep 'sd'\n        # udevadm info /dev/sda --query=property | grep ID_BUS\n        p1 = subprocess.Popen([\"udevadm\", \"info\", device, \"--query=property\"], stdout=subprocess.PIPE)\n        p2 = subprocess.check_output([\"grep\", 'ID_BUS'], stdin=p1.stdout).decode(\"utf-8\")\n        p1.stdout.close()\n        return p2.strip().endswith(\"=usb\")\n\n    @staticmethod\n    def _enumerate_device_id():\n        \"\"\"\n        Enumerate all storage device IDs.\n        Args:\n        None\n        Returns:\n        Iterator[Tuple[str, str]]: VmBus and storage devices.\n        \"\"\"\n\n        if os.path.exists(STORAGE_DEVICE_PATH):\n            for vmbus in os.listdir(STORAGE_DEVICE_PATH):\n                deviceid = read_file(filepath=os.path.join(STORAGE_DEVICE_PATH, vmbus, \"device_id\"))\n                guid = deviceid.strip('{}\\n')\n                yield vmbus, guid\n\n    @staticmethod\n    def search_for_resource_disk(gen1_device_prefix, gen2_device_id):\n        \"\"\"\n        Search the filesystem for a device by ID or prefix.\n        Args:\n        gen1_device_prefix (str): Gen1 resource disk prefix.\n        gen2_device_id (str): Gen2 resource device ID.\n        Returns:\n        str: The found device.\n        \"\"\"\n        device = None\n        # We have to try device IDs for both Gen1 and Gen2 VMs.\n        #ResourceDiskUtil.logger.log('Searching gen1 prefix {0} or gen2 {1}'.format(gen1_device_prefix, gen2_device_id),True)\n        try: # pylint: disable=R1702\n            for vmbus, guid in SnapshotRequest._enumerate_device_id():\n                if guid.startswith(gen1_device_prefix) or guid == gen2_device_id:\n                    for root, dirs, files in os.walk(STORAGE_DEVICE_PATH + vmbus): # pylint: disable=W0612\n                        root_path_parts = root.split('/')\n                        # For Gen1 VMs we only have to check for the block dir in the\n                        # current device. But for Gen2 VMs all of the disks (sda, sdb,\n                        # sr0) are presented in this device on the same SCSI controller.\n                        # Because of that we need to also read the LUN. It will be:\n
                        #   0 - OS disk\n                        #   1 - Resource disk\n                        #   2 - CDROM\n                        if root_path_parts[-1] == 'block' and ( # pylint: disable=R1705\n                                guid != gen2_device_id or\n                                root_path_parts[-2].split(':')[-1] == '1'):\n                            device = dirs[0]\n                            return device\n                        else:\n                            # older distros\n                            for d in dirs: # pylint: disable=C0103\n                                if ':' in d and \"block\" == d.split(':')[0]:\n                                    device = d.split(':')[1]\n                                    return device\n        except (OSError, IOError) as exc:\n            err_msg='Error getting device for %s or %s: %s , Stack Trace: %s' % (gen1_device_prefix, gen2_device_id, str(exc),traceback.format_exc())\n            print(err_msg)\n        return None\n\n    def device_for_ide_port(self):\n        \"\"\"\n        Return the device name of the resource disk.\n        gen1_device_prefix is the prefix of the device_id under which the resource\n        disk is exposed on Gen1 VMs; on Gen2 VMs the directory containing the\n        resource disk is matched by GEN2_DEVICE_ID instead.\n        \"\"\"\n        g0 = \"00000000\"\n        gen1_device_prefix = '{0}-0001'.format(g0)\n        self.handler.log(\n            '[SnapshotRequest.device_for_ide_port] -> Searching gen1 prefix {0} or gen2 {1}'.format(\n                gen1_device_prefix, GEN2_DEVICE_ID\n        ))\n        device = self.search_for_resource_disk(\n            gen1_device_prefix=gen1_device_prefix,\n            gen2_device_id=GEN2_DEVICE_ID\n        )\n        self.handler.log('[SnapshotRequest.device_for_ide_port] -> Found device: {0}'.format(device))\n        return device\n\n    def get_resource_disk_mount_point(self,option=1): # pylint: disable=R0912,R0914\n        try:\n            \"\"\"\n            if option = 0 then the partition will be returned, e.g. sdb1\n            if option = 1 then the mount point will be returned, e.g. /mnt/resource\n            \"\"\"\n            device = self.device_for_ide_port()\n            if device is None:\n                self.handler.log('unable to detect disk topology', 'Error')\n\n            partition = None\n            if device is not None:\n                partition = \"{0}{1}\".format(device,\"1\")  #assuming only one resource disk partition\n            self.handler.log(\"Resource disk partition: {0} \".format(partition))\n            if(option==0):\n                return partition\n\n            # find name of mount using:\n            # grep -E \"^/dev/sdb1\" /proc/mounts | awk '{print $2}'\n            # print(\"Found partition: {}\".format(partition))\n            if partition is not None:\n                p1 = subprocess.Popen([\"grep\", \"-E\", \"^/dev/{}\".format(partition), \"/proc/mounts\"], stdout=subprocess.PIPE)\n                p2 = subprocess.check_output([\"awk\", '{print $2}'], stdin=p1.stdout).decode(\"utf-8\")\n                p1.stdout.close()\n                v = [x for x in p2.split(\"\\n\") if x.strip()]\n                if len(v) > 0:\n                    # print(\"Returning v[0]: {}\".format(v[0]))\n                    return v[0]\n        except Exception as e:\n            self.handler.log(\n                    \"[SnapshotRequest.get_resource_disk_mountpoint] -> unexpected error occurred: {}\\n{}\".format(e, traceback.format_exc())\n            )\n        return None\n
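\n    # Descriptive note (added; based on the code below): get_mounts() keeps only\n    # ext3/ext4/btrfs/xfs mounts whose backing disk appears in get_block_devices(),\n    # and drops the temporary resource-disk mount found above, so the freeze list\n    # covers persistent data disks only. For example, \"/dev/sdb1 /mnt/resource\"\n    # would be filtered out while \"/dev/sda1 /\" is kept.\n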
    def get_mounts(self):\n        try:\n            resource_mount = self.get_resource_disk_mount_point()\n            p1 = subprocess.Popen([\"mount\", \"-l\"], stdout=subprocess.PIPE)\n            p2 = subprocess.Popen([\"grep\", \"-E\", \"(ext4|ext3|btrfs|xfs)\"], stdin=p1.stdout, stdout=subprocess.PIPE)\n            p3 = subprocess.check_output([\"awk\", '{print $1\" \"$3}'], stdin=p2.stdout).decode(\"utf-8\")\n            p1.stdout.close()\n            p2.stdout.close()\n            # print(\"p3: {}\".format(p3))\n            disks = self.get_block_devices()\n            # print(\"disks: {}\".format(disks))\n            def is_valid_mount(partition,mount_point):\n                if resource_mount is not None and mount_point.strip() == resource_mount:\n                    return False\n                # lsblk -ndo pkname /dev/sda1\n                disk = subprocess.check_output([\"lsblk\", \"-ndo\", \"pkname\", partition]).decode(\"utf-8\")\n                disk = \" \".join(disk.split()) # remove any trailing or preceding whitespace\n                # print(\"[is_valid_disk] -> if disk: {} exists in list: {}\".format(disk, disks))\n                if disk not in disks:\n                    return False\n                return True\n            mounts = []\n            for m in p3.split(\"\\n\"):\n                if not m.strip():\n                    continue\n                m = \" \".join(m.split()) # remove any preceding or trailing whitespace\n                v = m.split()\n                # print(\"Post split: {}\".format(v))\n                if len(v) != 2:\n                    continue\n                partition = v[0]\n                mount_point = v[1]\n                # print(\"[get_mounts] -> Checking mount: {}\".format(mount_point))\n                # print(\"[get_mounts] -> Checking partition: {}\".format(partition))\n                if not is_valid_mount(partition, mount_point):\n                    continue\n                mounts.append(mount_point)\n            print(\"Mounts: {}\".format(mounts))\n            return mounts\n        except Exception as e:\n            self.handler.log(\"[SnapshotRequest.get_mounts] -> Unexpected error: {}\".format(e))\n            raise GetMountsError(traceback.format_exc())\n\n    def safefreeze_path(self):\n        p = os.path.join(os.getcwd(),os.path.dirname(__file__),\"safefreeze/bin/safefreeze\")\n        machine = os.uname()[-1]\n        if IS_PYTHON3:\n            machine = os.uname().machine\n        if machine is not None and (machine.startswith(\"arm64\") or machine.startswith(\"aarch64\")):\n            p = os.path.join(os.getcwd(),os.path.dirname(__file__),\"safefreezeArm64/bin/safefreeze\")\n        return p\n\n    def log_binary_output(self):\n        print(\n            \"[SnapshotRequest.log_binary_output] -> ============== Binary output traces start ================= \"\n        )\n        while True:\n            line=self.freeze_handler.child.stdout.readline()\n            if IS_PYTHON3:\n                line = str(line, encoding='utf-8', errors=\"backslashreplace\")\n            else:\n                line = str(line)\n            if(\"Failed to open:\" in line):\n                self.mount_open_failed = True\n            if(line != ''):\n                self.handler.log(line.rstrip())\n            else:\n                break\n        print(\n            \"[SnapshotRequest.log_binary_output] -> ============== Binary output traces end ================= \"\n        )\n
\n    def freeze_safe(self, args):\n        errors = []\n        error_codes = []\n        timedout = False\n        try:\n            self.freeze_handler.reset_signals()\n            self.freeze_handler.signal_receiver()\n            sig_handle = self.freeze_handler.startproc(args)\n            # self.handler.log(\n            #     \"[SnapshotRequest.freeze_safe] -> freeze_safe after returning from startproc : sig_handle={}\".format(str(sig_handle))\n            # )\n            print(\"[SnapshotRequest.freeze_safe] -> freeze_safe after returning from startproc : sig_handle={}\".format(str(sig_handle)))\n            if(sig_handle != 1):\n                if (self.freeze_handler.child is not None):\n                    print(\"[SnapshotRequest.freeze_safe] -> calling log_binary_output\")\n                    self.log_binary_output()\n                if (sig_handle == 0):\n                    timedout = True\n                    error_msg=\"freeze timed out\"\n                    errors.append(error_msg)\n                    error_codes.append(\"FREEZE_TIMED_OUT\")\n                    self.handler.log(error_msg)\n                # elif (self.mount_open_failed == True):\n                #     error_msg=CommonVariables.unable_to_open_err_string\n                #     errors.append(error_msg)\n                #     self.handler.log(error_msg)\n                # elif (self.isAquireLockSucceeded == False):\n                #     error_msg=\"Mount Points already freezed by some other processor\"\n                #     errors.append(error_msg)\n                #     self.handler.log(error_msg)\n                else:\n                    error_msg=\"freeze failed for some mount\"\n                    errors.append(error_msg)\n                    error_codes.append(\"INCOMPLETE_FREEZE\")\n                    self.handler.log(error_msg)\n        except Exception as e:\n            # self.logger.enforce_local_flag(True)\n            error_msg='freeze failed for some mount with exception: %s, stack trace: %s' % (str(e), traceback.format_exc())\n            errors.append(error_msg)\n            error_codes.append(\"UNEXPECTED_FREEZE_EXC\")\n            self.handler.log(error_msg)\n        finally:\n            self.freeze_start_time = datetime.utcnow()\n        return errors, error_codes, timedout\n\n    def thaw_safe(self):\n        errors = []\n        unable_to_sleep = False\n        try:\n            if not self.freeze_safe_active:\n                self.freeze_end_time = datetime.utcnow()\n                return errors, unable_to_sleep\n            if(self.freeze_handler.child is None):\n                print(\"[SnapshotRequest.thaw_safe] -> child already completed\")\n                print(\"[SnapshotRequest.thaw_safe] -> ****** 7. Error - Binary Process Already Completed\")\n                error_msg = 'snapshot result inconsistent'\n                errors.append(error_msg)\n            elif(self.freeze_handler.child.poll() is None):\n                print(\"[SnapshotRequest.thaw_safe] -> child process still running\")\n                print(\"[SnapshotRequest.thaw_safe] -> ****** 7. Sending Thaw Signal to Binary\")\n
                self.freeze_handler.child.send_signal(signal.SIGUSR1)\n\n                # Wait up to 30 seconds for the freeze process to stop\n                for i in range(0,30):\n                    if(self.freeze_handler.child.poll() is None):\n                        print(\"child still running, sigusr1 sent\")\n                        time.sleep(1)\n                    else:\n                        break\n                print(\"[SnapshotRequest.thaw_safe] -> calling log_binary_output: 1\")\n                self.log_binary_output()\n                if(self.freeze_handler.child.returncode != 0):\n                    error_msg = '[SnapshotRequest.thaw_safe] -> snapshot result inconsistent as child returns with failure'\n                    errors.append(error_msg)\n                    self.handler.log(error_msg, 'Error')\n            else:\n                self.handler.log(\"[SnapshotRequest.thaw_safe] -> Binary output after process end when no thaw sent: \")\n                if(self.freeze_handler.child.returncode == 2):\n                    error_msg = '[SnapshotRequest.thaw_safe] -> Unable to execute sleep'\n                    errors.append(error_msg)\n                    unable_to_sleep = True\n                else:\n                    error_msg = '[SnapshotRequest.thaw_safe] -> snapshot result inconsistent'\n                    errors.append(error_msg)\n                print(\"[SnapshotRequest.thaw_safe] -> calling log_binary_output: 2\")\n                self.log_binary_output()\n                self.handler.log(error_msg, 'Error')\n        finally:\n            self.freeze_end_time = datetime.utcnow()\n        return errors, unable_to_sleep\n\n    # Uses the safefreeze binary which depends on fsfreeze\n    # TODO: support LVM when present\n    def freeze_mounts(self):\n        errors = []\n        error_codes = []\n        try:\n            timeout = self.handler.get_intvalue_from_configfile('timeout','60')\n            args = [self.safefreeze_path(), str(timeout)]\n            mounts = self.get_mounts()\n            if len(mounts) == 0:\n                self.handler.log(\"[SnapshotRequest.freeze_mounts] -> nothing to freeze\")\n                return errors, error_codes\n            for mount in mounts:\n                args.append(mount)\n            errors, error_codes, timedout = self.freeze_safe(args)\n            if len(errors) == 0 and not timedout:\n                self.freeze_start_time = datetime.utcnow()\n                self.freeze_safe_active = True\n        except GetMountsError as gme:\n            self.handler.log(\"[SnapshotRequest.freeze_mounts] -> get_mounts failed: {}\\n{}\".format(gme, traceback.format_exc()))\n        except Exception as e:\n            self.handler.log(\"[SnapshotRequest.freeze_mounts] -> unexpected error occurred: {}\\n{}\".format(e, traceback.format_exc()))\n        return errors, error_codes\n\n    def start_snapshot(self, error_code = None, error_message = None):\n        print(\"[SnapshotRequest.start_snapshot] -> Fired\")\n        errors = []\n        try:\n            payload = {\n                \"snapshotId\": self.snapshotId,\n                \"errMsg\": \"\"\n            }\n            if error_code is not None:\n                payload[\"error\"] = {\n                    \"code\": error_code if isinstance(error_code, str) else \"\",\n                    \"message\": error_message if isinstance(error_message, str) else \"\",\n                }\n                payload[\"errMsg\"] = error_message if isinstance(error_message, str) else \"\"\n
\n            # if IS_PYTHON3:\n            #     data = urllib.urlencode(payload).encode(\"utf-8\")\n            # else:\n            #     data = urllib.urlencode(payload)\n            # print(\"[SnapshotRequest.start_snapshot] -> Data:{}\".format(data))\n\n            if IS_PYTHON3:\n                data = json.dumps(payload).encode(\"utf-8\")\n                print(\"[SnapshotRequest.start_snapshot] -> Data: {}\".format(data))\n                r = request.Request(\n                    url = \"{}/machine/plugins?comp=xdisksvc&type=startsnapshot\".format(BASE_URI),\n                    headers = {\n                        \"Content-Type\": \"application/json; charset=utf-8\",\n                        \"Content-Length\": len(data),\n                    }\n                )\n            else:\n                data = json.dumps(payload)\n                print(\"[SnapshotRequest.start_snapshot] -> Data: {}\".format(data))\n                r = request.Request(\n                    url = \"{}/machine/plugins?comp=xdisksvc&type=startsnapshot\".format(BASE_URI),\n                    headers = {\n                        \"Content-Type\": \"application/json; charset=utf-8\"\n                    }\n                )\n            conn = request.urlopen(r, timeout = 10, data = data)\n            print(\"[SnapshotRequest.start_snapshot] -> Request: {}\".format(r))\n            # if IS_PYTHON3:\n            #     conn = request.urlopen(r, timeout = 10, data = data)\n            # else:\n            #     conn = request.urlopen(r, timeout = 10)\n            if conn.getcode() != 200:\n                resp = conn.read()\n                print(\"[SnapshotRequest.start_snapshot] -> unexpected status code:{}, Body: {}\".format(conn.getcode(), resp))\n                errors.append(\"STARTSNAP_UNEXPECTED_STATUS_{}\".format(conn.getcode()))\n        except HTTPError as herr:\n            print(\"[SnapshotRequest.start_snapshot] -> startsnapshot request failed with status: {}, reason: {}\".format(herr.code, herr.reason))\n            errors.append(\"STARTSNAP_HTTP_ERR\")\n        except Exception as e:\n            print(\"[SnapshotRequest.start_snapshot] -> unexpected error occurred: {}\\n{}\".format(e, traceback.format_exc()))\n            errors.append(\"STARTSNAP_UNEXPECTED_EXC\")\n        return errors\n\n    def end_snapshot(self, payload):\n        errors = []\n        try:\n\n            # if IS_PYTHON3:\n            #     data = urllib.urlencode(payload).encode(\"utf-8\")\n            # else:\n            #     data = urllib.urlencode(payload)\n            # print(\"[SnapshotRequest.end_snapshot] -> Data:{}\".format(data))\n            if IS_PYTHON3:\n                data = json.dumps(payload).encode(\"utf-8\")\n                print(\"[SnapshotRequest.end_snapshot] -> Data: {}\".format(data))\n                r = request.Request(\n                    url = \"{}/machine/plugins?comp=xdisksvc&type=publishsnapshot\".format(BASE_URI),\n                    headers = {\n                        \"Content-Type\": \"application/json\",\n                        \"Content-Length\": len(data)\n                    }\n                )\n            else:\n                data = json.dumps(payload)\n                print(\"[SnapshotRequest.end_snapshot] -> Data: {}\".format(data))\n                r = request.Request(\n                    url = \"{}/machine/plugins?comp=xdisksvc&type=publishsnapshot\".format(BASE_URI),\n                    headers = {\n                        \"Content-Type\": \"application/json\"\n                    }\n                )\n
                   \"Content-Type\": \"application/json\"\n                    }\n                )\n\n            conn = request.urlopen(r, timeout = 10, data = data)\n            # if IS_PYTHON3:\n            #     conn = request.urlopen(r, timeout = 10, data = data)\n            # else:\n            #     conn = request.urlopen(r, timeout = 10)\n            if conn.status != 200:\n                resp = conn.read()\n                print(\"[SnapshotRequest.end_snapshot] -> unexpected status code: {}, Body: {}\".format(conn.status, resp))\n                errors.append(\"ENDSNAP_UNEXPECTED_STATUS_{}\".format(conn.status))\n        except HTTPError as herr:\n            print(\"[SnapshotRequest.end_snapshot] -> unexpected status code: {}, reason: {}\".format(herr.code, herr.reason))\n            errors.append(\"ENDSNAP_UNEXPECTED_STATUS_{}\".format(herr.code))\n        except Exception as e:\n            print(\"[SnapshotRequest.end_snapshot] -> unexpected error occured: {}\\n{}\".format(e, traceback.format_exc()))\n            errors.append(\"ENDSNAP_UNEXPECTED_EXC\")\n        return errors\n\n    def take_snapshot(self):\n        # self.freeze_start = datetime.utcnow()\n        print(\"[SnapshotRequest.take_snapshot] -> Fired\")\n        frozen_at = 0\n        call_remote_end = 0\n        remote_call_success = False\n        snapshot_error_code = None\n        snapshot_error_msg = None\n\n        try:\n            errors, error_codes = self.freeze_mounts()\n            x_errors = []\n            if len(errors) > 0:\n                print(\"[Snapshot_Request.take_snapshot] -> self.freeze_mounts() failed\")\n                print(\"{}\".format(\"\\n\".join(errors)))\n                x_errors = self.start_snapshot(error_code = error_codes[0], error_message = errors[0])\n                snapshot_error_code = error_codes[0]\n                snapshot_error_msg = errors[0]\n            else:\n                print(\"[Snapshot_Request.take_snapshot] -> self.freeze_mounts() success\")\n                frozen_at = datetime.utcnow()\n                x_errors = self.start_snapshot()\n            if len(x_errors) > 0:\n                print(\"[Snapshot_Request.take_snapshot] -> calling xdisksvc failed with: {}\".format(x_errors[0]))\n                snapshot_error_code = x_errors[0]\n                snapshot_error_msg = snapshot_error_code\n            else:\n                print(\"[Snapshot_Request.take_snapshot] -> calling xdisksvc succeeded\")\n                remote_call_success = True\n        except Exception as e:\n            print(\"[SnapshotRequest.take_snapshot] -> unexpected exception: {}\\n{}\".format(e, traceback.format_exc()))\n            snapshot_error_code = \"UNEXPECTED_SNAPSHOT_EXC\"\n            snapshot_error_msg = str(e)\n        finally:\n            call_remote_end = datetime.utcnow()\n            self.thaw_safe()\n            print(\"[SnapshotRequest.take_snapshot] -> thaw_safe executed successfully\")\n        \n        print(\"[SnapshotRequest.take_snapshot] -> Outta try catch!\")\n        body = {\n            \"snapshotId\": self.snapshotId,\n            \"errMsg\": \"\",\n            # \"consistencyMode\": \"App\",\n        }\n        if remote_call_success and (call_remote_end.timestamp() - frozen_at.timestamp()) < 9:\n            print(\"[SanpshotRequest.take_snapshot] -> app consistency verified\")\n        else:\n            print(\"[SnapshotRequest.take_snapshot] -> app consistency validation failed\")\n            body[\"error\"] = {\n                \"code\": 
snapshot_error_code,\n                \"message\": snapshot_error_msg\n            }\n            body[\"errMsg\"] = snapshot_error_msg\n        self.end_snapshot(body)\n\ndef get_snapshot_requests(handler):\n    global BASE_URI\n    res = {\n        \"statusCode\": 0,\n        \"data\": {},\n    }\n    try:\n        conn = request.urlopen(BASE_URI + \"/machine/plugins?comp=xdisksvc&type=checkforsnapshot\", timeout = 10)\n        res[\"statusCode\"] = conn.status\n        if res[\"statusCode\"] == 200:\n            res[\"data\"] = json.loads(conn.read())\n            return res\n    except HTTPError as herr:\n        res[\"statusCode\"] = herr.code\n    except Exception as e:\n        handler.log(\"Exception making a http request\", e)\n    return res\n\ndef take_new_snapshot(handler, data):\n    try:\n        sr = SnapshotRequest(handler, data)\n        sr.take_snapshot()\n    except InvalidSnapshotRequestInitError:\n        handler.log(\"[take_new_snapshot] -> SnapshotRequest object initialized with invalid data: \", data)\n    except Exception as e:\n        handler.log(\"[take_new_snapshot] -> Unexpected error occurred: \", e)\n\ndef main():\n    global SCRIPT_DIR\n    HandlerUtil.waagent.LoggerInit('/dev/console','/dev/stdout')\n    handler = Handler(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)\n    starttime = time.time()\n    while True:\n        try:\n            res = get_snapshot_requests(handler)\n            print(\"[main] -> res: {}\".format(res))\n            if res[\"statusCode\"] == 200:\n                take_new_snapshot(handler, res[\"data\"])\n            elif res[\"statusCode\"] == 404:\n                handler.log(\"[main] -> no new snapshot requests at this time\")\n            else:\n                handler.log(\"[main] -> invalid response code: \", res[\"statusCode\"])\n        except Exception as e:\n            handler.log(\"[main] -> Unexpected expcetion occured\", e)\n        time.sleep(300.0 - ((time.time() - starttime) % 300.0))\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "VMBackup/main/handle_host_daemon.sh",
    "content": "#!/usr/bin/env sh\npwdstr=`pwd`\noutput=`cat $pwdstr'/HandlerEnvironment.json'`\noutputstr=\"$output\"\npoststr=${outputstr#*logFolder\\\"}\npostsubstr=${poststr#*\\\"}\npostsubstr1=${postsubstr#*\\\"}\nresultstrlen=`expr ${#postsubstr} - 1 - ${#postsubstr1}`\nlogfolder=$(echo $postsubstr | cut -b 1-$resultstrlen)\nlogfile=$logfolder'/shell.log'\nrc=3\nPIDFILE=\"directsnapshot.pid\"\n\nif [ -e $PIDFILE ]; then\n\tpid=`cat $PIDFILE`\n\tpid=$(ps --pid $pid | tail -1 | awk '{ print $1 }')\n\t# echo $pid\n\tif echo $pid | grep -Eq '^[0-9]+$'; then\n\t\techo \"Process already exists\"\n\t\texit 0\n\telse\n\t\trm $PIDFILE\n\tfi\nfi\n\npythonVersionList=\"python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 python3 python2.7 python2.6 python2 python\"\n\nfor pythonVersion in ${pythonVersionList};\ndo\n\tcmnd=\"/usr/bin/${pythonVersion}\"\n\tif [ -f \"${cmnd}\" ]; then\n\t\techo \"[$(date -u +\"%F %H:%M:%S:%N\")] ${pythonVersion} path exists\" >> $logfile\n\t\tnohup $cmnd main/handle_host_daemon.py &\n\t\tpid=$(ps --pid $! | tail -1 | awk '{ print $1 }')\n\t\techo $pid | tee $PIDFILE\n\t\tif echo $pid | grep -Eq '^[0-9]+$'; then\n\t\t\trc=0\n\t\tfi\n\tfi\n\tif [ $rc -eq 0 ]\n\tthen\n\t\tbreak\n\tfi\ndone\n\nif [ $rc -ne 0 ] && [ -f \"`which python`\" ]\nthen\n\techo \"[$(date -u +\"%F %H:%M:%S:%N\")] python path exists\" >> $logfile\n\tnohup /usr/bin/env python main/handle_host_daemon.py &\n\tpid=$(ps --pid $! | tail -1 | awk '{ print $1 }')\n\techo $pid | tee $PIDFILE\n\tif echo $pid | grep -Eq '^[0-9]+$'; then\n\t\trc=0\n\tfi\nfi\n\nif [ $rc -ne 0 ] && [ -f \"${pythonPath}\" ]\nthen\n\techo \"[$(date -u +\"%F %H:%M:%S:%N\")] python path exists\" >> $logfile\n\tnohup $pythonPath main/handle_host_daemon.py &\n\tpid=$(ps --pid $! | tail -1 | awk '{ print $1 }')\n\techo $pid | tee $PIDFILE\n\tif echo $pid | grep -Eq '^[0-9]+$'; then\n\t\trc=0\n\tfi\nfi\n\t\nif [ $rc -eq 3 ]\nthen\n\techo \"[$(date -u +\"%F %H:%M:%S:%N\")] python version unknown\" >> $logfile\nfi\n\necho \"[$(date -u +\"%F %H:%M:%S:%N\")] $rc returned from handle_host_daemon.py\" >> $logfile\n\nexit $rc"
  },
  {
    "path": "VMBackup/main/hostsnapshotter.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\ntry:\n    import urlparse as urlparser\nexcept ImportError:\n    import urllib.parse as urlparser\nimport traceback\nimport datetime\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport multiprocessing as mp\nimport json\nfrom common import CommonVariables\nfrom HttpUtil import HttpUtil\nfrom Utils import Status\nfrom Utils import HostSnapshotObjects\nfrom Utils import HandlerUtil\nfrom fsfreezer import FsFreezer\nimport sys\n\n\nclass HostSnapshotter(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger, hostIp):\n        self.logger = logger\n        self.configfile='/etc/azure/vmbackup.conf'\n        self.snapshoturi = 'http://' + hostIp + '/metadata/recsvc/snapshot/dosnapshot?api-version=2017-12-01'\n        self.presnapshoturi = 'http://' + hostIp + '/metadata/recsvc/snapshot/presnapshot?api-version=2017-12-01'\n        self.hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)\n\n    def snapshotall(self, paras, freezer, g_fsfreeze_on, taskId):\n        result = None\n        blob_snapshot_info_array = []\n        all_failed = True\n        is_inconsistent = False\n        unable_to_sleep = False\n        meta_data = paras.backup_metadata\n        if(self.snapshoturi is None):\n            self.logger.log(\"Failed to do the snapshot because snapshoturi is none\",False,'Error')\n            all_failed = True\n        try:\n            snapshoturi_obj = urlparser.urlparse(self.snapshoturi)\n            if(snapshoturi_obj is None or snapshoturi_obj.hostname is None):\n                self.logger.log(\"Failed to parse the snapshoturi\",False,'Error')\n                all_failed = True\n            else:\n                diskIds = []\n                body_content = ''\n                headers = {}\n                headers['Backup'] = 'true'\n                headers['Content-type'] = 'application/json'\n                headers['UserAgent'] = 'VMSnapshot'\n                settings = []\n                if (paras.includeLunList != None and paras.includeLunList.count != 0):\n                    diskIds = paras.includeLunList\n                if(paras.wellKnownSettingFlags != None):\n                    for flag in paras.wellKnownSettingFlags:\n                        temp_dict = {}\n                        temp_dict[CommonVariables.key] = flag\n                        temp_dict[CommonVariables.value] = paras.wellKnownSettingFlags[flag]\n                        settings.append(temp_dict)\n                if(paras.isVMADEEnabled == True and paras.diskEncryptionSettings):\n                    settings.append({CommonVariables.key:CommonVariables.isOsDiskADEEncrypted, CommonVariables.value:paras.isOsDiskADEEncrypted})\n                    
settings.append({CommonVariables.key:CommonVariables.areDataDisksADEEncrypted, CommonVariables.value:paras.areDataDisksADEEncrypted})\n                    meta_data.append({CommonVariables.key:CommonVariables.diskEncryptionSettings, CommonVariables.value:paras.diskEncryptionSettings})\n                hostDoSnapshotRequestBodyObj = HostSnapshotObjects.HostDoSnapshotRequestBody(taskId, diskIds, settings, paras.snapshotTaskToken, meta_data, paras.instantAccessDurationMinutes)\n                body_content = json.dumps(hostDoSnapshotRequestBodyObj, cls = HandlerUtil.ComplexEncoder)\n                redactedRequestBodyObj = self.hutil.redact_sensitive_encryption_details(hostDoSnapshotRequestBodyObj)\n                redacted_body_content = json.dumps(redactedRequestBodyObj, cls = HandlerUtil.ComplexEncoder)\n                self.logger.log('Headers : ' + str(headers))\n                self.logger.log('Host Request body : ' + str(redacted_body_content))\n                http_util = HttpUtil(self.logger)\n                self.logger.log(\"start calling the snapshot rest api\")\n                # initiate http call for blob-snapshot and get http response\n                self.logger.log('****** 5. Snapshotting (Host) Started')\n                result, httpResp, errMsg,responseBody = http_util.HttpCallGetResponse('POST', snapshoturi_obj, body_content, headers = headers, responseBodyRequired = True, isHostCall = True)\n                self.logger.log('****** 6. Snapshotting (Host) Completed')\n                self.logger.log(\"dosnapshot responseBody: \" + str(responseBody))\n                #performing thaw\n                if g_fsfreeze_on :\n                    time_before_thaw = datetime.datetime.now()\n                    thaw_result, unable_to_sleep = freezer.thaw_safe()\n                    time_after_thaw = datetime.datetime.now()\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(\"ThawTime\", str(time_after_thaw-time_before_thaw))\n                    self.logger.log('T:S thaw result ' + str(thaw_result))\n                    if(thaw_result is not None and len(thaw_result.errors) > 0):\n                        is_inconsistent = True\n                \n                # Http response check(After thawing)\n                if(httpResp != None):\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodeDoSnapshot, str(httpResp.status))\n                    if(int(httpResp.status) == 200 or int(httpResp.status) == 201) and (responseBody == None or responseBody == \"\") :\n                        self.logger.log(\"DoSnapshot: responseBody is empty but http status code is success\")\n                        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodeDoSnapshot, str(557))\n                        all_failed = True\n                    elif(int(httpResp.status) == 200 or int(httpResp.status) == 201):\n                        blob_snapshot_info_array, all_failed = self.get_snapshot_info(responseBody)\n                    # guard against a None body before calling startswith\n                    if(httpResp.status == 500 and responseBody != None and not responseBody.startswith(\"{ \\\"error\\\"\")):\n                        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodeDoSnapshot, str(556))\n                        all_failed = True\n                else:\n                    # HttpCall failed\n                    HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodeDoSnapshot, str(555))\n                    self.logger.log(\"dosnapshot HTTP call failed; possibly hitting the 
wrong WireServer IP\")\n        except Exception as e:\n            errorMsg = \"Failed to do the snapshot in host with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg, False, 'Error')\n            HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodeDoSnapshot, str(558))\n            all_failed = True\n        return blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep\n\n    def pre_snapshot(self, paras, taskId, fetch_disk_details = False):\n        statusCode = 555\n        responseBody = None  # ensure the variable is bound even if the request below raises\n        if(self.presnapshoturi is None):\n            self.logger.log(\"Failed to do the snapshot because presnapshoturi is none\",False,'Error')\n        try:\n            presnapshoturi_obj = urlparser.urlparse(self.presnapshoturi)\n            if(presnapshoturi_obj is None or presnapshoturi_obj.hostname is None):\n                self.logger.log(\"Failed to parse the presnapshoturi\",False,'Error')\n            else:\n                headers = {}\n                headers['Backup'] = 'true'\n                headers['Content-type'] = 'application/json'\n                headers['UserAgent'] = 'VMSnapshot'\n                # if the vm is ade enabled and if the diskEncryptionSettings are not yet populated, then we need to fetch the disk details\n                # or when the fetch_disk_details flag is set to true\n                if(fetch_disk_details == True or (paras.isVMADEEnabled == True and not paras.diskEncryptionSettings)):\n                    if(fetch_disk_details != True):\n                        self.logger.log(\"Fetching disk details as the VM is ADE enabled and diskEncryptionSettings are not yet populated\")\n                        fetch_disk_details = True\n                    preSnapshotSettings = []\n                    temp_dict = {}\n                    temp_dict[CommonVariables.key] = CommonVariables.isVMADEEnabled\n                    temp_dict[CommonVariables.value] = paras.isVMADEEnabled\n                    preSnapshotSettings.append(temp_dict)\n                    hostPreSnapshotRequestBodyObj = HostSnapshotObjects.HostPreSnapshotRequestBody(taskId, paras.snapshotTaskToken, preSnapshotSettings)\n                else:\n                    hostPreSnapshotRequestBodyObj = HostSnapshotObjects.HostPreSnapshotRequestBody(taskId, paras.snapshotTaskToken)\n                body_content = json.dumps(hostPreSnapshotRequestBodyObj, cls = HandlerUtil.ComplexEncoder)\n                self.logger.log('Headers : ' + str(headers))\n                self.logger.log('Host Request body : ' + str(body_content))\n                http_util = HttpUtil(self.logger)\n                self.logger.log(\"start calling the presnapshot rest api\")\n                # initiate http call for blob-snapshot and get http response\n                result, httpResp, errMsg,responseBody = http_util.HttpCallGetResponse('POST', presnapshoturi_obj, body_content, headers = headers, responseBodyRequired = True, isHostCall = True)\n                if responseBody:\n                    try:\n                        response_json = json.loads(responseBody)\n                        if \"bhsVersion\" in response_json:\n                            self.logger.log(\"PreSnapshotResponse: bhsVersion: \" + str(response_json[\"bhsVersion\"]))\n                        if \"nodeId\" in response_json:\n                            self.logger.log(\"PreSnapshotResponse: nodeId: \" + 
str(response_json[\"nodeId\"]))\n                        if \"responseTime\" in response_json:\n                            self.logger.log(\"PreSnapshotResponse: responseTime: \" + str(response_json[\"responseTime\"]))\n                        if \"result\" in response_json:\n                            self.logger.log(\"PreSnapshotResponse: result: \" + str(response_json[\"result\"]))\n                    except Exception as e:\n                        self.logger.log(\"PreSnapshotResponse: Failed to parse responseBody: \" + str(e))\n                \n                if(httpResp != None):\n                    statusCode = httpResp.status\n                    self.logger.log(\"PreSnapshot: Status Code: \" + str(statusCode))\n                    if(int(statusCode) == 200 or int(statusCode) == 201) and (responseBody == None or responseBody == \"\") :\n                        self.logger.log(\"PreSnapshot:responseBody is empty but http status code is success\")\n                        statusCode = 557\n                    elif(responseBody != None):\n                        if(paras.isVMADEEnabled == True and fetch_disk_details == True):\n                            response = json.loads(responseBody)\n                            paras.isOsDiskADEEncrypted = response.get(CommonVariables.isOsDiskADEEncrypted)\n                            paras.areDataDisksADEEncrypted = response.get(CommonVariables.areDataDisksADEEncrypted)\n                            paras.diskEncryptionSettings = response.get(CommonVariables.diskEncryptionSettings)\n                            self.logger.log(\"PreSnapshotResponse: isOsDiskADEEncrypted: \"+ str(paras.isOsDiskADEEncrypted))\n                            self.logger.log(\"PreSnapshotResponse: areDataDisksADEEncrypted: \"+ str(paras.areDataDisksADEEncrypted))\n                            if paras.diskEncryptionSettings is not None:\n                                self.logger.log(\"PreSnapshotResponse: DiskEncryptionSettings: \"+ str(len(paras.diskEncryptionSettings)))\n                            else:\n                                self.logger.log(\"PreSnapshotResponse: DiskEncryptionSettings are null\")\n                        else:\n                            self.logger.log(\"PreSnapshotResponse: VM is either not ADE Enabled or disk details were not requested\")\n                    elif(httpResp.status == 500 and not responseBody.startswith(\"{ \\\"error\\\"\")):\n                        self.logger.log(\"BHS is not runnning on host machine\")\n                        statusCode = 556\n                else:\n                    # HttpCall failed\n                    statusCode = 555\n                    self.logger.log(\"presnapshot Hitting wrong WireServer IP\")\n        except Exception as e:\n            errorMsg = \"Failed to do the pre snapshot in host with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg, False, 'Error')\n            statusCode = 558\n        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.hostStatusCodePreSnapshot, str(statusCode))\n        return statusCode, responseBody\n\n    def get_snapshot_info(self, responseBody):\n        blobsnapshotinfo_array = []\n        all_failed = True\n        try:\n            if(responseBody != None):\n                json_reponseBody = json.loads(responseBody)\n                epochTime = datetime.datetime(1970, 1, 1, 0, 0, 0)\n                for snapshot_info in json_reponseBody['snapshotInfo']:\n                    
self.logger.log(\"From Host- IsSuccessful:{0}, SnapshotUri:{1}, ErrorMessage:{2}, StatusCode:{3}\".format(snapshot_info['isSuccessful'], snapshot_info['snapshotUri'], snapshot_info['errorMessage'], snapshot_info['statusCode']))\n                    \n                    ddSnapshotIdentifierInfo = None\n                    if('ddSnapshotIdentifier' in snapshot_info and snapshot_info['ddSnapshotIdentifier'] != None):\n                        creationTimeString = snapshot_info['ddSnapshotIdentifier']['creationTime']\n                        self.logger.log(\"creationTime string from BHS : {0} \".format(creationTimeString))\n                        try:\n                            creationTimeObj = datetime.datetime.strptime(creationTimeString, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n                        except:\n                            creationTimeObj = datetime.datetime.strptime(creationTimeString, \"%Y-%m-%dT%H:%M:%SZ\")\n                        self.logger.log(\"Converting the creationTime string received in UTC format to UTC Ticks\")\n                        delta = creationTimeObj - epochTime\n                        timestamp = self.get_total_seconds(delta)\n                        creationTimeUTCTicks = str(int(timestamp * 1000))\n                        instantAccessDurationMinutes = None\n                        if 'instantAccessDurationMinutes' in snapshot_info['ddSnapshotIdentifier']:\n                            instantAccessDurationMinutes = snapshot_info['ddSnapshotIdentifier']['instantAccessDurationMinutes']\n                        ddSnapshotIdentifierInfo = HostSnapshotObjects.DDSnapshotIdentifier(creationTimeUTCTicks , snapshot_info['ddSnapshotIdentifier']['id'], snapshot_info['ddSnapshotIdentifier']['token'], instantAccessDurationMinutes)\n                        self.logger.log(\"ddSnapshotIdentifier Information from Host- creationTime : {0}, id : {1}, token : {2}, instantAccessDurationMinutes : {3}\".format(\n                                        ddSnapshotIdentifierInfo.creationTime, ddSnapshotIdentifierInfo.id, ddSnapshotIdentifierInfo.token,\n                                        ddSnapshotIdentifierInfo.instantAccessDurationMinutes if ddSnapshotIdentifierInfo.instantAccessDurationMinutes is not None else 'Not Set'))\n                    else:\n                        self.logger.log(\"ddSnapshotIdentifier absent/None in Host Response\")\n\n                    blobsnapshotinfo_array.append(HostSnapshotObjects.BlobSnapshotInfo(snapshot_info['isSuccessful'], snapshot_info['snapshotUri'], snapshot_info['errorMessage'], snapshot_info['statusCode'], ddSnapshotIdentifierInfo))\n                    \n                    if (snapshot_info['isSuccessful'] == 'true'):\n                        all_failed = False\n        except Exception as e:\n            errorMsg = \" deserialization of response body failed with error: %s, stack trace: %s\" % (str(e), traceback.format_exc())\n            self.logger.log(errorMsg)\n\n        return blobsnapshotinfo_array, all_failed\n    \n    def get_total_seconds(self, delta):\n        # Check if total_seconds method exists in current Python version\n        if hasattr(delta, 'total_seconds'):\n            return delta.total_seconds()\n        else:\n            self.logger.log(\"Calculating total seconds manually for version compatibility.\")\n            return delta.days * 86400 + delta.seconds + delta.microseconds / 1e6\n"
  },
  {
    "path": "VMBackup/main/mounts.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os.path import *\n\nimport re\nimport sys\nimport subprocess\nimport types\nfrom Utils.DiskUtil import DiskUtil\n\nclass Error(Exception): \n    pass\n\nclass Mount:\n    def __init__(self, name, type, fstype, mount_point):\n        self.name = name\n        self.type = type\n        self.fstype = fstype\n        self.mount_point = mount_point\n        self.unique_name = str(self.mount_point) + \"_\" + str(self.name)\n\nclass Mounts:\n    def __init__(self,patching,logger):\n        self.mounts = []\n        added_mount_point_names = [] \n        disk_util = DiskUtil.get_instance(patching,logger)\n        # Get mount points \n        mount_points, mount_points_info = disk_util.get_mount_points() \n        # Get lsblk devices \n        self.device_items = disk_util.get_device_items(None)\n        lsblk_mounts = [] \n        lsblk_mount_points = []\n        lsblk_unique_names = []\n        lsblk_fs_types = []\n        # List to hold mount-points returned from lsblk command but not reurned from mount command \n        lsblk_mounts_not_in_mount = [] \n        for device_item in self.device_items:\n            mount = Mount(device_item.name, device_item.type, device_item.file_system, device_item.mount_point)\n            lsblk_mounts.append(mount)\n            logger.log(\"lsblk mount point \"+str(mount.mount_point)+\" added with device-name \"+str(mount.name)+\" and fs type \"+str(mount.fstype)+\", unique-name \"+str(mount.unique_name), True)\n            lsblk_mount_points.append(device_item.mount_point)\n            lsblk_unique_names.append(mount.unique_name)\n            lsblk_fs_types.append(device_item.file_system)\n            # If lsblk mount is not found in \"mount command\" mount-list, add it to the lsblk_mounts_not_in_mount array\n            if((device_item.mount_point not in mount_points) and (device_item.mount_point not in lsblk_mounts_not_in_mount)):\n                lsblk_mounts_not_in_mount.append(device_item.mount_point)\n        # Sort lsblk_mounts_not_in_mount array in ascending order\n        lsblk_mounts_not_in_mount.sort()\n        # Add the lsblk devices in the same order as they are returned in mount command output\n        for mount_point_info in mount_points_info:\n            mountPoint = mount_point_info[0]\n            deviceNameParts = mount_point_info[1].split(\"/\")\n            uniqueName = str(mountPoint) + \"_\" + str(deviceNameParts[len(deviceNameParts)-1])\n            fsType = mount_point_info[2]\n            if((mountPoint in lsblk_mount_points) and (mountPoint not in added_mount_point_names)):\n                if (self.should_skip_fstype(str(fsType))):\n                    logger.log(\"######## mounts list item Skipped due to fsType, mountPoint \"+str(mountPoint)+\", fsType \"+str(fsType)+\" and unique-name \"+str(uniqueName), True)\n                else:\n                    
lsblk_mounts_index = 0\n                    try:\n                        lsblk_mounts_index = lsblk_unique_names.index(uniqueName)\n                    except ValueError as e:\n                        logger.log(\"######## UniqueName not found in lsblk list :\" + str(uniqueName), True)\n                        lsblk_mounts_index = lsblk_mount_points.index(mountPoint)\n                    mountObj = lsblk_mounts[lsblk_mounts_index]\n                    if(mountObj.fstype is None or mountObj.fstype == \"\" or mountObj.fstype == \" \"):\n                        logger.log(\"fstype empty from lsblk for mount \" + str(mountPoint), True)\n                        mountObj.fstype = fsType\n                    self.mounts.append(mountObj)\n                    added_mount_point_names.append(mountPoint)\n                    logger.log(\"mounts list item added, mount point \"+str(mountObj.mount_point)+\", device-name \"+str(mountObj.name)+\", fs-type \"+str(mountObj.fstype)+\", unique-name \"+str(mountObj.unique_name), True)\n        # Append all the lsblk devices corresponding to lsblk_mounts_not_in_mount list mount-points\n        for mount_point in lsblk_mounts_not_in_mount:\n            if((mount_point in lsblk_mount_points) and (mount_point not in added_mount_point_names)):\n                self.mounts.append(lsblk_mounts[lsblk_mount_points.index(mount_point)])\n                added_mount_point_names.append(mount_point)\n                logger.log(\"mounts list item added from lsblk_mounts_not_in_mount, mount point \"+str(mount_point), True)\n        added_mount_point_names.reverse()\n        logger.log(\"added_mount_point_names :\" + str(added_mount_point_names), True)\n        # Reverse the mounts list\n        self.mounts.reverse()\n\n    def should_skip_fstype(self, fstype):\n        # keep only the filesystems this extension knows how to handle; skip everything else\n        return fstype not in ('ext3', 'ext4', 'xfs', 'btrfs')\n"
  },
  {
    "path": "VMBackup/main/parameterparser.py",
    "content": "#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom common import CommonVariables\nimport base64\nimport json\nimport sys\n\nclass ParameterParser(object):\n    def __init__(self, protected_settings, public_settings, backup_logger):\n        \"\"\"\n        TODO: we should validate the parameter first\n        \"\"\"\n        self.blobs = []\n        self.backup_metadata = None\n        self.public_config_obj = None\n        self.private_config_obj = None\n        self.blobs = None\n        self.customSettings = None\n        self.isVMADEEnabled = False\n        self.snapshotTaskToken = ''\n        self.includedDisks = None\n        self.dynamicConfigsFromCRP = None\n        self.disk_encryption_details = []\n        self.isOsDiskADEEncrypted = False\n        self.areDataDisksADEEncrypted = False\n        self.diskEncryptionSettings = {}\n        self.wellKnownSettingFlags = {CommonVariables.isSnapshotTtlEnabled: False, CommonVariables.useMccfToFetchDsasForAllDisks: False,\n                                      CommonVariables.useMccfForLad: False, CommonVariables.enableSnapshotExtensionPolling: False, CommonVariables.isVmmdBlobIncluded : False}\n        settingKeysMapping= {}\n        settingKeysMapping[CommonVariables.isSnapshotTtlEnabled.lower()] = CommonVariables.isSnapshotTtlEnabled\n        settingKeysMapping[CommonVariables.useMccfToFetchDsasForAllDisks.lower()] = CommonVariables.useMccfToFetchDsasForAllDisks\n        settingKeysMapping[CommonVariables.useMccfForLad.lower()] = CommonVariables.useMccfForLad\n        settingKeysMapping[CommonVariables.enableSnapshotExtensionPolling.lower()] = CommonVariables.enableSnapshotExtensionPolling\n        self.includeLunList = []    #To be shared with HP\n        self.instantAccessDurationMinutes = None\n\n        \"\"\"\n        get the public configuration\n        \"\"\"\n        self.commandToExecute = public_settings.get(CommonVariables.command_to_execute)\n        self.taskId = public_settings.get(CommonVariables.task_id)\n        self.locale = public_settings.get(CommonVariables.locale)\n        self.logsBlobUri = public_settings.get(CommonVariables.logs_blob_uri)\n        self.statusBlobUri = public_settings.get(CommonVariables.status_blob_uri)\n        self.commandStartTimeUTCTicks = public_settings.get(CommonVariables.commandStartTimeUTCTicks)\n        self.vmType = public_settings.get(CommonVariables.vmType)\n        if(CommonVariables.customSettings in public_settings.keys() and public_settings.get(CommonVariables.customSettings) is not None and public_settings.get(CommonVariables.customSettings) != \"\"):\n            backup_logger.log(\"Reading customSettings from public_settings\", True)\n            self.customSettings = public_settings.get(CommonVariables.customSettings)\n        elif(CommonVariables.customSettings in protected_settings.keys()):\n            backup_logger.log(\"Reading customSettings from 
protected_settings\", True)\n            self.customSettings = protected_settings.get(CommonVariables.customSettings)\n\n        self.publicObjectStr = public_settings.get(CommonVariables.object_str)\n        if(self.publicObjectStr is not None and self.publicObjectStr != \"\"):\n            if sys.version_info > (3,):\n                decoded_public_obj_string = base64.b64decode(self.publicObjectStr)\n                decoded_public_obj_string = decoded_public_obj_string.decode('ascii')\n            else:\n                decoded_public_obj_string = base64.standard_b64decode(self.publicObjectStr)\n            decoded_public_obj_string = decoded_public_obj_string.strip()\n            decoded_public_obj_string = decoded_public_obj_string.strip('\\'')\n            self.public_config_obj = json.loads(decoded_public_obj_string)\n            self.backup_metadata = self.public_config_obj['backupMetadata']\n        if(self.logsBlobUri is None or self.logsBlobUri == \"\"):\n            self.logsBlobUri = protected_settings.get(CommonVariables.logs_blob_uri)\n        if(self.statusBlobUri is None or self.statusBlobUri == \"\"):\n            self.statusBlobUri = protected_settings.get(CommonVariables.status_blob_uri)\n        if(CommonVariables.snapshotTaskToken in self.public_config_obj.keys()):\n            self.snapshotTaskToken = self.public_config_obj[CommonVariables.snapshotTaskToken]\n        elif(CommonVariables.snapshotTaskToken in protected_settings.keys()):\n            self.snapshotTaskToken = protected_settings.get(CommonVariables.snapshotTaskToken)\n        if(CommonVariables.includedDisks in self.public_config_obj.keys()):\n            self.includedDisks = self.public_config_obj[CommonVariables.includedDisks]\n        if(\"dynamicConfigsFromCRP\" in self.public_config_obj):\n            self.dynamicConfigsFromCRP = self.public_config_obj['dynamicConfigsFromCRP']\n\n        \"\"\"\n        first get the protected configuration\n        \"\"\"\n        self.privateObjectStr = protected_settings.get(CommonVariables.object_str)\n        if(self.privateObjectStr is not None and self.privateObjectStr != \"\"):\n            if sys.version_info > (3,):\n                decoded_private_obj_string = base64.b64decode(self.privateObjectStr)\n                decoded_private_obj_string = decoded_private_obj_string.decode('ascii')\n            else:\n                decoded_private_obj_string = base64.standard_b64decode(self.privateObjectStr)\n            decoded_private_obj_string = decoded_private_obj_string.strip()\n            decoded_private_obj_string = decoded_private_obj_string.strip('\\'')\n            self.private_config_obj = json.loads(decoded_private_obj_string)\n            self.blobs = self.private_config_obj['blobSASUri']\n        \n        try:\n            if(self.includedDisks != None):\n                if(CommonVariables.dataDiskLunList in self.includedDisks.keys() and self.includedDisks[CommonVariables.dataDiskLunList] != None):\n                    self.includeLunList = self.includedDisks[CommonVariables.dataDiskLunList]\n                if(CommonVariables.isOSDiskIncluded in self.includedDisks.keys() and self.includedDisks[CommonVariables.isOSDiskIncluded] == True):\n                    self.includeLunList.append(-1)\n                    \n                    backup_logger.log(\"LUN list - \" + str(self.includeLunList), True)\n                if(CommonVariables.isVmmdBlobIncluded in self.includedDisks.keys() and self.includedDisks[CommonVariables.isVmmdBlobIncluded] == True):\n 
                   self.wellKnownSettingFlags[CommonVariables.isVmmdBlobIncluded] = True   \n                    \n                if(CommonVariables.isVMADEEnabled in self.includedDisks.keys() and self.includedDisks[CommonVariables.isVMADEEnabled] == True):\n                    self.isVMADEEnabled = True\n                    \n        except Exception as e:\n            errorMsg = \"Exception occurred while populating includeLunList, Exception: %s\" % (str(e))\n            backup_logger.log(errorMsg, True)\n        \n        if(self.dynamicConfigsFromCRP != None):\n            try:\n                backup_logger.log(\"settings received \" + str(self.dynamicConfigsFromCRP), True)\n                for config in self.dynamicConfigsFromCRP:\n                    if CommonVariables.key in config and CommonVariables.value in config:\n                        config_key = config[CommonVariables.key].lower()\n                        if(config_key in settingKeysMapping):\n                            self.wellKnownSettingFlags[settingKeysMapping[config_key]] = config[CommonVariables.value]\n                        else:\n                            backup_logger.log(\"The received \" + str(config[CommonVariables.key]) + \" is not an expected setting name.\", True)\n                    else:\n                        backup_logger.log(\"The received dynamicConfigsFromCRP is not in expected format.\", True)\n            except Exception as e:\n                errorMsg = \"Exception occurred while populating settings, Exception: %s\" % (str(e))\n                backup_logger.log(errorMsg, True)\n                        \n        backup_logger.log(\"settings to be sent \" + str(self.wellKnownSettingFlags), True)\n\n        try:\n            if(self.includedDisks != None):\n                if(CommonVariables.instantAccessDurationMinutes in self.includedDisks.keys() and self.includedDisks[CommonVariables.instantAccessDurationMinutes] != None):\n                    self.instantAccessDurationMinutes = self.includedDisks[CommonVariables.instantAccessDurationMinutes]\n                    backup_logger.log(\"InstantAccessDurationMinutes = \" + str(self.instantAccessDurationMinutes), True)\n        except Exception as e:\n            errorMsg = \"Exception occurred while extracting instantAccessDurationMinutes, Exception: %s\" % (str(e))\n            backup_logger.log(errorMsg, True)"
  },
  {
    "path": "VMBackup/main/patch/AbstractPatching.py",
    "content": "#!/usr/bin/python\n#\n# AbstractPatching is the base patching class of all the linux distros\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nclass AbstractPatching(object):\n    \"\"\"\n    AbstractPatching defines a skeleton neccesary for a concrete Patching class.\n    \"\"\"\n    def __init__(self,distro_info):\n        self.distro_info = distro_info\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/usr/bin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/usr/sbin/cryptsetup'\n        self.dd_path = '/usr/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/usr/bin/echo'\n        self.lsblk_path = '/usr/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/usr/bin/mkdir'\n        self.mount_path = '/usr/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/usr/bin/umount'\n\n    def install_extras(self):\n        pass\n"
  },
  {
    "path": "VMBackup/main/patch/DefaultPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass DefaultPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(DefaultPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    def install_extras(self):\n        \"\"\"\n        install the sg_dd because the default dd do not support the sparse write\n        \"\"\"\n        pass\n"
  },
  {
    "path": "VMBackup/main/patch/FreeBSDPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass FreeBSDPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(FreeBSDPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/local/bin/base64'\n        self.bash_path = '/usr/local/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/sbin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/sbin/umount'\n\n    def install_extras(self):\n        \"\"\"\n        install the sg_dd because the default dd do not support the sparse write\n        \"\"\"\n        pass\n\n"
  },
  {
    "path": "VMBackup/main/patch/KaliPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass KaliPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(KaliPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    def install_extras(self):\n        \"\"\"\n        install the sg_dd because the default dd do not support the sparse write\n        \"\"\"\n        pass\n"
  },
  {
    "path": "VMBackup/main/patch/NSBSDPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass NSBSDPatching(AbstractPatching):\n\n    resolver = None\n\n    def __init__(self,logger,distro_info):\n        super(NSBSDPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.usr_flag = 0\n        self.mount_path = '/sbin/mount'\n\n        try:\n            import dns.resolver\n        except ImportError:\n            raise Exception(\"Python DNS resolver not available. Cannot proceed!\")\n        self.resolver = dns.resolver.Resolver()\n        servers = []\n        getconf_cmd = \"/usr/Firewall/sbin/getconf /usr/Firewall/ConfigFiles/dns Servers | tail -n +2\"\n        getconf_p = subprocess.Popen(getconf_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n        output, _ = getconf_p.communicate()\n        output = str(output)\n\n        for server in output.split(\"\\n\"):\n            if server == '':\n                break\n            server = server[:-1] # remove last '='\n            grep_cmd = \"/usr/bin/grep '{}' /etc/hosts\".format(server) + \" | awk '{print $1}'\"\n            grep_p = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n            ip, _ = grep_p.communicate()\n            ip = str(ip).rstrip()\n            servers.append(ip)\n        self.resolver.nameservers = servers\n        dns.resolver.override_system_resolver(self.resolver)\n\n    def install_extras(self):\n        pass\n"
  },
  {
    "path": "VMBackup/main/patch/SuSEPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass SuSEPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(SuSEPatching,self).__init__(distro_info)\n        if(distro_info[1] == \"11\"):\n            self.logger = logger\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.cat_path = '/bin/cat'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.logger = logger\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_extras(self):\n        common_extras = ['cryptsetup','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['zypper', 'install','-l', extra])))\n\n        #if(paras.filesystem == \"btrfs\"):\n        #    extras = ['btrfs-tools']\n        #    for extra in extras:\n        #        print(\"installation for \" + extra + 'result is ' + str(subprocess.call(['zypper', 'install','-l', extra])))\n        #pass\n"
  },
  {
    "path": "VMBackup/main/patch/UbuntuPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass UbuntuPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(UbuntuPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    def install_extras(self):\n        \"\"\"\n        install the sg_dd because the default dd do not support the sparse write\n        \"\"\"\n        if(self.distro_info[0].lower() == \"ubuntu\" and self.distro_info[1] == \"12.04\"):\n            common_extras = ['cryptsetup-bin','lsscsi']\n        else:\n            common_extras = ['cryptsetup-bin','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['apt-get', 'install','-y', extra])))\n"
  },
  {
    "path": "VMBackup/main/patch/__init__.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport os\nimport re\nimport platform\nimport traceback\n\nfrom patch.UbuntuPatching import UbuntuPatching\nfrom patch.debianPatching import debianPatching\nfrom patch.redhatPatching import redhatPatching\nfrom patch.centosPatching import centosPatching\nfrom patch.SuSEPatching import SuSEPatching\nfrom patch.oraclePatching import oraclePatching\nfrom patch.KaliPatching import KaliPatching\nfrom patch.DefaultPatching import DefaultPatching\nfrom patch.FreeBSDPatching import FreeBSDPatching\nfrom patch.NSBSDPatching import NSBSDPatching\n\n# Define the function in case waagent(<2.0.4) doesn't have DistInfo()\ndef DistInfo():\n    try:\n        if 'FreeBSD' in platform.system():\n            release = re.sub('\\\\-.*$', '', str(platform.release()))\n            distinfo = ['FreeBSD', release]\n            return distinfo\n        if 'NS-BSD' in platform.system():\n            release = re.sub('\\\\-.*$', '', str(platform.release()))\n            distinfo = ['NS-BSD', release]\n            return distinfo\n        if 'linux_distribution' in dir(platform):\n            distinfo = list(platform.linux_distribution(full_distribution_name=0))\n            # remove trailing whitespace in distro name\n            if(distinfo[0] == ''):\n                osfile= open(\"/etc/os-release\", \"r\")\n                for line in osfile:\n                    lists=str(line).split(\"=\")\n                    if(lists[0]== \"NAME\"):\n                        distname = lists[1].split(\"\\\"\")\n                        distinfo[0] = distname[1]\n                        if(distinfo[0].lower() == \"sles\"):\n                            distinfo[0] = \"SuSE\"\n                osfile.close()\n            distinfo[0] = distinfo[0].strip()\n            return distinfo\n        if 'Linux' in platform.system():\n            distinfo = [\"Default\"]\n            if \"ubuntu\" in platform.version().lower():\n                distinfo[0] = \"Ubuntu\"\n            elif 'suse' in platform.version().lower():\n                distinfo[0] = \"SuSE\"\n            elif 'centos' in platform.version().lower():\n                distinfo[0] = \"centos\"\n            elif 'debian' in platform.version().lower():\n                distinfo[0] = \"debian\"\n            elif 'oracle' in platform.version().lower():\n                distinfo[0] = \"oracle\"\n            elif 'redhat' in platform.version().lower() or 'rhel' in platform.version().lower():\n                distinfo[0] = \"redhat\"\n            elif 'kali' in platform.version().lower():\n                distinfo[0] = \"Kali\"\n            return distinfo\n        else:\n            return platform.dist()\n    except Exception as e:\n        errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())\n        logger.log(errMsg)\n        distinfo = ['Abstract','1.0']\n        
        return distinfo\n\ndef GetMyPatching(logger):\n    \"\"\"\n    Return MyPatching object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    dist_info = DistInfo(logger)\n    if 'Linux' in platform.system():\n        Distro = dist_info[0]\n    else:\n        # Not Linux: derive the name from platform.system() so Distro is always bound.\n        Distro = platform.system()\n        if 'NS-BSD' in Distro:\n            Distro = Distro.replace(\"-\", \"\")\n    Distro = Distro.strip('\"')\n    Distro = Distro.strip(' ')\n    orig_distro = Distro\n    patching_class_name = Distro + 'Patching'\n    if patching_class_name not in globals():\n        if 'suse' in Distro.lower():\n            Distro = 'SuSE'\n        elif 'ubuntu' in Distro.lower():\n            Distro = 'Ubuntu'\n        elif 'centos' in Distro.lower() or 'big-ip' in Distro.lower():\n            Distro = 'centos'\n        elif 'debian' in Distro.lower():\n            Distro = 'debian'\n        elif 'oracle' in Distro.lower():\n            Distro = 'oracle'\n        elif 'redhat' in Distro.lower():\n            Distro = 'redhat'\n        elif 'kali' in Distro.lower():\n            Distro = 'Kali'\n        elif 'freebsd' in Distro.lower() or 'gaia' in Distro.lower() or 'panos' in Distro.lower():\n            Distro = 'FreeBSD'\n        else:\n            Distro = 'Default'\n        patching_class_name = Distro + 'Patching'\n    patchingInstance = globals()[patching_class_name](logger, dist_info)\n    return patchingInstance, patching_class_name, orig_distro\n"
  },
  {
    "path": "VMBackup/main/patch/centosPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.redhatPatching import redhatPatching\nfrom common import *\n\nclass centosPatching(redhatPatching):\n    def __init__(self,logger,distro_info):\n        super(centosPatching,self).__init__(logger,distro_info)\n        self.logger = logger\n        self.usr_flag = 0\n        if(distro_info[1] == \"6.8\" or distro_info[1] == \"6.7\" or distro_info[1] == \"6.6\" or distro_info[1] == \"6.5\" or distro_info[1] == \"6.9\" or distro_info[1] == \"6.3\"):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk'\n            self.usr_flag = 0\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.usr_flag = 1\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_extras(self):\n        common_extras = ['cryptsetup','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n"
  },
  {
    "path": "VMBackup/main/patch/debianPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass debianPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(debianPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    def install_extras(self):\n        pass\n"
  },
  {
    "path": "VMBackup/main/patch/oraclePatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.redhatPatching import redhatPatching\nfrom common import *\n\n\nclass oraclePatching(redhatPatching):\n    def __init__(self,logger,distro_info):\n        super(oraclePatching,self).__init__(logger,distro_info)\n        self.logger = logger\n        if(distro_info is not None and len(distro_info) > 0 and distro_info[1].startswith(\"6.\")):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_extras(self):\n        common_extras = ['cryptsetup','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n"
  },
  {
    "path": "VMBackup/main/patch/redhatPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom patch.AbstractPatching import AbstractPatching\nfrom common import *\n\n\nclass redhatPatching(AbstractPatching):\n    def __init__(self,logger,distro_info):\n        super(redhatPatching,self).__init__(distro_info)\n        self.logger = logger\n        self.usr_flag = 0\n        if(distro_info is not None and len(distro_info) > 0 and distro_info[1].startswith(\"6.\")):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/bin/lsblk'\n            self.usr_flag = 0\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.usr_flag = 1\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_extras(self):\n        common_extras = ['cryptsetup','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n"
  },
  {
    "path": "VMBackup/main/safefreeze/Makefile",
    "content": "CC := gcc\nSRCDIR := src\nLIBDIR := lib\nBINDIRNEW := binNew\nBINDIROLD := bin\nBINDIR := $(BINDIRNEW)\nINCDIR := include\nBUILDDIR := build\nTARGET := $(BINDIR)/safefreeze\n\nSRCEXT := c\nSOURCES := $(shell find $(SRCDIR) -type f -name *.$(SRCEXT))\nOBJECTS := $(patsubst $(SRCDIR)/%,$(BUILDDIR)/%,$(SOURCES:.$(SRCEXT)=.o))\nCFLAGS := -g\n\nLDFLAGS := -static -static-libgcc\nINC := -I $(INCDIR)\nLIB := -L $(LIBDIR)\n\nall : $(TARGET)\n\n$(TARGET): $(OBJECTS)\n\t@echo \"Linking...\"\n\t@mkdir -p $(BINDIR)\n\t$(CC) $^ $(LDFLAGS) -o $(TARGET) $(LIB)\n\n$(BUILDDIR)/%.o: $(SRCDIR)/%.$(SRCEXT)\n\t@mkdir -p $(BUILDDIR)\n\t@echo \"Compiling...\"\n\t$(CC) $(CFLAGS) $(INC) -c -o $@ $<\n\nclean:\n\t@echo \"Cleaning...\"\n\t$(RM) -r $(BUILDDIR) $(BINDIR)\n\n.PHONY: clean\n"
  },
  {
    "path": "VMBackup/main/safefreeze/src/safefreeze.c",
    "content": "//\n// Copyright 2016 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n\n#include <stdio.h>\n#include <stdarg.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <signal.h>\n#include <linux/fs.h>\n#include <sys/ioctl.h>\n#include <time.h>\n#include <string.h>\n#include<unistd.h>\n#include<sys/stat.h>\n#include <errno.h>\n\n\n#define JUMPWITHSTATUS(x)        \\\n{                                \\\n    status = (x);                \\\n    if (status) goto CLEANUP;    \\\n}\nvoid logger(const char *logstr,...)\n{\n    time_t mytime;\n    struct tm * timeinfo;\n    char buffer[80];\n    time(&mytime);\n    timeinfo = localtime(&mytime);\n    strftime(buffer, 80, \"%F %X\", timeinfo);\n    va_list arg;\n    int done;\n    printf(\"%s \", buffer);\n    va_start(arg, logstr);\n    done = vfprintf(stdout,  logstr, arg);\n    va_end(arg);\n}\n\nint gThaw = 0;\n\n\nvoid globalSignalHandler(int signum)\n{\n    if (signum == SIGUSR1)\n    {\n        gThaw = 1;\n    }\n}\n\n\nvoid printUsage()\n{\n    logger(\"Usage: safefreeze TimeoutInSeconds MountPoint1 [MountPoint2 [MountPoint3 [..]]]\\n\");\n}\n\n\nint main(int argc, char *argv[])\n{\n    int status = EXIT_SUCCESS;\n\n    int timeout = 0;\n    int numFileSystems = 0;\n    int *fileSystemDescriptors = NULL;\n\n    int i = 0;\n\n    if (argc < 3)\n    {\n        printUsage();\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    if ((timeout = atoi(argv[1])) <= 0)\n    {\n        printUsage();\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    numFileSystems = argc - 2;\n    fileSystemDescriptors = (int *) malloc(sizeof(int) * numFileSystems);\n\n    for (i = 0; i < numFileSystems; i++)\n    {\n        fileSystemDescriptors[i] = -1;\n    }\n\n    for (i = 0; i < numFileSystems; i++)\n    {\n        char *mountPoint = argv[i + 2];\n\n        if ((fileSystemDescriptors[i] = open(mountPoint, O_RDONLY | O_NONBLOCK)) < 0)\n        {\n            int errsv = errno;\n            logger(\"Failed to open: %s with error: %d and error message: %s\\n\", mountPoint, fileSystemDescriptors[i], strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n\n        struct stat sb;\n\n        if (fstat(fileSystemDescriptors[i], &sb) == -1)\n        {\n            int errsv = errno;\n            logger(\"Failed to stat: %s with error message: %s\\n\", mountPoint, strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n\n        if ((sb.st_mode & S_IFDIR) == 0)\n        {\n            logger(\"Path not a directory: %s\\n\", mountPoint);\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n    }\n\n    struct sigaction globalSignalAction = {0};\n    globalSignalAction.sa_handler = globalSignalHandler;\n\n    if (sigaction(SIGHUP, &globalSignalAction, NULL) ||\n        sigaction(SIGINT, &globalSignalAction, NULL) ||\n        sigaction(SIGQUIT, &globalSignalAction, NULL) ||\n        sigaction(SIGABRT, &globalSignalAction, NULL) ||\n        sigaction(SIGPIPE, &globalSignalAction, 
NULL) ||\n        sigaction(SIGTERM, &globalSignalAction, NULL) ||\n        sigaction(SIGUSR1, &globalSignalAction, NULL) ||\n        sigaction(SIGUSR2, &globalSignalAction, NULL) ||\n        sigaction(SIGTSTP, &globalSignalAction, NULL) ||\n        sigaction(SIGTTIN, &globalSignalAction, NULL) ||\n        sigaction(SIGTTOU, &globalSignalAction, NULL)\n       )\n    {\n        logger(\"Failed to setup signal handlers\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    logger(\"****** 2. Binary Freeze Started \\n\");\n    for (i = 0; i < numFileSystems; i++)\n    {\n        char *mountPoint = argv[i + 2];\n        logger(\"Freezing: %s\\n\", mountPoint);\n\n        if (ioctl(fileSystemDescriptors[i], FIFREEZE, 0) != 0)\n        {\n            int errsv = errno;\n            logger(\"Failed to FIFREEZE: %s with error message: %s\\n\", mountPoint, strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n    }\n\n    logger(\"****** 3. Binary Freeze Completed \\n\");\n\n    if (kill(getppid(), SIGUSR1) != 0)\n    {\n        logger(\"Failed to send FreezeCompletion to parent process\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    time_t starttime,currenttime;\n    currenttime=time(NULL);\n    starttime=time(NULL);\n    for (i = 0; i < timeout; i++)\n    {\n        if (gThaw == 1 )\n        {\n            logger(\"****** 8. Binary Thaw Signal Received \\n\");\n            break;\n        }\n        else\n        {\n            sleep(1);\n            logger(\"sleep for 1 second \\n\");\n        }\n    }\n    currenttime=time(NULL);\n    if (gThaw != 1 && currenttime > starttime+timeout-1)\n    {\n        logger(\"Failed to receive timely Thaw from parent process\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n    else if (gThaw != 1)\n    {\n        logger(\"Inconsistent snapshot because of SLEEP failure \\n\");\n        JUMPWITHSTATUS(2);\n    }\n\nCLEANUP:\n\n    if (fileSystemDescriptors != NULL)\n    {\n        for (i = numFileSystems-1 ; i >= 0; i--)\n        {\n            if (fileSystemDescriptors[i] >= 0)\n            {\n                char *mountPoint = argv[i + 2];\n                logger(\"Thawing: %s\\n\", mountPoint);\n\n                if (ioctl(fileSystemDescriptors[i], FITHAW, 0) != 0)\n                {\n                    logger(\"Failed to FITHAW: %s with error message : %s\\n\", mountPoint, strerror(errno));\n                    status = EXIT_FAILURE;\n                }\n\n                close(fileSystemDescriptors[i]);\n                fileSystemDescriptors[i] = -1;\n            }\n        }\n        free(fileSystemDescriptors);\n        fileSystemDescriptors = NULL;\n    }\n\n    return status;\n}\n"
  },
  {
    "path": "VMBackup/main/safefreezeArm64/Makefile",
    "content": "CC := aarch64-linux-gnu-gcc\nSRCDIR := src\nLIBDIR := lib\nBINDIRNEW := binNew\nBINDIROLD := bin\nBINDIR := $(BINDIRNEW)\nINCDIR := include\nBUILDDIR := build\nTARGET := $(BINDIR)/safefreeze\n\nSRCEXT := c\nSOURCES := $(shell find $(SRCDIR) -type f -name *.$(SRCEXT))\nOBJECTS := $(patsubst $(SRCDIR)/%,$(BUILDDIR)/%,$(SOURCES:.$(SRCEXT)=.o))\nCFLAGS := -g\n\nLDFLAGS := -static -static-libgcc\nINC := -I $(INCDIR)\nLIB := -L $(LIBDIR)\n\nall : $(TARGET)\n\n$(TARGET): $(OBJECTS)\n\t@echo \"Linking...\"\n\t@mkdir -p $(BINDIR)\n\t$(CC) $^ $(LDFLAGS) -o $(TARGET) $(LIB)\n\n$(BUILDDIR)/%.o: $(SRCDIR)/%.$(SRCEXT)\n\t@mkdir -p $(BUILDDIR)\n\t@echo \"Compiling...\"\n\t$(CC) $(CFLAGS) $(INC) -c -o $@ $<\n\nclean:\n\t@echo \"Cleaning...\"\n\t$(RM) -r $(BUILDDIR) $(BINDIR)\n\n.PHONY: clean\n"
  },
  {
    "path": "VMBackup/main/safefreezeArm64/src/safefreeze.c",
    "content": "//\n// Copyright 2016 Microsoft Corporation\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n\n\n#include <stdio.h>\n#include <stdarg.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <signal.h>\n#include <linux/fs.h>\n#include <sys/ioctl.h>\n#include <time.h>\n#include <string.h>\n#include<unistd.h>\n#include<sys/stat.h>\n#include <errno.h>\n\n\n#define JUMPWITHSTATUS(x)        \\\n{                                \\\n    status = (x);                \\\n    if (status) goto CLEANUP;    \\\n}\nvoid logger(const char *logstr,...)\n{\n    time_t mytime;\n    struct tm * timeinfo;\n    char buffer[80];\n    time(&mytime);\n    timeinfo = localtime(&mytime);\n    strftime(buffer, 80, \"%F %X\", timeinfo);\n    va_list arg;\n    int done;\n    printf(\"%s \", buffer);\n    va_start(arg, logstr);\n    done = vfprintf(stdout,  logstr, arg);\n    va_end(arg);\n}\n\nint gThaw = 0;\n\n\nvoid globalSignalHandler(int signum)\n{\n    if (signum == SIGUSR1)\n    {\n        gThaw = 1;\n    }\n}\n\n\nvoid printUsage()\n{\n    logger(\"Usage: safefreeze TimeoutInSeconds MountPoint1 [MountPoint2 [MountPoint3 [..]]]\\n\");\n}\n\n\nint main(int argc, char *argv[])\n{\n    int status = EXIT_SUCCESS;\n\n    int timeout = 0;\n    int numFileSystems = 0;\n    int *fileSystemDescriptors = NULL;\n\n    int i = 0;\n\n    if (argc < 3)\n    {\n        printUsage();\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    if ((timeout = atoi(argv[1])) <= 0)\n    {\n        printUsage();\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    numFileSystems = argc - 2;\n    fileSystemDescriptors = (int *) malloc(sizeof(int) * numFileSystems);\n\n    for (i = 0; i < numFileSystems; i++)\n    {\n        fileSystemDescriptors[i] = -1;\n    }\n\n    for (i = 0; i < numFileSystems; i++)\n    {\n        char *mountPoint = argv[i + 2];\n\n        if ((fileSystemDescriptors[i] = open(mountPoint, O_RDONLY | O_NONBLOCK)) < 0)\n        {\n            int errsv = errno;\n            logger(\"Failed to open: %s with error: %d and error message: %s\\n\", mountPoint, fileSystemDescriptors[i], strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n\n        struct stat sb;\n\n        if (fstat(fileSystemDescriptors[i], &sb) == -1)\n        {\n            int errsv = errno;\n            logger(\"Failed to stat: %s with error message: %s\\n\", mountPoint, strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n\n        if ((sb.st_mode & S_IFDIR) == 0)\n        {\n            logger(\"Path not a directory: %s\\n\", mountPoint);\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n    }\n\n    struct sigaction globalSignalAction = {0};\n    globalSignalAction.sa_handler = globalSignalHandler;\n\n    if (sigaction(SIGHUP, &globalSignalAction, NULL) ||\n        sigaction(SIGINT, &globalSignalAction, NULL) ||\n        sigaction(SIGQUIT, &globalSignalAction, NULL) ||\n        sigaction(SIGABRT, &globalSignalAction, NULL) ||\n        sigaction(SIGPIPE, &globalSignalAction, 
NULL) ||\n        sigaction(SIGTERM, &globalSignalAction, NULL) ||\n        sigaction(SIGUSR1, &globalSignalAction, NULL) ||\n        sigaction(SIGUSR2, &globalSignalAction, NULL) ||\n        sigaction(SIGTSTP, &globalSignalAction, NULL) ||\n        sigaction(SIGTTIN, &globalSignalAction, NULL) ||\n        sigaction(SIGTTOU, &globalSignalAction, NULL)\n       )\n    {\n        logger(\"Failed to setup signal handlers\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    logger(\"****** 2. Binary Freeze Started \\n\");\n    for (i = 0; i < numFileSystems; i++)\n    {\n        char *mountPoint = argv[i + 2];\n        logger(\"Freezing: %s\\n\", mountPoint);\n\n        if (ioctl(fileSystemDescriptors[i], FIFREEZE, 0) != 0)\n        {\n            int errsv = errno;\n            logger(\"Failed to FIFREEZE: %s with error message: %s\\n\", mountPoint, strerror(errsv));\n            JUMPWITHSTATUS(EXIT_FAILURE);\n        }\n    }\n\n    logger(\"****** 3. Binary Freeze Completed \\n\");\n\n    if (kill(getppid(), SIGUSR1) != 0)\n    {\n        logger(\"Failed to send FreezeCompletion to parent process\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n\n    time_t starttime,currenttime;\n    currenttime=time(NULL);\n    starttime=time(NULL);\n    for (i = 0; i < timeout; i++)\n    {\n        if (gThaw == 1 )\n        {\n            logger(\"****** 8. Binary Thaw Signal Received \\n\");\n            break;\n        }\n        else\n        {\n            sleep(1);\n            logger(\"sleep for 1 second \\n\");\n        }\n    }\n    currenttime=time(NULL);\n    if (gThaw != 1 && currenttime > starttime+timeout-1)\n    {\n        logger(\"Failed to receive timely Thaw from parent process\\n\");\n        JUMPWITHSTATUS(EXIT_FAILURE);\n    }\n    else if (gThaw != 1)\n    {\n        logger(\"Inconsistent snapshot because of SLEEP failure \\n\");\n        JUMPWITHSTATUS(2);\n    }\n\nCLEANUP:\n\n    if (fileSystemDescriptors != NULL)\n    {\n        for (i = numFileSystems-1 ; i >= 0; i--)\n        {\n            if (fileSystemDescriptors[i] >= 0)\n            {\n                char *mountPoint = argv[i + 2];\n                logger(\"Thawing: %s\\n\", mountPoint);\n\n                if (ioctl(fileSystemDescriptors[i], FITHAW, 0) != 0)\n                {\n                    logger(\"Failed to FITHAW: %s with error message : %s\\n\", mountPoint, strerror(errno));\n                    status = EXIT_FAILURE;\n                }\n\n                close(fileSystemDescriptors[i]);\n                fileSystemDescriptors[i] = -1;\n            }\n        }\n        free(fileSystemDescriptors);\n        fileSystemDescriptors = NULL;\n    }\n\n    return status;\n}\n"
  },
  {
    "path": "VMBackup/main/taskidentity.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport xml\nimport xml.dom.minidom\n\nclass TaskIdentity:\n    def __init__(self):\n        self.store_identity_file = './task_identity_FD76C85E-406F-4CFA-8EB0-CF18B123365C'\n\n    def save_identity(self,identity):\n        with open(self.store_identity_file,'w') as f:\n            f.write(identity)\n\n    def stored_identity(self):\n        identity_stored = None\n        if(os.path.exists(self.store_identity_file)):\n            with open(self.store_identity_file,'r') as f:\n                identity_stored = f.read()\n        return identity_stored\n"
  },
  {
    "path": "VMBackup/main/tempPlugin/VMSnapshotScriptPluginConfig.json",
    "content": "{\n    \"pluginName\" : \"ScriptRunner\",\n    \"preScriptLocation\" : \"\",\n    \"postScriptLocation\" : \"\",\n    \"preScriptParams\" : [\"\", \"\"],\n    \"postScriptParams\" : [\"\", \"\"],\n    \"preScriptNoOfRetries\" : 0,\n    \"postScriptNoOfRetries\" : 0,\n    \"timeoutInSeconds\" : 30,\n    \"continueBackupOnFailure\" : true,\n    \"fsFreezeEnabled\" : true\n}\n"
  },
  {
    "path": "VMBackup/main/tempPlugin/postScript.sh",
    "content": "#!/bin/bash\ninstance=$1\n\n# variables used for returning the status of the scripts\nsuccess=0\nerror=1\nwarning=2\n\nretVal=$success\nexit $retVal\n\n"
  },
  {
    "path": "VMBackup/main/tempPlugin/preScript.sh",
    "content": "#!/bin/bash\ninstance=$1\n\n# variables used for returning the status of the scripts\nsuccess=0\nerror=1\nwarning=2\n\nretVal=$success\nexit $retVal\n\n"
  },
  {
    "path": "VMBackup/main/tempPlugin/vmbackup.conf",
    "content": "[SnapshotThread]\nfsfreeze: True\n\n"
  },
  {
    "path": "VMBackup/main/workloadPatch/CustomScripts/customscript.sql",
    "content": ""
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/logbackup.sql",
    "content": "ALTER SYSTEM ARCHIVE LOG CURRENT;\nALTER DATABASE BACKUP CONTROLFILE TO '&1/control.ctl';\nQUIT;"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/postMysqlMaster.sql",
    "content": "SET GLOBAL read_only = OFF; UNLOCK TABLES;"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/postMysqlSlave.sql",
    "content": "START SLAVE;SELECT SLEEP(5);\nSET GLOBAL read_only = OFF; UNLOCK TABLES;\n"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/postOracleMaster.sql",
    "content": "REM ================================================================================\nREM File:       postOracleMaster.sql\nREM Date:       16-Sep 2020\nREM Type:       Oracle SQL*Plus script\nREM Author:     Microsoft CAE team\nREM\nREM Description:\nREM             Oracle SQL*Plus script called as an Azure Backup \"post\" script to\nREM             run immediately following a backup snapshot.\nREM\nREM             SQL*Plus is executed in RESTRICTED LEVEL 2 mode, which means that\nREM             commands like HOST and SPOOL are not permitted, but commands like\nREM             START are permitted.\nREM\nREM Modifications:\nREM     TGorman 05oct22 v0.1 - remove external dependency on AZMESSAGE procedure\nREM     TGorman 13dec22 v0.2 - support for DATABASE_ROLE = 'STANDBY'\nREM ================================================================================\nREM\nREM ********************************************************************************\nREM store script version into SQL*Plus substitution variable...\nREM ********************************************************************************\ndefine V_SCRIPT_VERSION=\"0.2\"\n\nREM\nREM ********************************************************************************\nREM Format standard output to be terse...\nREM ********************************************************************************\n\nSET ECHO OFF FEEDBACK OFF TIMING OFF PAGESIZE 0 LINESIZE 130 TRIMOUT ON TRIMSPOOL ON VERIFY OFF\n\nREM\nREM ********************************************************************************\nREM Uncomment the following SET command to make commands, status feedback, and\nREM timings visible for debugging...\nREM ********************************************************************************\n\nREM SET ECHO ON FEEDBACK ON TIMING ON\n\nREM\nREM ********************************************************************************\nREM Connect this SQL*Plus session to the current database instance as SYSBACKUP...\nREM (be sure to leave one blank line before the CONNECT command)\nREM\nREM If databases are 11g or older, then please replace the following line with\nREM \"CONNECT / AS SYSDBA\", because the SYSBACKUP role was introduced in 12c...\nREM ********************************************************************************\n\nCONNECT / AS SYSBACKUP\nREM CONNECT / AS SYSDBA\n\nREM\nREM ********************************************************************************\nREM Retrieve the status of the Oracle database instance, and exit from SQL*Plus\nREM with SUCCESS exit status if database instance is not OPEN...\nREM ********************************************************************************\n\nWHENEVER OSERROR EXIT SUCCESS\nWHENEVER SQLERROR EXIT SUCCESS\nCOL STATUS NEW_VALUE V_STATUS\nSELECT 'STATUS='||STATUS AS STATUS FROM V$INSTANCE;\nEXEC IF '&&V_STATUS' <> 'STATUS=OPEN' THEN RAISE NOT_LOGGED_ON; END IF;\n\nREM\nREM ********************************************************************************\nREM Next, if SQL*Plus has not exited as a result of the last command, now ensure that\nREM the failure of any command results in a FAILURE exit status from SQL*Plus...\nREM ********************************************************************************\n\nWHENEVER OSERROR EXIT FAILURE\nWHENEVER SQLERROR EXIT FAILURE\n\nREM\nREM ********************************************************************************\nREM Display the LOG_MODE of the database to be captured by the calling Python code...\nREM 
********************************************************************************\n\nSELECT 'LOG_MODE='||LOG_MODE AS LOG_MODE FROM V$DATABASE;\n\nREM\nREM ********************************************************************************\nREM Display the DATABASE_ROLE of the database to be captured by the calling Python code...\nREM ********************************************************************************\nSELECT 'DATABASE_ROLE='||database_role AS DATABASE_ROLE FROM V$DATABASE;\n\nREM\nREM ********************************************************************************\nREM Enable emitting DBMS_OUTPUT to standard output...\nREM ********************************************************************************\n\nSET SERVEROUTPUT ON SIZE 1000000\n\nREM\nREM ********************************************************************************\nREM Attempt to take the database out from BACKUP mode, which will succeed only if the\nREM database is presently in ARCHIVELOG mode and if the database was already in\nREM BACKUP mode...\nREM ********************************************************************************\n\nDECLARE\n        --\n        v_errcontext            varchar2(128);\n        v_timestamp\t\tvarchar2(32);\n\tv_database_role\t\tvarchar2(32);\n        noArchiveLogMode        exception;\n\tnotInBackup\t\texception;\n        pragma                  exception_init(noArchiveLogMode, -1123);\n        pragma                  exception_init(notInBackup, -1142);\n        --\nBEGIN\n        --\n\tv_errcontext := 'query DATABASE_ROLE';\n\tSELECT\tDATABASE_ROLE\n\tINTO\tv_database_role\n\tFROM\tV$DATABASE;\n\t--\n\tif v_database_role = 'PRIMARY' then\n\t\t--\n\t\tv_errcontext := 'END BACKUP';\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n        \t--\n\t\texecute immediate 'ALTER DATABASE END BACKUP';\n\t\t--\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n        \t--\n\tend if;\n        --\nEXCEPTION\n        --\n        when noArchiveLogMode then\n                --\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' in NOARCHIVELOG failed - continuing backup...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' in NOARCHIVELOG failed - continuing backup...');\n\t--\n        when notInBackup then\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - WARN - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed as no datafiles in BACKUP mode - continuing backup...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'WARN - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed as no datafiles in BACKUP mode - continuing 
backup...');\n        --\n        when others then\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || '  failed');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                raise;\n        --\nEND;\n/\n\nREM \nREM ********************************************************************************\nREM Force a switch of the online redo logfiles, which will force a full checkpoint,\nREM and then archive the current logfile...\nREM ********************************************************************************\n\nDECLARE\n        --\n        v_errcontext            varchar2(128);\n        v_timestamp\t\tvarchar2(32);\n\tv_database_role\t\tvarchar2(32);\n        noArchiveLogMode        exception;\n        pragma                  exception_init(noArchiveLogMode, -258);\n        --\nBEGIN\n        --\n\tv_errcontext := 'query DATABASE_ROLE';\n\tSELECT\tDATABASE_ROLE\n\tINTO\tv_database_role\n\tFROM\tV$DATABASE;\n\t--\n\tif v_database_role = 'PRIMARY' then\n\t\t--\n        \tv_errcontext := 'ARCHIVE LOG CURRENT';\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n        \t--\n        \texecute immediate 'ALTER SYSTEM ARCHIVE LOG CURRENT';\n        \t--\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\t--\n\tend if;\n        --\nEXCEPTION\n        --\n        when noArchiveLogMode then\n                begin\n                        --\n        \t\tv_errcontext := 'SWITCH LOGFILE';\n\t\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n                        --\n                        execute immediate 'ALTER SYSTEM SWITCH LOGFILE';\n                        --\n\t\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n                        --\n                exception\n                        when others then\n\t\t\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n\t\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup post-script 
v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                                raise;\n                end;\n        --\n        when others then\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup post-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                raise;\n        --\nEND;\n/\n\nREM\nREM ********************************************************************************\nREM Exit from Oracle SQL*Plus with SUCCESS exit status...\nREM ********************************************************************************\n\nEXIT SUCCESS\n"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/postPostgresMaster.sql",
    "content": "SELECT pg_stop_backup();"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/preMysqlMaster.sql",
    "content": "FLUSH TABLES WITH READ LOCK; SET GLOBAL read_only = ON;\nset @query = concat(\"SELECT \\\"serverlevel\\\" INTO OUTFILE \",@outfile);\nprepare stmt from @query;\nexecute stmt;deallocate prepare stmt;\nSELECT SLEEP(@timeout);"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/preMysqlSlave.sql",
    "content": "STOP SLAVE;SELECT SLEEP(5);\nFLUSH TABLES WITH READ LOCK; SET GLOBAL read_only = ON;\nset @query = concat(\"SELECT \\\"serverlevel\\\" INTO OUTFILE \",@outfile);\nprepare stmt from @query;\nexecute stmt;deallocate prepare stmt;\nSELECT SLEEP(@timeout);"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/preOracleMaster.sql",
    "content": "REM ================================================================================\nREM File:       preOracleMaster.sql\nREM Date:       16-Sep 2020\nREM Type:       Oracle SQL*Plus script\nREM Author:     Microsoft CAE team\nREM\nREM Description:\nREM             Oracle SQL*Plus script called as an Azure Backup \"pre\" script, to\nREM             be run immediately prior to a backup snapshot.\nREM\nREM             SQL*Plus is executed in RESTRICTED LEVEL 2 mode, which means that\nREM             commands like HOST and SPOOL are not permitted, but commands like\nREM             START are permitted.\nREM\nREM Modifications:\nREM\tTGorman\t05oct22 v0.1 - remove external dependency on AZMESSAGE procedure\nREM\tTGorman 13dec22 v0.2 - support for DATABASE_ROLE = 'STANDBY'\nREM ================================================================================\nREM\nREM ********************************************************************************\nREM store script version into SQL*Plus substitution variable...\nREM ********************************************************************************\ndefine V_SCRIPT_VERSION=\"0.2\"\n\nREM\nREM ********************************************************************************\nREM Format standard output to be terse...\nREM ********************************************************************************\n\nSET ECHO OFF FEEDBACK OFF TIMING OFF PAGESIZE 0 LINESIZE 130 TRIMOUT ON TRIMSPOOL ON VERIFY OFF\n\nREM\nREM ********************************************************************************\nREM Uncomment the following SET command to make commands, status feedback, and\nREM timings visible for debugging...\nREM ********************************************************************************\n\nREM SET ECHO ON FEEDBACK ON TIMING ON\n\nREM\nREM ********************************************************************************\nREM Connect this SQL*Plus session to the current database instance as SYSBACKUP...\nREM (be sure to leave one blank line before the CONNECT command)\nREM\nREM If databases are 11g or older, then please replace the following line with\nREM \"CONNECT / AS SYSDBA\", because the SYSBACKUP role was introduced in 12c...\nREM ********************************************************************************\n\nCONNECT / AS SYSBACKUP\nREM CONNECT / AS SYSDBA\n\nREM\nREM ********************************************************************************\nREM Retrieve the status of the Oracle database instance, and exit from SQL*Plus\nREM with SUCCESS exit status if database instance is not OPEN...\nREM ********************************************************************************\n\nWHENEVER OSERROR EXIT SUCCESS\nWHENEVER SQLERROR EXIT SUCCESS\nCOL STATUS NEW_VALUE V_STATUS\nSELECT 'STATUS='||STATUS AS STATUS FROM V$INSTANCE;\nEXEC IF '&&V_STATUS' <> 'STATUS=OPEN' THEN RAISE NOT_LOGGED_ON; END IF;\n\nREM\nREM ********************************************************************************\nREM Next, if SQL*Plus has not exited as a result of the last command, now ensure that\nREM the failure of any command results in a FAILURE exit status from SQL*Plus...\nREM ********************************************************************************\n\nWHENEVER OSERROR EXIT FAILURE\nWHENEVER SQLERROR EXIT FAILURE\n\nREM\nREM ********************************************************************************\nREM Display the LOG_MODE of the database to be captured by the calling Python code...\nREM 
********************************************************************************\n\nSELECT 'LOG_MODE='||LOG_MODE AS LOG_MODE FROM V$DATABASE;\n\nREM\nREM ********************************************************************************\nREM Display the DATABASE_ROLE of the database to be captured by the calling Python code...\nREM ********************************************************************************\nSELECT 'DATABASE_ROLE='||database_role AS DATABASE_ROLE FROM V$DATABASE;\n\nREM\nREM ********************************************************************************\nREM Enable emitting DBMS_OUTPUT to standard output...\nREM ********************************************************************************\n\nSET SERVEROUTPUT ON SIZE 1000000\n\nREM\nREM ********************************************************************************\nREM Force a switch of the online redo logfiles, which will force a full checkpoint,\nREM and then archive the current logfile...\nREM ********************************************************************************\n\nDECLARE\n        --\n        v_errcontext            varchar2(128);\n\tv_timestamp\t\tvarchar2(32);\n\tv_database_role\t\tvarchar2(32);\n        noArchiveLogMode        exception;\n        pragma                  exception_init(noArchiveLogMode, -258);\n        --\nBEGIN\n        --\n\tv_errcontext := 'query DATABASE_ROLE';\n\tSELECT\tDATABASE_ROLE\n\tINTO\tv_database_role\n\tFROM\tV$DATABASE;\n\t--\n\tif v_database_role = 'PRIMARY' then\n\t\t--\n        \tv_errcontext := 'ARCHIVE LOG CURRENT';\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n        \t--\n        \texecute immediate 'ALTER SYSTEM ARCHIVE LOG CURRENT';\n       \t\t--\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\t--\n\tend if;\n        --\nEXCEPTION\n        --\n        when noArchiveLogMode then\n                begin\n                        --\n        \t\tv_errcontext := 'SWITCH LOGFILE';\n\t\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n                        --\n                        execute immediate 'ALTER SYSTEM SWITCH LOGFILE';\n                        --\n\t\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n                        --\n                exception\n                        when others then\n\t\t\t\tSELECT 
TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\t\t\tINTO v_timestamp FROM DUAL;\n\t\t\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n\t\t\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                                raise;\n                end;\n        --\n        when others then\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                raise;\n        --\nEND;\n/\n\nREM\nREM ********************************************************************************\nREM Attempt to put the database into BACKUP mode, which will succeed only if the\nREM database is presently in ARCHIVELOG mode\nREM ********************************************************************************\n\nDECLARE\n        --\n        v_errcontext            varchar2(128);\n\tv_timestamp\t\tvarchar2(32);\n\tv_database_role\t\tvarchar2(32);\n        noArchiveLogMode        exception;\n        pragma                  exception_init(noArchiveLogMode, -1123);\n        --\nBEGIN\n        --\n\tv_errcontext := 'query DATABASE_ROLE';\n\tSELECT\tDATABASE_ROLE\n\tINTO\tv_database_role\n\tFROM\tV$DATABASE;\n\t--\n\tif v_database_role = 'PRIMARY' then\n\t\t--\n        \tv_errcontext := 'BEGIN BACKUP';\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: starting ' || v_errcontext || '...');\n        \t--\n        \texecute immediate 'ALTER DATABASE BEGIN BACKUP';\n       \t\t--\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' succeeded');\n\t\t--\n\tend if;\n        --\nEXCEPTION\n        --\n        when noArchiveLogMode then\n                --\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' in NOARCHIVELOG failed - continuing backup...');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'INFO - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' in NOARCHIVELOG failed - continuing backup...');\n        --\n        when others then\n\t\tSELECT TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS')\n\t\tINTO v_timestamp FROM DUAL;\n\t\tDBMS_OUTPUT.PUT_LINE(v_timestamp || ' - FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || '  failed');\n\t\tSYS.DBMS_SYSTEM.KSDWRT(SYS.DBMS_SYSTEM.ALERT_FILE, 'FAIL - AzBackup pre-script v&&V_SCRIPT_VERSION: ' || v_errcontext || ' failed');\n                raise;\n        --\nEND;\n/\n\nREM\nREM ********************************************************************************\nREM Exit from Oracle 
SQL*Plus with SUCCESS exit status...\nREM ********************************************************************************\n\nEXIT SUCCESS\n"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/prePostgresMaster.sql",
    "content": "SELECT pg_start_backup('AzureBackup');"
  },
  {
    "path": "VMBackup/main/workloadPatch/DefaultScripts/timeoutDaemon.sh",
    "content": "#!/usr/bin/env sh\n\narc=0\n\ncomand=\"$2\"\ncred_string=\"$3\"\ntimeout=\"$4\"\nscriptPath=\"$5\"\n\nsleep $timeout\n\nif [ \"$1\" = \"oracle\" ]\nthen\n    cmd=\"$comand/sqlplus -S -R 2 /nolog @$scriptPath/postOracleMaster.sql\"\n    exec $cmd\nelif [ \"$1\" = \"postgres\" ]\nthen\n    cmd=\"$comand/psql $cred_string -f $scriptPath/postPostgresMaster.sql\"\n    exec $cmd\nelse\n    echo \"`date`- incorrect workload name\"\nfi\n\nexit $arc"
  },
  {
    "path": "VMBackup/main/workloadPatch/LogBackupPatch.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport threading\nimport os\nfrom time import sleep\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport subprocess\n\nclass LogBackupPatch:\n    def __init__(self):\n        self.name = \"\"\n        self.cred_string = \"\"\n        self.baseLocation = \"\"\n        self.parameterFilePath = \"\"\n        self.oracleParameter = {}\n        self.backupSource = \"\"\n        self.crontabLocation = \"\"\n        self.command = \"\"\n        self.confParser()\n        self.crontabEntry()\n\n    def crontabEntry(self):\n        if os.path.exists(self.crontabLocation):\n            crontabFile = open(self.crontabLocation, 'r')\n            crontabCheck = crontabFile.read()\n        else:\n            crontabCheck = \"NO CRONTAB\"\n\n        if 'oracle' in self.name.lower():\n            if 'OracleLogBackup' in str(crontabCheck):\n                return\n            else:\n                os.system(\"echo \\\"*/15 * * * * python \" + os.path.join(os.getcwd(), \"main/workloadPatch/WorkloadUtils/OracleLogBackup.py\\\"\") + \" >> /var/spool/cron/root\")\n                return\n    \n    def confParser(self):\n        configfile = '/etc/azure/workload.conf' \n        if os.path.exists(configfile):\n            config = ConfigParsers.ConfigParser()\n            config.read(configfile)\n            if config.has_section(\"logbackup\"):\n                if config.has_option(\"workload\", 'workload_name'):                        \n                    self.name = config.get(\"workload\", 'workload_name')\n                else:\n                    return None\n                if config.has_option(\"workload\", 'command'):                        \n                    self.command = config.get(\"workload\", 'command')\n                if config.has_option(\"workload\", 'credString'):\n                    self.cred_string = config.get(\"workload\", 'credString')\n                if config.has_option(\"logbackup\", 'parameterFilePath'):\n                    self.parameterFilePath = config.get(\"logbackup\", 'parameterFilePath')\n                else:\n                    return None\n                if config.has_option(\"logbackup\", 'baseLocation'):\n                    self.baseLocation = config.get(\"logbackup\", 'baseLocation')\n                else:\n                    return None\n                if config.has_option(\"logbackup\", 'crontabLocation'):\n                    self.crontabLocation = config.get(\"logbackup\", 'crontabLocation')\n        else:\n            return"
  },
  {
    "path": "VMBackup/main/workloadPatch/WorkloadPatch.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport sys\nimport Utils.HandlerUtil\nimport threading\nimport os\nfrom time import sleep\nimport re\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\nimport subprocess\nfrom common import CommonVariables\nfrom workloadPatch.LogBackupPatch import LogBackupPatch\n\nclass ErrorDetail:\n    def __init__(self, errorCode, errorMsg):\n        self.errorCode = errorCode\n        self.errorMsg = errorMsg\n    \nclass WorkloadPatch:\n    def __init__(self, logger):\n        self.logger = logger\n        self.name = None\n        self.supported_workload = [\"oracle\", \"mysql\", \"mariadb\", \"postgres\"]\n        self.command = \"\"\n        self.dbnames = []\n        self.cred_string = \"\"\n        self.ipc_folder = None\n        self.error_details = []\n        self.enforce_slave_only = 0\n        self.role = \"master\"\n        self.child = []\n        self.timeout = \"90\"\n        self.linux_user = \"root\"\n        self.sudo_user = \"sudo\"\n        self.outfile = \"\"\n        self.logbackup = \"\"\n        self.custom_scripts_enabled = 0\n        self.scriptpath= \"DefaultScripts\"\n        self.temp_script_folder= \"/etc/azure\"\n        self.configuration_path = \"\"\n        self.confParser()\n        self.pre_database_status = \"\"\n        self.pre_log_mode = \"\"\n        self.post_database_status = \"\"\n        self.post_log_mode = \"\"\n        self.instance_list = []\n\n    def readOracleList(self,filePath):\n        re_db = re.compile(r'^(?P<DB>(\\w+)):(?P<PATH>(/|\\w+|\\.)+)(:(\\w*))?')\n        with open(filePath, 'r') as f:\n            for line in f:\n                line = line.strip()\n                re_db_match = re_db.search(line)\n                if re_db_match:\n                    db = re_db_match.group('DB')\n                    path = re_db_match.group('PATH')\n                    curr_dict = {\"sid\": db, \"home\": path, \"preSuccess\": False, \"postSuccess\": False, \"noArchive\": False, \"dbOpen\": True}\n                    self.instance_list.append(curr_dict)\n\n    def pre(self):\n        try:\n            self.logger.log(\"WorkloadPatch: Entering workload pre call\")\n            self.createTempScriptsFolder()\n            if self.role == \"master\" and int(self.enforce_slave_only) == 0:\n                if self.configuration_path:\n                    self.preInstance()\n                elif len(self.dbnames) == 0 :\n                    #pre at server level create fork process for child and append\n                    self.preMaster()\n                else:\n                    self.preMasterDB()\n                    # create fork process for child                  \n            elif self.role == \"slave\":\n                if len(self.dbnames) == 0 :\n                    #pre at server level create fork process for child and append\n       
             self.preSlave()\n                else:\n                    self.preSlaveDB()\n                # create fork process for child\n            else:\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidRole, \"invalid role name in config\"))\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in pre\" + str(e))\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPreError, \"Exception in pre\"))\n\n    def post(self):\n        try:\n            self.logger.log(\"WorkloadPatch: Entering workload post call\")\n            if self.role == \"master\":\n                if len(self.instance_list) != 0:\n                    self.postInstance()\n                elif len(self.dbnames) == 0:\n                    #post at server level to turn off readonly mode\n                    self.postMaster()\n                else:\n                    self.postMasterDB()\n            elif self.role == \"slave\":\n                if len(self.dbnames) == 0 :\n                    #post at server level to turn on slave\n                    self.postSlave()\n                else:\n                    self.postSlaveDB()\n            else:\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidRole, \"invalid role name in config\"))\n            #Remove the temporary scripts folder created\n            self.removeTempScriptsFolder()\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in post\" + str(e))\n            #Remove the temporary scripts folder created\n            self.removeTempScriptsFolder()\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPostError, \"exception in processing of postscript\"))\n        \n    def preMaster(self):\n        global preSuccess\n        self.logger.log(\"WorkloadPatch: Entering pre mode for master\")\n        if self.ipc_folder != None:\n            self.outfile = os.path.join(self.ipc_folder, \"azbackupIPC.txt\")\n            if os.path.exists(self.outfile):\n                os.remove(self.outfile)\n            else:\n                self.logger.log(\"WorkloadPatch: File for IPC does not exist at pre\")\n        \n        preSuccess = False\n        \n        if 'mysql' in self.name.lower() or 'mariadb' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Create connection string for premaster mysql\")\n            if self.outfile == \"\":\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadIPCDirectoryMissing, \"IPC directory missing\"))\n                return None\n            prescript = os.path.join(self.temp_script_folder, self.scriptpath + \"/preMysqlMaster.sql\")\n            arg = self.sudo_user+\" \"+self.command+self.name+\" \"+self.cred_string+\" -e\\\"set @timeout=\"+self.timeout+\";set @outfile=\\\\\\\"\\\\\\\\\\\\\\\"\"+self.outfile+\"\\\\\\\\\\\\\\\"\\\\\\\";source \"+prescript+\";\\\"\"\n            binary_thread = threading.Thread(target=self.thread_for_sql, args=[arg])\n            binary_thread.start()\n            self.waitForPreScriptCompletion()\n        elif 'oracle' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Pre- Inside oracle pre\")\n            preOracle = self.command + \"sqlplus\" + \" -S -R 2 /nolog @\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/preOracleMaster.sql \")\n            args = \"su - \"+self.linux_user+\" -c 
\"+\"\\'\"+preOracle+\"\\'\"\n            self.logger.log(\"WorkloadPatch: argument passed for pre script:\"+str(args))\n\n            process = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)\n            wait_counter = 5\n            while process.poll() == None and wait_counter>0:\n                wait_counter -= 1\n                sleep(2)\n            while True:\n                line= process.stdout.readline()\n                line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n                if(line != ''):\n                    self.logger.log(\"WorkloadPatch: pre completed with output \"+line.rstrip(), True)\n                else:\n                    break\n                if('BEGIN BACKUP succeeded' in line):\n                    preSuccess = True\n                    break\n                if('LOG_MODE=' in line):\n                    line = line.replace('\\n','')\n                    line_split = line.split('=')\n                    self.logger.log(\"WorkloadPatch: log mode set is \"+line_split[1], True)\n                    if(line_split[1] == \"ARCHIVELOG\"):\n                        self.pre_log_mode = \"ARCHIVELOG\"\n                        self.logger.log(\"WorkloadPatch: Archive log mode for oracle\")\n                    else:\n                        self.pre_log_mode = \"NOARCHIVELOG\" \n                        self.logger.log(\"WorkloadPatch: No archive log mode for oracle\")\n                if('STATUS=' in line):\n                    line = line.replace('\\n', '')\n                    line_split = line.split('=')\n                    self.logger.log(\"WorkloadPatch: database status is \"+line_split[1], True)\n                    if(line_split[1] == \"OPEN\"):\n                        self.pre_database_status = \"OPEN\"\n                        self.logger.log(\"WorkloadPatch: Database is open\")\n                    else:##handle other DB status if required\n                        self.pre_database_status = \"NOTOPEN\"\n                        self.logger.log(\"WorkloadPatch: Database is not open\")\n\n            if(self.pre_log_mode == \"NOARCHIVELOG\" and self.pre_database_status == \"OPEN\"):\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseInNoArchiveLog, \"Workload in no archive log mode\"))                \n            if(preSuccess == True):\n                self.logger.log(\"WorkloadPatch: pre success is true\")\n                self.timeoutDaemon()\n            elif(self.pre_database_status == \"NOTOPEN\"):\n                self.logger.log(\"WorkloadPatch: Database in closed status, backup can be app consistent\")\n            else:\n                self.logger.log(\"WorkloadPatch: Pre failed for oracle\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPreError, \"Workload Pre failed\"))\n            \n            self.logger.log(\"WorkloadPatch: Pre- Exiting pre mode for master\")\n        elif 'postgres' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Pre- Inside postgres pre\")\n            prePostgres = self.command + \"psql \" + self.cred_string + \" -f \" + os.path.join(os.getcwd(), \"main/workloadPatch/\"+self.scriptpath+\"/prePostgresMaster.sql\")\n            args =  \"su - \"+self.linux_user+\" -c \"+\"\\'\"+prePostgres+\"\\'\"\n            self.logger.log(\"WorkloadPatch: argument passed for pre script:\"+str(self.linux_user)+\"  \"+str(self.command))\n\n            process = 
subprocess.Popen(args,stdout=subprocess.PIPE, shell=True)\n            wait_counter = 5\n            while process.poll() == None and wait_counter>0:\n                wait_counter -= 1\n                sleep(2)\n            while True:\n                line= process.stdout.readline()\n                line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n                if(line != ''):\n                    self.logger.log(\"WorkloadPatch: pre completed with output \"+line.rstrip(), True)\n                else:\n                    break\n            self.timeoutDaemon()\n            self.logger.log(\"WorkloadPatch: Pre- Exiting pre mode for master postgres\")\n        #Add new workload support here\n        else:\n            self.logger.log(\"WorkloadPatch: Unsupported workload name\")\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, \"Workload Not supported\"))\n\n    def postMaster(self):\n        global daemonProcess\n        self.logger.log(\"WorkloadPatch: Entering post mode for master\")\n        try:\n            if self.ipc_folder != None and self.ipc_folder != \"\": #IPCm based workloads\n                if os.path.exists(self.outfile):\n                    os.remove(self.outfile)\n                else:\n                    self.logger.log(\"WorkloadPatch: File for IPC does not exist at post\")\n                if len(self.child) == 0 or self.child[0].poll() is not None:\n                    self.logger.log(\"WorkloadPatch: Not app consistent backup\")\n                    self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,\"not app consistent\"))\n                elif self.child[0].poll() is None:\n                    self.logger.log(\"WorkloadPatch: pre connection still running. Sending kill signal\")\n                    self.child[0].kill()\n            else: #non IPC based workloads\n                if (self.pre_database_status != \"NOTOPEN\") and (daemonProcess is None or daemonProcess.poll() is not None):\n                    self.logger.log(\"WorkloadPatch: Not app consistent backup\")\n                    self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,\"not app consistent\"))\n                elif daemonProcess.poll() is None:\n                    self.logger.log(\"WorkloadPatch: pre connection still running. 
Sending kill signal\")\n                    daemonProcess.kill()\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in daemon process indentification\" + str(e))\n        \n        postSuccess = False\n\n        if 'mysql' in self.name.lower() or 'mariadb' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Create connection string for post master\")\n            postscript = os.path.join(self.temp_script_folder, self.scriptpath + \"/postMysqlMaster.sql\")\n            args = self.sudo_user+\" \"+self.command+self.name+\" \"+self.cred_string+\" < \"+postscript\n            self.logger.log(\"WorkloadPatch: command to execute: \"+str(self.sudo_user)+\"  \"+str(self.command))\n            post_child = subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE)\n        elif 'oracle' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Post- Inside oracle post\")\n            postOracle = self.command + \"sqlplus\" + \" -S -R 2 /nolog @\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/postOracleMaster.sql \")\n            args =  \"su - \"+self.linux_user+\" -c \"+\"\\'\"+postOracle+\"\\'\"\n            self.logger.log(\"WorkloadPatch: argument passed for post script:\"+str(args))\n            process = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)\n            wait_counter = 5\n            while process.poll()==None and wait_counter>0:\n                wait_counter -= 1\n                sleep(2)\n            while True:\n                line= process.stdout.readline()\n                line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n                if(line != ''):\n                    self.logger.log(\"WorkloadPatch: post completed with output \"+line.rstrip(), True)\n                else:\n                    break\n                if 'END BACKUP succeeded' in line:\n                    self.logger.log(\"WorkloadPatch: post succeeded\")\n                    postSuccess = True\n                    break\n                if('LOG_MODE=' in line):\n                    line = line.replace('\\n','')\n                    line_split = line.split('=')\n                    self.logger.log(\"WorkloadPatch: log mode set is \"+line_split[1], True)\n                    if(line_split[1] == \"ARCHIVELOG\"):\n                        self.post_log_mode = \"ARCHIVELOG\"\n                        self.logger.log(\"WorkloadPatch: Archive log mode for oracle\")\n                    else:\n                        self.post_log_mode = \"NOARCHIVELOG\" \n                        self.logger.log(\"WorkloadPatch: No archive log mode for oracle\")\n                if('STATUS=' in line):\n                    line = line.replace('\\n', '')\n                    line_split = line.split('=')\n                    self.logger.log(\"WorkloadPatch: database status is \"+line_split[1], True)\n                    if(line_split[1] == \"OPEN\"):\n                        self.post_database_status = \"OPEN\"\n                        self.logger.log(\"WorkloadPatch: Database is open\")\n                    else:##handle other DB status if required\n                        self.post_database_status = \"NOTOPEN\"\n                        self.logger.log(\"WorkloadPatch: Database is not open\")\n            if((self.pre_log_mode == \"NOARCHIVELOG\" and self.post_log_mode == \"ARCHIVELOG\") or (self.pre_log_mode == \"ARCHIVELOG\" and self.post_log_mode == \"NOARCHIVELOG\")):\n               
 self.logger.log(\"WorkloadPatch: Database log mode changed during backup\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadLogModeChanged, \"Database log mode changed during backup\"))\n            if(postSuccess == False):\n                if(self.pre_database_status == \"NOTOPEN\" and self.post_database_status == \"NOTOPEN\"):\n                    self.logger.log(\"WorkloadPatch: Database in closed status, backup is app consistent\")\n                elif((self.pre_database_status == \"OPEN\" and self.post_database_status == \"NOTOPEN\") or (self.pre_database_status == \"NOTOPEN\" and self.post_database_status == \"OPEN\")):\n                    self.logger.log(\"WorkloadPatch: Database status changed during backup\")\n                    self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseStatusChanged, \"Database status changed during backup\"))\n                else:\n                    self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPostError, \"Workload Post failed\"))\n            \n            self.logger.log(\"WorkloadPatch: Post- Completed\")\n            self.callLogBackup()\n        elif 'postgres' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Post- Inside postgres post\")\n            postPostgres = self.command + \"psql \" + self.cred_string + \" -f \" + os.path.join(os.getcwd(), \"main/workloadPatch/\"+self.scriptpath+\"/postPostgresMaster.sql\")\n            args =  \"su - \"+self.linux_user+\" -c \"+\"\\'\"+postPostgres+\"\\'\"\n            self.logger.log(\"WorkloadPatch: argument passed for post script:\"+str(self.linux_user)+\"  \"+str(self.command))\n            process = subprocess.Popen(args,stdout=subprocess.PIPE, shell=True)\n            wait_counter = 5\n            while process.poll()==None and wait_counter>0:\n                wait_counter -= 1\n                sleep(2)\n            self.logger.log(\"WorkloadPatch: Post- Completed\")\n        #Add new workload support here\n        else:\n            self.logger.log(\"WorkloadPatch: Unsupported workload name\")\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, \"Workload Not supported\"))\n\n    def preSlave(self):\n        self.logger.log(\"WorkloadPatch: Entering pre mode for sloave\")\n        if self.ipc_folder != None:\n            self.outfile = os.path.join(self.ipc_folder, \"azbackupIPC.txt\")\n            if os.path.exists(self.outfile):\n                os.remove(self.outfile)\n            else:\n                self.logger.log(\"WorkloadPatch: File for IPC does not exist at pre\")\n\n        if 'mysql' in self.name.lower() or 'mariadb' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Create connection string for preslave mysql\")\n            if self.outfile == \"\":\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadIPCDirectoryMissing, \"IPC directory missing\"))\n                return None\n            prescript = os.path.join(self.temp_script_folder, self.scriptpath + \"/preMysqlSlave.sql\")\n            arg = self.sudo_user+\" \"+self.command+self.name+\" \"+self.cred_string+\" -e\\\"set @timeout=\"+self.timeout+\";set @outfile=\\\\\\\"\\\\\\\\\\\\\\\"\"+self.outfile+\"\\\\\\\\\\\\\\\"\\\\\\\";source \"+prescript+\";\\\"\"\n            binary_thread = threading.Thread(target=self.thread_for_sql, args=[arg])\n            binary_thread.start()\n            
self.waitForPreScriptCompletion()\n        #Add new workload support here\n        else:\n            self.logger.log(\"WorkloadPatch: Unsupported workload name\")\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, \"Workload Not supported\"))\n         \n    def postSlave(self):\n        self.logger.log(\"WorkloadPatch: Entering post mode for slave\")\n        if self.ipc_folder != None and self.ipc_folder != \"\":#IPCm based workloads\n            if os.path.exists(self.outfile):\n                os.remove(self.outfile)\n            else:\n                self.logger.log(\"WorkloadPatch: File for IPC does not exist at post\")\n            if len(self.child) == 0 or self.child[0].poll() is not None:\n                self.logger.log(\"WorkloadPatch: Not app consistent backup\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,\"not app consistent\"))\n                return\n            elif self.child[0].poll() is None:\n                self.logger.log(\"WorkloadPatch: pre connection still running. Sending kill signal\")\n                self.child[0].kill()\n        else: #non IPC based workloads\n            if (self.pre_database_status != \"NOTOPEN\") and (daemonProcess is None or daemonProcess.poll() is not None):\n                self.logger.log(\"WorkloadPatch: Not app consistent backup\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,\"not app consistent\"))\n                return\n            elif daemonProcess.poll() is None:\n                self.logger.log(\"WorkloadPatch: pre connection still running. Sending kill signal\")\n                daemonProcess.kill()\n\n        if 'mysql' in self.name.lower() or 'mariadb' in self.name.lower():\n            self.logger.log(\"WorkloadPatch: Create connection string for post slave\")\n            postscript = os.path.join(self.temp_script_folder, self.scriptpath + \"/postMysqlSlave.sql\")\n            args = self.sudo_user+\" \"+self.command+self.name+\" \"+self.cred_string+\" < \"+postscript\n            self.logger.log(\"WorkloadPatch: command to execute: \"+str(args))\n            post_child = subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE)\n        #Add new workload support here\n        else:\n            self.logger.log(\"WorkloadPatch: Unsupported workload name\")\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, \"Workload Not supported\"))\n\n    def preInstance(self):\n        if 'oracle' in self.name.lower():\n            self.readOracleList(self.configuration_path)\n            for index in range(len(self.instance_list)):\n                oracleInstance = self.instance_list[index]\n                oracle_home = oracleInstance[\"home\"]\n                commandPath = os.path.join(oracle_home,'bin') + \"/\"\n                self.preMasterOracleInstance(commandPath, index)\n\n    def postInstance(self):\n        if 'oracle' in self.name.lower():\n            for index in range(len(self.instance_list)):\n                oracleInstance = self.instance_list[index]\n                oracle_home = oracleInstance[\"home\"]\n                commandPath = os.path.join(oracle_home,'bin') + \"/\"\n                if ((oracleInstance[\"preSuccess\"] == True or oracleInstance[\"dbOpen\"] == False)):\n                    self.postMasterOracleInstance(commandPath, index)\n        
        else:\n                    if (oracleInstance[\"noArchive\"] == True):\n                        self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseInNoArchiveLog, \"Workload in no archive log mode\"))                \n                    self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPreError, \"Workload Pre failed for SID: \" + oracleInstance[\"sid\"]))\n\n    def preMasterOracleInstance(self, commandPath, instanceIndex):\n        global preSuccess\n        self.logger.log(\"WorkloadPatch: Entering pre mode for master\")           \n        preSuccess = False\n    \n        oracleInstance = self.instance_list[instanceIndex]\n        self.logger.log(\"WorkloadPatch: Pre- Inside oracle pre for instance with SID: \" + oracleInstance[\"sid\"] + \" HOME: \" + oracleInstance[\"home\"])\n        preOracle = commandPath + \"sqlplus\" + \" -S -R 2 /nolog @\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/preOracleMaster.sql \")\n        envExport = \"export ORACLE_SID=\" + oracleInstance[\"sid\"] + \"; export ORACLE_HOME=\" + oracleInstance[\"home\"] + \"; export PATH=\" + oracleInstance[\"home\"] + \"/bin:${PATH}; export ORACLE_UNQNAME=\" + oracleInstance[\"sid\"] + \"; \" \n        args = \"su - \"+self.linux_user+\" -c \"+\"\\'\"+ envExport + preOracle+\"\\'\"\n        self.logger.log(\"WorkloadPatch: argument passed for pre script:\"+str(args))\n        process = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)\n\n        self.instance_list[instanceIndex][\"pid\"] = process.pid\n        wait_counter = 5\n        while process.poll() == None and wait_counter>0:\n            wait_counter -= 1\n            sleep(2)\n        while True:\n            line= process.stdout.readline()\n            line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n            if(line != ''):\n                self.logger.log(\"WorkloadPatch: pre completed with output \"+line.rstrip(), True)\n            else:\n                break\n            if('BEGIN BACKUP succeeded' in line):\n                preSuccess = True\n                break\n            if('LOG_MODE=' in line):\n                line = line.replace('\\n','')\n                line_split = line.split('=')\n                self.logger.log(\"WorkloadPatch: log mode set is \"+line_split[1], True)\n                if(line_split[1] == \"ARCHIVELOG\"):\n                    self.pre_log_mode = \"ARCHIVELOG\"\n                    self.logger.log(\"WorkloadPatch: Archive log mode for oracle\")\n                else:\n                    self.pre_log_mode = \"NOARCHIVELOG\" \n                    self.logger.log(\"WorkloadPatch: No archive log mode for oracle\")\n            if('STATUS=' in line):\n                line = line.replace('\\n', '')\n                line_split = line.split('=')\n                self.logger.log(\"WorkloadPatch: database status is \"+line_split[1], True)\n                if(line_split[1] == \"OPEN\"):\n                    self.pre_database_status = \"OPEN\"\n                    self.logger.log(\"WorkloadPatch: Database is open\")\n                else:##handle other DB status if required\n                    self.pre_database_status = \"NOTOPEN\"\n                    self.instance_list[instanceIndex][\"dbOpen\"] = False\n                    self.logger.log(\"WorkloadPatch: Database is not open\")\n\n        if(self.pre_log_mode == \"NOARCHIVELOG\" and self.pre_database_status == \"OPEN\"):\n            self.instance_list[instanceIndex][\"noArchive\"] 
= True\n        if(preSuccess == True):\n            self.logger.log(\"WorkloadPatch: pre success is true\")\n            self.instance_list[instanceIndex][\"preSuccess\"] = True\n            self.timeoutDaemonOracleInstance(instanceIndex, commandPath)\n        elif(self.pre_database_status == \"NOTOPEN\"):\n            self.logger.log(\"WorkloadPatch: Database in closed status, backup can be app consistent\")\n        else:\n            self.logger.log(\"WorkloadPatch: Pre failed for oracle\")\n            \n        self.logger.log(\"WorkloadPatch: Pre- Exiting pre mode for master\")\n\n    def postMasterOracleInstance(self, commandPath, instanceIndex):\n        global daemonProcess\n\n        if \"daemonProcess\" in self.instance_list[instanceIndex]:\n            daemonProcess = self.instance_list[instanceIndex][\"daemonProcess\"]\n\n        self.logger.log(\"WorkloadPatch: Entering post mode for master\")\n        try:\n            if (self.instance_list[instanceIndex][\"dbOpen\"] == True) and (daemonProcess is None or daemonProcess.poll() is not None):\n                self.logger.log(\"WorkloadPatch: Not app consistent backup\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,\"not app consistent\"))\n            elif daemonProcess.poll() is None:\n                self.logger.log(\"WorkloadPatch: pre connection still running. Sending kill signal\")\n                daemonProcess.kill()\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in daemon process indentification\" + str(e))\n        \n        postSuccess = False\n    \n        oracleInstance = self.instance_list[instanceIndex]\n        self.logger.log(\"WorkloadPatch: Post- Inside oracle post for instance with SID: \" + oracleInstance[\"sid\"] + \" HOME: \" + oracleInstance[\"home\"])\n        postOracle = commandPath + \"sqlplus\" + \" -S -R 2 /nolog @\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/postOracleMaster.sql \")\n        envExport = \"export ORACLE_SID=\" + oracleInstance[\"sid\"] + \"; export ORACLE_HOME=\" + oracleInstance[\"home\"] + \"; export PATH=\" + oracleInstance[\"home\"] + \"/bin:${PATH}; export ORACLE_UNQNAME=\" + oracleInstance[\"sid\"] + \"; \" \n        args =  \"su - \"+self.linux_user+\" -c \"+\"\\'\"+ envExport + postOracle+\"\\'\"\n        self.logger.log(\"WorkloadPatch: argument passed for post script:\"+str(args))\n        process = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)\n        wait_counter = 5\n        while process.poll()==None and wait_counter>0:\n            wait_counter -= 1\n            sleep(2)\n        while True:\n            line= process.stdout.readline()\n            line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n            if(line != ''):\n                self.logger.log(\"WorkloadPatch: post completed with output \"+line.rstrip(), True)\n            else:\n                break\n            if 'END BACKUP succeeded' in line:\n                self.logger.log(\"WorkloadPatch: post succeeded\")\n                postSuccess = True\n                self.instance_list[instanceIndex][\"postSuccess\"] = True\n                break\n            if('LOG_MODE=' in line):\n                line = line.replace('\\n','')\n                line_split = line.split('=')\n                self.logger.log(\"WorkloadPatch: log mode set is \"+line_split[1], True)\n                if(line_split[1] == \"ARCHIVELOG\"):\n                    self.post_log_mode = 
\"ARCHIVELOG\"\n                    self.logger.log(\"WorkloadPatch: Archive log mode for oracle\")\n                else:\n                    self.post_log_mode = \"NOARCHIVELOG\" \n                    self.logger.log(\"WorkloadPatch: No archive log mode for oracle\")\n            if('STATUS=' in line):\n                line = line.replace('\\n', '')\n                line_split = line.split('=')\n                self.logger.log(\"WorkloadPatch: database status is \"+line_split[1], True)\n                if(line_split[1] == \"OPEN\"):\n                    self.post_database_status = \"OPEN\"\n                    self.logger.log(\"WorkloadPatch: Database is open\")\n                else:##handle other DB status if required\n                    self.post_database_status = \"NOTOPEN\"\n                    self.logger.log(\"WorkloadPatch: Database is not open\")\n        if((oracleInstance[\"noArchive\"] == True and self.post_log_mode == \"ARCHIVELOG\") or (oracleInstance[\"noArchive\"] == False and self.post_log_mode == \"NOARCHIVELOG\")):\n            self.logger.log(\"WorkloadPatch: Database log mode changed during backup\")\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadLogModeChanged, \"Database log mode changed during backup\"))\n        if(postSuccess == False):\n            if(oracleInstance[\"dbOpen\"] == False and self.post_database_status == \"NOTOPEN\"):\n                self.logger.log(\"WorkloadPatch: Database in closed status, backup is app consistent\")\n            elif((oracleInstance[\"dbOpen\"] == True and self.post_database_status == \"NOTOPEN\") or (oracleInstance[\"dbOpen\"] == False and self.post_database_status == \"OPEN\")):\n                self.logger.log(\"WorkloadPatch: Database status changed during backup\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseStatusChanged, \"Database status changed during backup\"))\n            else:\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPostError, \"Workload Post failed\"))\n        \n        self.logger.log(\"WorkloadPatch: Post- Completed\")\n        self.callLogBackup()\n\n    def preMasterDB(self):\n        pass\n       \n    def preSlaveDB(self):\n        pass\n\n    def postMasterDB(self):\n        pass\n\n    def postSlaveDB(self):\n        pass\n    \n    def confParser(self):\n        self.logger.log(\"WorkloadPatch: Entering workload config parsing\")\n        configfile = '/etc/azure/workload.conf'\n        try:\n            if os.path.exists(configfile):\n                config = ConfigParsers.ConfigParser()\n                config.read(configfile)\n                if config.has_section(\"workload\"):\n                    self.logger.log(\"WorkloadPatch: config section present for workloads \")\n                    if config.has_option(\"workload\", 'workload_name'):                        \n                        name = config.get(\"workload\", 'workload_name')\n                        if name in self.supported_workload:\n                            self.name = name\n                            self.logger.log(\"WorkloadPatch: config workload command \"+ self.name)\n                        else:\n                            return None\n                    else:\n                        return None\n                    if config.has_option(\"workload\", 'command_path'):                        \n                        self.command = config.get(\"workload\", 'command_path')\n                  
      self.command = self.command+\"/\"\n                        self.logger.log(\"WorkloadPatch: config workload command \"+ self.command)\n                    if config.has_option(\"workload\", 'credString'):\n                        self.cred_string = config.get(\"workload\", 'credString')\n                        self.logger.log(\"WorkloadPatch: config workload cred_string found\")\n                    elif not config.has_option(\"workload\", 'linux_user'):\n                        self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadAuthorizationMissing, \"Cred and linux user string missing\"))\n                    if config.has_option(\"workload\", 'role'):\n                        self.role = config.get(\"workload\", 'role')\n                        self.logger.log(\"WorkloadPatch: config workload role \"+ self.role)\n                    if config.has_option(\"workload\", 'enforceSlaveOnly'):\n                        self.enforce_slave_only = config.get(\"workload\", 'enforceSlaveOnly')\n                        self.logger.log(\"WorkloadPatch: config workload enforce_slave_only \"+ self.enforce_slave_only)\n                    if config.has_option(\"workload\", 'ipc_folder'):\n                        self.ipc_folder = config.get(\"workload\", 'ipc_folder')\n                        self.logger.log(\"WorkloadPatch: config ipc folder \"+ self.ipc_folder)\n                    if config.has_option(\"workload\", 'timeout'):\n                        timeout = config.get(\"workload\", 'timeout')\n                        if timeout != \"\" and timeout != None:\n                            self.timeout = timeout\n                        self.logger.log(\"WorkloadPatch: config timeout of pre script \"+ self.timeout)\n                    if config.has_option(\"workload\", 'linux_user'):\n                        self.linux_user = config.get(\"workload\", 'linux_user')\n                        self.logger.log(\"WorkloadPatch: config linux user of pre script \"+ self.linux_user)\n                        self.sudo_user = \"sudo -u \"+self.linux_user\n                    if config.has_option(\"workload\", 'dbnames'):\n                        dbnames_list = config.get(\"workload\", 'dbnames') #mydb1;mydb2;mydb3\n                        self.dbnames = dbnames_list.split(';')\n                    if config.has_option(\"workload\", 'customScriptEnabled'):\n                        self.custom_scripts_enabled = config.get(\"workload\", 'customScriptEnabled')\n                        self.logger.log(\"WorkloadPatch: config workload customer using custom script \"+ self.custom_scripts_enabled)\n                        if int(self.custom_scripts_enabled) == 1:\n                            self.scriptpath= \"CustomScripts\"\n                    if config.has_option(\"workload\", 'configuration_path'):\n                        self.configuration_path = config.get(\"workload\", 'configuration_path')\n                        self.logger.log(\"WorkloadPatch: config workload customer having multiple instances mentioned at path \"+ self.configuration_path)\n                    if config.has_section(\"logbackup\"):\n                        self.logbackup = \"enable\"\n                        self.logger.log(\"WorkloadPatch: Logbackup Enabled\")\n                else:\n                    self.logger.log(\"WorkloadPatch: workload config section missing. File system consistent backup\")\n            else:\n                self.logger.log(\"WorkloadPatch: workload config file missing. 
File system consistent backup\")\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in workload conf file parsing\")\n            if(self.name != None):\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConfParsingError, \"exception in workloadconfig parsing\"))\n    \n    def createTempScriptsFolder(self):\n        self.logger.log(\"WorkloadPatch: Creating temporary scripts folder\")\n        try:\n            originalScriptsPath = os.path.join(os.getcwd(), \"main/workloadPatch/\"+self.scriptpath)\n            newScriptsPath = os.path.join(self.temp_script_folder, self.scriptpath)\n            \n            if (os.path.exists(self.temp_script_folder) == False):\n                self.logger.log(\"WorkloadPatch: Script folder directory path not found..creating\")\n                os.makedirs(self.temp_script_folder)\n                \n            if (os.path.exists(newScriptsPath)):\n                self.logger.log(\"WorkloadPatch: Existing temporary scripts folder found..removing\")\n                self.removeTempScriptsFolder()\n                \n            copyProcess = subprocess.Popen(['cp','-ar',originalScriptsPath,self.temp_script_folder])\n            copyProcess.wait()\n            changeOwnerProcess = subprocess.Popen(['chown','-R',self.linux_user,newScriptsPath], stdout=subprocess.PIPE)\n            changeOwnerProcess.wait()\n            permissionProcess = subprocess.Popen(['chmod','-R','500',newScriptsPath], stdout=subprocess.PIPE)\n            permissionProcess.wait()\n            self.logger.log(\"WorkloadPatch: Script files copied to temporary scripts folder present at \" + newScriptsPath)\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in creating temporary scripts folder: \" + str(e))\n        \n    \n    def removeTempScriptsFolder(self):\n        self.logger.log(\"WorkloadPatch: Removing temporary scripts folder\")\n        try:\n            newScriptsPath = os.path.join(self.temp_script_folder, self.scriptpath)\n            removalProcess = subprocess.Popen(['rm','-rf',newScriptsPath], stdout=subprocess.PIPE)\n            removalProcess.wait()\n            self.logger.log(\"WorkloadPatch: Removed temporary scripts folder\")\n        except Exception as e:\n            self.logger.log(\"WorkloadPatch: exception in removing temporary scripts folder: \" + str(e))\n        \n        \n    def populateErrors(self):\n        if len(self.error_details) > 0:\n            errdetail = self.error_details[0]\n            return errdetail\n        else:\n            return None\n\n    def waitForPreScriptCompletion(self):\n        if self.ipc_folder != None:\n            wait_counter = 5 \n            while len(self.child) == 0 and wait_counter > 0:\n                self.logger.log(\"WorkloadPatch: child not created yet\", True)\n                wait_counter -= 1\n                sleep(2)\n            if wait_counter > 0:\n                self.logger.log(\"WorkloadPatch: sql subprocess Created \"+str(self.child[0].pid))\n            else:\n                self.logger.log(\"WorkloadPatch: sql connection failed\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConnectionError, \"sql connection failed\"))\n                return None\n            wait_counter = 60\n            while os.path.exists(self.outfile) == False and wait_counter > 0:\n                self.logger.log(\"WorkloadPatch: Waiting for sql to complete\")\n               
 wait_counter -= 1\n                sleep(2)\n            if wait_counter > 0:\n                self.logger.log(\"WorkloadPatch: pre at server level completed\")\n            else:\n                self.logger.log(\"WorkloadPatch: pre failed to quiesce\")\n                self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingError, \"pre failed to quiesce\"))\n                return None\n\n\n    def timeoutDaemon(self):\n        global daemonProcess\n\n        argsDaemon = \"su - \"+self.linux_user+\" -c \" + \"'\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/timeoutDaemon.sh\")+\" \"+self.name+\" \"+self.command+\" \\\"\"+self.cred_string+\"\\\" \"+self.timeout+\" \"+os.path.join(self.temp_script_folder, self.scriptpath + \"'\")\n        devnull = open(os.devnull, 'w')\n\n        daemonProcess = subprocess.Popen(argsDaemon, stdout=devnull, stderr=devnull, shell=True)\n        wait_counter = 5\n        while (daemonProcess is None or daemonProcess.poll() is not None) and wait_counter > 0:\n            self.logger.log(\"WorkloadPatch: daemonProcess not created yet\", True)\n            wait_counter -= 1\n            sleep(1)\n        if wait_counter > 0:\n            self.logger.log(\"WorkloadPatch: daemonProcess Created \"+str(daemonProcess.pid))\n        else:\n            while True:\n                line= daemonProcess.stdout.readline()\n                line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n                if(line != ''):\n                    self.logger.log(\"WorkloadPatch: daemon process creation failed \"+line.rstrip(), True)\n                else:\n                    break\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConnectionError, \"sql connection failed\"))\n        return None\n\n    def timeoutDaemonOracleInstance(self, instanceIndex, commandPath):\n        global daemonProcess\n\n        argsDaemon = \"su - \"+self.linux_user+\" -c \" + \"'\" + os.path.join(self.temp_script_folder, self.scriptpath + \"/timeoutDaemon.sh\")+\" \"+self.name+\" \"+commandPath+\" \\\"\"+self.cred_string+\"\\\" \"+self.timeout+\" \"+os.path.join(self.temp_script_folder, self.scriptpath + \"'\")\n        devnull = open(os.devnull, 'w')\n    \n        oracleInstance = self.instance_list[instanceIndex]\n        envExport = \"export ORACLE_SID=\" + oracleInstance[\"sid\"] + \"; export ORACLE_HOME=\" + oracleInstance[\"home\"] + \"; export PATH=\" + oracleInstance[\"home\"] + \"/bin:${PATH}; export ORACLE_UNQNAME=\" + oracleInstance[\"sid\"] + \"; \" \n        argsDaemon = envExport + argsDaemon\n        daemonProcess = subprocess.Popen(argsDaemon, stdout=devnull, stderr=devnull, shell=True)\n        self.instance_list[instanceIndex][\"daemonProcess\"] = daemonProcess\n        wait_counter = 5\n\n        while (daemonProcess is None or daemonProcess.poll() is not None) and wait_counter > 0:\n            self.logger.log(\"WorkloadPatch: daemonProcess not created yet\", True)\n            wait_counter -= 1\n            sleep(1)\n        if wait_counter > 0:\n            self.logger.log(\"WorkloadPatch: daemonProcess Created \"+str(daemonProcess.pid))\n        else:\n            while True:\n                line= daemonProcess.stdout.readline()\n                line = Utils.HandlerUtil.HandlerUtility.convert_to_string(line)\n                if(line != ''):\n                    self.logger.log(\"WorkloadPatch: daemon process creation failed \"+line.rstrip(), True)\n                else:\n                  
  break\n            self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConnectionError, \"sql connection failed\"))\n        return None\n    \n    def thread_for_sql(self,args):\n        self.logger.log(\"WorkloadPatch: command to execute: \"+str(args))\n        self.child.append(subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE))\n        sleep(1)\n    \n    def getRole(self):\n        return \"master\"\n    \n    def callLogBackup(self):\n        if 'enable' in self.logbackup.lower():\n            self.logger.log(\"WorkloadPatch: Initializing logbackup\")\n            logbackupObject = LogBackupPatch()\n        else:\n            return\n"
  },
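  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/oracle_instances.conf.example",
    "content": "# Hypothetical sample (not part of the original tree), added for illustration:\n# the file named by the [workload] configuration_path option and parsed by\n# WorkloadPatch.readOracleList(). One Oracle instance per line, oratab-style,\n# as SID, then ORACLE_HOME, then an optional flag, separated by colons.\nCDB1:/u01/app/oracle/product/19.0.0/dbhome_1:N\nORCL:/u01/app/oracle/product/19.0.0/dbhome_2:N\n"
  },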
  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/OracleLogBackup.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport sys\nimport subprocess\nimport threading\nfrom workloadPatch.LogbackupPatch import LogBackupPatch\nfrom time import sleep\nfrom datetime import datetime\n\n# Example of Parameter File Content:\n# *.db_name='CDB1'\ndef parameterFileParser():\n    regX = re.compile(r\"\\*\\..+=.+\")\n    parameterFile = open(logbackup.parameterFilePath, 'r')\n    contents = parameterFile.read()\n    for match in regX.finditer(contents):\n        keyParameter = match.group().split('=')[0].lstrip('*\\.')\n        valueParameter = [name.strip('\\'') for name in match.group().split('=')[1].split(',')]\n        logbackup.oracleParameter[keyParameter] = valueParameter\n\ndef setLocation():\n    nowTimestamp = datetime.now()\n    nowTimestamp = nowTimestamp.strftime(\"%Y%m%d%H%M%S\")\n    fullPath = logbackup.baseLocation + nowTimestamp\n    os.system('mkdir -m777 '+ fullPath)\n    return fullPath\n\ndef takeBackup():\n    print(\"logbackup: Taking a backup\")\n\n    backupPath = setLocation()\n\n    if 'oracle' in logbackup.name.lower():\n        backupOracle = logbackup.command + \" -s / as sysdba @\" +  \"/var/lib/waagent/Microsoft.Azure.RecoveryServices.VMSnapshotLinux-1.0.9164.0/main/workloadPatch/scripts/logbackup.sql \" + backupPath\n        argsForControlFile = [\"su\", \"-\", logbackup.cred_string, \"-c\", backupOracle]\n        snapshotControlFile = subprocess.Popen(argsForControlFile)\n        while snapshotControlFile.poll()==None:\n            sleep(1)        \n        recoveryFileDest = logbackup.oracleParameter['db_recovery_file_dest']\n        dbName = logbackup.oracleParameter['db_name']\n        print('    logbackup: Archive log backup started at ', datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n        os.system('cp -R -f ' + recoveryFileDest[0] + '/' + dbName[0] + '/archivelog ' + backupPath)\n        print('    logbackup: Archive log backup complete at ', datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n\n    print(\"logbackup: Backup Complete\")\n\ndef main():\n    global logbackup\n    logbackup = LogBackupPatch()\n    parameterFileParser()\n    takeBackup()\n\nif __name__ == \"__main__\":\n    main()"
  },
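  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/initCDB1.ora.example",
    "content": "# Hypothetical sample (not part of the original tree), added for illustration:\n# a minimal Oracle pfile of the shape scanned by parameterFileParser() in\n# OracleLogBackup.py and OracleLogRestore.py. The keys the scripts consume are\n# db_name, db_recovery_file_dest and control_files; all values are illustrative.\n*.db_name='CDB1'\n*.db_recovery_file_dest='/u01/app/oracle/fast_recovery_area'\n*.control_files='/u01/app/oracle/oradata/CDB1/control01.ctl','/u01/app/oracle/oradata/CDB1/control02.ctl'\n"
  },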
  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/OracleLogRestore.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport threading\nimport os\nfrom time import sleep\nimport subprocess\nfrom datetime import datetime\nimport re\ntry:\n    import ConfigParser as ConfigParsers\nexcept ImportError:\n    import configparser as ConfigParsers\n\nclass LogRestore:\n    def __init__(self):\n        self.name = \"\"\n        self.cred_string = \"\"\n        self.baseLocation = \"\"\n        self.parameterFilePath = \"\"\n        self.oracleParameter = {}\n        self.backupSource = \"\"\n        self.crontabLocation = \"\"\n        self.command = \"\"\n        self.confParser()\n        self.parameterFileParser()\n\n    # Example of Parameter File Content:\n    # *.db_name='CDB1'\n    def parameterFileParser(self):\n        regX = re.compile(r\"\\*\\..+=.+\")\n        parameterFile = open(self.parameterFilePath, 'r')\n        contents = parameterFile.read()\n        for match in regX.finditer(contents):\n            keyParameter = match.group().split('=')[0].lstrip('*\\.')\n            valueParameter = [name.strip('\\'') for name in match.group().split('=')[1].split(',')]\n            self.oracleParameter[keyParameter] = valueParameter\n\n    # To replace the existing control files in the DB with new control files\n    def switchControlFiles(self, backupPath):\n        parsedControlFile = self.oracleParameter['control_files']\n        for location in parsedControlFile:\n            os.system('rm -f '+location)\n            os.system('cp -f '+ backupPath + '/control.ctl ' + location)\n            os.system('chmod a+wrx '+location)\n\n    # To replace the existing archive log files in the DB with new archive log file\n    def switchArchiveLogFiles(self, backupPath):\n        recoveryFileDest = self.oracleParameter['db_recovery_file_dest']\n        dbName = self.oracleParameter['db_name']\n        for location in recoveryFileDest:\n            os.system('rm -R -f '+ location + '/' + dbName[0] +'/archivelog')\n            os.system('cp -R -f ' + backupPath + '/archivelog ' + location + '/' + dbName[0] + '/archivelog')\n            os.system('chmod -R a+wrx '+ location +'/' + dbName[0] + '/archivelog')\n\n    # To trigger the restore of control files and archive log files\n    def triggerRestore(self):\n        backupPath = self.baseLocation + self.backupSource\n        self.switchControlFiles(backupPath)\n        self.switchArchiveLogFiles(backupPath)\n\n    def confParser(self):\n        configfile = '/etc/azure/workload.conf' \n        if os.path.exists(configfile):\n            config = ConfigParsers.ConfigParser()\n            config.read(configfile)\n            if config.has_section(\"logbackup\"):\n                #self.logger.log(\"LogRestore: config section present for workload \")\n                if config.has_option(\"workload\", 'workload_name'):                        \n                    self.name = config.get(\"workload\", 
'workload_name')\n                #self.logger.log(\"LogRestore: config workload name \"+ self.name)\n                else:\n                    return None\n                if config.has_option(\"workload\", 'command'):                        \n                    self.command = config.get(\"workload\", 'command')\n                #self.logger.log(\"LogRestore: config workload command \" + self.command)\n                if config.has_option(\"workload\", 'credString'):\n                    self.cred_string = config.get(\"workload\", 'credString')\n                #self.logger.log(\"LogRestore: config workload cred_string \" + self.cred_string)\n                if config.has_option(\"logbackup\", 'parameterFilePath'):\n                    self.parameterFilePath = config.get(\"logbackup\", 'parameterFilePath')\n                #self.logger.log(\"LogRestore: config logbackup parameter file path: \" + self.parameterFilePath)\n                else:\n                    return None\n                if config.has_option(\"logbackup\", 'baseLocation'):\n                    self.baseLocation = config.get(\"logbackup\", 'baseLocation')\n                #self.logger.log(\"LogRestore: config logbackup base location: \" + self.baseLocation)\n                else:\n                    return None\n                if config.has_option(\"logbackup\", 'crontabLocation'):\n                    self.crontabLocation = config.get(\"logbackup\", 'crontabLocation')\n                #self.logger.log(\"LogRestore: config logbackup crontab location: \" + self.crontabLocation)\n        else:\n            return\n            #self.logger.log(\"No matching workload config found\")\ndef main():\n    oracleLogRestore = LogRestore()\n\n    os.system('ls -lrt ' + oracleLogRestore.baseLocation)\n    oracleLogRestore.backupSource = input(\"Enter the timestamp: \")\n    oracleLogRestore.triggerRestore()\n\nif __name__ == \"__main__\":\n    main()"
  },
  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/workload.conf",
    "content": "[workload]\n#workload_name valid values- mysql, oracle, mariadb, postgres\nworkload_name = \ncommand_path = \ncredString = \nipc_folder = \ntimeout = \nlinux_user = \n\n"
  },
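  {
    "path": "VMBackup/main/workloadPatch/WorkloadUtils/workload.conf.example",
    "content": "# Hypothetical sample (not part of the original tree), added for illustration:\n# a filled-in /etc/azure/workload.conf of the shape read by\n# WorkloadPatch.confParser() and LogBackupPatch.confParser().\n# All values below are illustrative only.\n[workload]\nworkload_name = oracle\n# WorkloadPatch reads command_path (client bin directory);\n# LogBackupPatch reads command (full path to the client binary)\ncommand_path = /u01/app/oracle/product/19.0.0/dbhome_1/bin\ncommand = /u01/app/oracle/product/19.0.0/dbhome_1/bin/sqlplus\nlinux_user = oracle\ntimeout = 90\nrole = master\n# Optional: instance list for multi-instance hosts\n# (see oracle_instances.conf.example)\n#configuration_path = /etc/azure/oracle_instances.conf\n\n[logbackup]\nparameterFilePath = /u01/app/oracle/product/19.0.0/dbhome_1/dbs/initCDB1.ora\n# trailing slash matters: OracleLogBackup appends a timestamp directly\nbaseLocation = /u01/logbackup/\ncrontabLocation = /var/spool/cron/root\n"
  },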
  {
    "path": "VMBackup/main/workloadPatch/__init__.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+"
  },
  {
    "path": "VMBackup/manifest.xml",
    "content": "<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.Azure.RecoveryServices</ProviderNameSpace>\n<Type>VMSnapshotLinux</Type>\n<Version>1.0.9184.0</Version>\n<Label>Windows Azure VMBackup Extension for Linux IaaS</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink></MediaLink>\n<Description>Windows Azure VMBackup Extension for Linux IaaS</Description>\n<IsInternalExtension>true</IsInternalExtension>\n<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>\n<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<SupportedOS>Linux</SupportedOS>\n<CompanyName>Microsoft Open Source Technology Center</CompanyName>\n<!--%REGIONS%-->\n</ExtensionImage>"
  },
  {
    "path": "VMBackup/mkstub.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport json\nimport os\nimport shutil\nfrom main.common import CommonVariables\n\ndef copytree(src,dst):\n    names = os.listdir(src)\n    if(os.path.isdir(dst) != True):\n        os.makedirs(dst)\n    for name in names:\n        srcname = os.path.join(src, name)\n        dstname = os.path.join(dst, name)\n        if os.path.isdir(srcname):\n                copytree(srcname, dstname)\n        else:\n            # Will raise a SpecialFileError for unsupported file types\n            shutil.copy2(srcname, dstname)\n\ntarget_zip_file_location = './dist/'\ntarget_folder_name = CommonVariables.extension_name + '-' + str(CommonVariables.extension_version)\ntarget_zip_file_path = target_zip_file_location + target_folder_name + '.zip'\n\n\nfinal_folder_path = target_zip_file_location + target_folder_name\n\ncopytree(final_folder_path, '/var/lib/waagent/' + target_folder_name)\n\n\"\"\"\nwe should also build up a HandlerEnvironment.json\n\"\"\"\nmanifest_obj = [{\n  \"name\": CommonVariables.extension_name,\n  \"seqNo\": \"1\", \n  \"version\": 1.0,\n    \"handlerEnvironment\": {    \n        \"logFolder\": \"/var/log/azure/\" + CommonVariables.extension_name + \"/\" + str(CommonVariables.extension_version),\n        \"configFolder\": \"/var/lib/waagent/\" + CommonVariables.extension_name + \"-\" + str(CommonVariables.extension_version) + \"/config\",\n        \"statusFolder\": \"/var/lib/waagent/\" + CommonVariables.extension_name + \"-\" + str(CommonVariables.extension_version) + \"/status\",\n        \"heartbeatFile\": \"/var/lib/waagent/\" + CommonVariables.extension_name + \"-\" + str(CommonVariables.extension_version) + \"/heartbeat.log\"\n    }\n}]\n\nmanifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)\nmanifest_file = open('/var/lib/waagent/' + target_folder_name + \"/HandlerEnvironment.json\", \"w\") \nmanifest_file.write(manifest_str)\nmanifest_file.close()\n"
  },
  {
    "path": "VMBackup/references",
    "content": "Utils/\n"
  },
  {
    "path": "VMBackup/setup.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# To build:\n# python setup.py sdist\n#\n# To install:\n# python setup.py install\n#\n# To register (only needed once):\n# python setup.py register\n#\n# To upload:\n# python setup.py sdist upload\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\nimport os\nimport shutil\nimport tempfile\nimport json\nimport sys\nimport subprocess\nimport shutil\nimport time\nfrom subprocess import call\nfrom zipfile import ZipFile\nfrom main.common import CommonVariables\n\npackages_array = []\nmain_folder = 'main'\nmain_entry = main_folder + '/handle.sh'\nbinary_entry = main_folder + '/safefreeze'\narm64_binary_entry = main_folder + '/safefreezeArm64'\npackages_array.append(main_folder)\n\nplugin_folder = main_folder + '/tempPlugin'\nplugin_conf =  main_folder + '/VMSnapshotPluginHost.conf'\nseverity_json =  main_folder + '/LogSeverity.json'\n\n\npatch_folder = main_folder + '/patch'\npackages_array.append(patch_folder)\n\nworkloadpatch_folder = main_folder + '/workloadPatch'\nworkloadutils_folder = main_folder + '/workloadPatch/WorkloadUtils'\nworkloadscripts_folder = main_folder + '/workloadPatch/DefaultScripts'\nworkload_customscripts_folder = main_folder + '/workloadPatch/CustomScripts'\nsqlfilelists=os.listdir(workloadscripts_folder)\ncustom_sqlfilelists=os.listdir(workload_customscripts_folder)\npackages_array.append(workloadpatch_folder)\n\nmanifest = \"manifest.xml\"\n\n\"\"\"\ncopy the dependency to the local\n\"\"\"\n\n\"\"\"\ncopy the utils lib to local\n\"\"\"\ntarget_utils_path = main_folder + '/' + CommonVariables.utils_path_name\n#if os.path.isdir(target_utils_path):\n#    shutil.rmtree(target_utils_path)\n#print('copying')\n#shutil.copytree ('../' + CommonVariables.utils_path_name, target_utils_path)\n#print('copying end')\npackages_array.append(target_utils_path)\n\n\"\"\"\ncopy the NodeBased lib to local\n\"\"\"\ntarget_snapshot_service_path = main_folder + '/' + CommonVariables.snapshot_service_path_name\npackages_array.append(target_snapshot_service_path)\n\npolling_service_metadata =  target_snapshot_service_path + '/service_metadata.json'\npolling_service_readme =  target_snapshot_service_path + '/README.md'\n\n\"\"\"\ngenerate the HandlerManifest.json file.\n\"\"\"\nmanifest_obj = [{\n  \"name\": CommonVariables.extension_name,\n  \"version\": CommonVariables.extension_version,\n  \"handlerManifest\": {\n    \"installCommand\": main_entry + \" install\",\n    \"uninstallCommand\": main_entry + \" uninstall\",\n    \"updateCommand\": main_entry + \" update\",\n    \"enableCommand\": main_entry + \" enable\",\n    \"disableCommand\": main_entry + \" disable\",\n    \"rebootAfterInstall\": False,\n    \"reportHeartbeat\": False\n  }\n}]\n\nmanifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)\nmanifest_file = open(\"HandlerManifest.json\", \"w\") 
\nmanifest_file.write(manifest_str)\nmanifest_file.close()\n\n\"\"\"\ngenerate the safe freeze binary\n\"\"\"\ncur_dir = os.getcwd()\nos.chdir(\"./main/safefreeze\")\nchil = subprocess.Popen([\"make\"], stdout=subprocess.PIPE)\nprocess_wait_time = 5\nwhile(process_wait_time >0 and chil.poll() is None):\n    time.sleep(1)\n    process_wait_time -= 1\n\nos.chdir(cur_dir)\n\n\n'''\ndue to the lack of cross-compilation support in Mariner.\nIt would not be able to get the binaries, so will be using the older binaries\nTo Do : Once Mariner starts supporting cross-compilation, \nwill have to modify any necessary scripts to\nenable the generation of safefreezeARM64 binaries.\n'''\n\"\"\"\ngenerate the ARM64 safe freeze binary\n\"\"\"\ncur_dir = os.getcwd()\nos.chdir(\"./main/safefreezeArm64\")\nchil = subprocess.Popen([\"make\"], stdout=subprocess.PIPE)\nprocess_wait_time = 5\nwhile(process_wait_time >0 and chil.poll() is None):\n    time.sleep(1)\n    process_wait_time -= 1\n\nos.chdir(cur_dir)\n\n\n\"\"\"\nsetup script, to package the files up\n\"\"\"\nsetup(name = CommonVariables.extension_name,\n      version = CommonVariables.extension_zip_version,\n      description=CommonVariables.extension_description,\n      license='Apache License 2.0',\n      author='Microsoft Corporation',\n      author_email='andliu@microsoft.com',\n      url='https://github.com/Azure/azure-linux-extensions',\n      classifiers = ['Development Status :: 5 - Production/Stable',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.3',\n        'Programming Language :: Python :: 3.4',\n        'License :: OSI Approved :: Apache Software License',\n        'Programming Language :: SQL',\n        'Programming Language :: PL/SQL'],\n      packages = packages_array\n      )\n\n\"\"\"\nunzip the package files and re-package it.\n\"\"\"\n\n\n\ntarget_zip_file_location = './dist/'\ntarget_folder_name = CommonVariables.extension_name  + '-' + CommonVariables.extension_zip_version\ntarget_zip_file_path = target_zip_file_location + target_folder_name + '.zip'\n\ntarget_zip_file = ZipFile(target_zip_file_path)\ntarget_zip_file.extractall(target_zip_file_location)\n\ndef dos2unix(src):\n    args = [\"dos2unix\",src]\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n    print('dos2unix %s ' % (src))\n    child.wait()\n\ndef zip(src, dst):\n    zf = ZipFile(\"%s\" % (dst), \"w\")\n    abs_src = os.path.abspath(src)\n    for dirname, subdirs, files in os.walk(src):\n        for filename in files:\n            absname = os.path.abspath(os.path.join(dirname, filename))\n            dos2unix(absname)\n            arcname = absname[len(abs_src) + 1:]\n            print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))\n            zf.write(absname, arcname)\n    zf.close()\n\ndef copybinary(src, dst):\n    shutil.copytree(src, dst)\n\ndef copy(src, dst):\n    shutil.copy2(src, dst)\n\nfinal_folder_path = target_zip_file_location + target_folder_name\n\nfinal_binary_path= final_folder_path + '/main/safefreeze'\nfinal_Arm64binary_path= final_folder_path + '/main/safefreezeArm64'\nfinal_plugin_path = final_folder_path + '/main/tempPlugin'\nfinal_workloadscripts_path = final_folder_path + '/main/workloadPatch/DefaultScripts'\nfinal_workload_customscripts_path = final_folder_path + 
'/main/workloadPatch/CustomScripts'\nfinal_workloadutils_path = final_folder_path + '/main/workloadPatch/WorkloadUtils'\n\ncopybinary(binary_entry, final_binary_path)\ncopybinary(arm64_binary_entry, final_Arm64binary_path)\ncopybinary(plugin_folder, final_plugin_path)\ncopybinary(workloadscripts_folder, final_workloadscripts_path) \ncopybinary(workload_customscripts_folder, final_workload_customscripts_path)\ncopybinary(workloadutils_folder, final_workloadutils_path)\n\nfinal_main_folder = final_folder_path + '/main'\nfinal_snapshot_service_path = final_main_folder + '/' + CommonVariables.snapshot_service_path_name\n\ncopy(plugin_conf, final_main_folder)\ncopy(severity_json, final_main_folder)\ncopy(polling_service_metadata, final_snapshot_service_path)\ncopy(polling_service_readme, final_snapshot_service_path)\ncopy(manifest, final_folder_path)\ncopy(main_entry, final_main_folder)\n\nzip(final_folder_path, target_zip_file_path)\n\n"
  },
  {
    "path": "VMBackup/test/handle.py",
    "content": "﻿#!/usr/bin/env python\n#\n#CustomScript extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport array\nimport base64\nimport os\nimport os.path\nimport re\nimport string\nimport subprocess\nimport sys\nimport imp\nimport shlex\nimport traceback\nimport urllib2\nimport urlparse\nimport datetime\nimport math\n\n\ndef main():\n    ticks = 635798839149570996\n    \n    commandStartTime = datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = ticks/10)\n    utcNow = datetime.datetime.utcnow()\n    timespan = utcNow-commandStartTime\n    \n    print(str(timespan.total_seconds()))\n    total_span_in_seconds = timespan.days * 24 * 60 * 60 + timespan.seconds\n    print(str(total_span_in_seconds))\n\nif __name__ == '__main__' :\n    main()\n"
  },
  {
    "path": "VMBackup/test/install_python2.6.sh",
    "content": "#!/bin/bash\n\n# Function to print messages\nprint_message() {\n    echo \"----------------------------------------\"\n    echo \"$1\"\n    echo \"----------------------------------------\"\n}\n\n# Check if Python 2.6 is already installed\nif command -v python2.6 &> /dev/null; then\n    PYTHON_VERSION=$(python2.6 --version 2>&1)  # Capture version output\n    print_message \"Python 2.6 is already installed. Version: $PYTHON_VERSION\"\n    exit 0\nfi\n\n# Update the package list\nprint_message \"Updating package list...\"\nsudo apt update\n\n# Install required packages for building Python\nprint_message \"Installing required packages...\"\nif ! sudo apt install -y build-essential checkinstall \\\nlibreadline-dev libncurses-dev libssl-dev \\\nlibsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev; then\n    echo \"Error: Failed to install required packages.\"\n    exit 1\nfi\n\nprint_message \"Checking for libreadline installation...\"\ndpkg -l | grep libreadline || echo \"libreadline not found.\"\n\nprint_message \"Changing directory to /tmp...\"\ncd /tmp\n\nprint_message \"Downloading Python 2.6.6 source code...\"\nif ! wget https://www.python.org/ftp/python/2.6.6/Python-2.6.6.tgz; then\n    echo \"Error: Failed to download Python 2.6.6 source code.\"\n    exit 1\nfi\n\n# Extract the downloaded tarball\nprint_message \"Extracting Python 2.6.6...\"\nif ! tar -xzf Python-2.6.6.tgz; then\n    echo \"Error: Failed to extract Python 2.6.6.\"\n    exit 1\nfi\n\n# Change directory to the extracted folder\ncd Python-2.6.6\n\nprint_message \"Configuring Python build with optimizations...\"\nif ! ./configure --enable-optimizations; then\n    echo \"Error: Configuration of Python build failed.\"\n    exit 1\nfi\n\n# Compile the source code\nprint_message \"Compiling Python 2.6.6. This may take a while...\"\nif ! make; then\n    echo \"Error: Compilation of Python 2.6.6 failed.\"\n    exit 1\nfi\n\nprint_message \"Installing Python 2.6...\"\nif ! sudo make altinstall; then\n    echo \"Error: Installation of Python 2.6 failed.\"\n    exit 1\nfi\n\nprint_message \"Verifying the installation of Python 2.6...\"\nif command -v python2.6 &> /dev/null; then\n    python2.6 --version\nelse\n    echo \"Error: Python 2.6 installation was not successful.\"\n    exit 1\nfi\n\nprint_message \"Creating a symbolic link for python2...\"\nif ! sudo ln -s /usr/local/bin/python2.6 /usr/bin/python2; then\n    echo \"Error: Failed to create a symbolic link for python2.\"\n    exit 1\nfi\n\nprint_message \"Python 2.6 installation completed successfully.\"\n"
  },
  {
    "path": "VMEncryption/.vscode/settings.json",
    "content": "{\n    \"python.linting.pylintEnabled\": false,\n    \"python.linting.flake8Enabled\": true,\n    \"python.linting.flake8Args\": [\"--max-line-length=300\"],\n    \"python.linting.enabled\": true\n}"
  },
  {
    "path": "VMEncryption/MANIFEST.in",
    "content": "include HandlerManifest.json manifest.xml extension_shim.sh\nrecursive-include main/oscrypto/91ade *.sh\nrecursive-include main/oscrypto/91ade *.rules\nrecursive-include main/oscrypto/rhel_68/encryptpatches *.patch\nrecursive-include main/oscrypto/centos_68/encryptpatches *.patch\nrecursive-include main/oscrypto/ubuntu_1604/encryptpatches *.patch\nrecursive-include main/oscrypto/ubuntu_1604/encryptscripts *.sh\nrecursive-include main/oscrypto/ubuntu_1404/encryptpatches *.patch\nrecursive-include main/oscrypto/ubuntu_1404/encryptscripts *.sh\nprune test\n"
  },
  {
    "path": "VMEncryption/ReleaseNotes.txt",
    "content": "(0.1.0.999345)\n\n-Fix disable after EFA encryption and stop-start\n-Fix Ubuntu 14 unmount oldroot sequence\n-Fix missing Python-six module for CentOS 6.8\n"
  },
  {
    "path": "VMEncryption/Test-AzureRmVMDiskEncryptionExtension.ps1",
    "content": "﻿Param(\n    [Parameter(Mandatory=$true)]\n    [string] $SubscriptionId,\n    [Parameter(Mandatory=$true)]\n    [string] $AadClientId,\n    [Parameter(Mandatory=$true)]\n    [string] $AadClientSecret,\n    [Parameter(Mandatory=$true)]\n    [string] $ResourcePrefix,\n    [Parameter(Mandatory=$true)]\n    [string] $Username,\n    [Parameter(Mandatory=$true)]\n    [string] $Password,\n    [string] $ExtensionName=\"AzureDiskEncryptionForLinux\",\n    [string] $SshPubKey,\n    [string] $SshPrivKeyPath,\n    [string] $Location=\"eastus\",\n    [string] $VolumeType=\"data\",\n    [string] $GalleryImage=\"RedHat:RHEL:7.2\",\n    [string] $VMSize=\"Standard_D2\",\n    [switch] $DryRun=$false,\n    [switch] $Force=$false\n)\n\n$ErrorActionPreference = \"Stop\"\n\nSet-AzureRmContext -SubscriptionId $SubscriptionId\n\nWrite-Host \"Set AzureRmContext successfully\"\n\n## Resource Group\n$global:ResourceGroupName = $ResourcePrefix + \"ResourceGroup\"\n\nif(!$DryRun)\n{\n    New-AzureRmResourceGroup -Name $ResourceGroupName -Location $Location\n}\n\nWrite-Host \"Created ResourceGroup successfully: $ResourceGroupName\"\n\n## KeyVault\n$global:KeyVaultName = $ResourcePrefix + \"KeyVault\"\n\nif(!$DryRun)\n{\n    $global:KeyVault = New-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -Location $Location\n}\nelse\n{\n    $global:KeyVault = Get-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName\n}\n\nWrite-Host \"Created KeyVault successfully: $KeyVaultName\"\n\nif(!$DryRun)\n{\n    Set-AzureRmKeyVaultAccessPolicy -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -ServicePrincipalName $AadClientId -PermissionsToKeys all -PermissionsToSecrets all\n    Set-AzureRmKeyVaultAccessPolicy -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -EnabledForDiskEncryption\n}\n\nWrite-Host \"Set AzureRmKeyVaultAccessPolicy successfully\"\n\nif(!$DryRun)\n{\n    Add-AzureKeyVaultKey -VaultName $KeyVaultName -Name \"keyencryptionkey\" -Destination Software\n}\n\nWrite-Host \"Added AzureRmKeyVaultKey successfully\"\n\n$global:KeyEncryptionKey = Get-AzureKeyVaultKey -VaultName $KeyVault.OriginalVault.Name -Name \"keyencryptionkey\"\n\nWrite-Host \"Fetched KeyEncryptionKey successfully\"\n\n## Storage\n$global:StorageName = ($ResourcePrefix + \"Storage\").ToLower()\n$global:StorageType = \"Standard_GRS\"\n$global:ContainerName = \"vhds\"\n\nif(!$DryRun)\n{\n    $global:StorageAccount = New-AzureRmStorageAccount -ResourceGroupName $ResourceGroupName -Name $StorageName -Type $StorageType -Location $Location\n}\nelse\n{\n    $global:StorageAccount = Get-AzureRmStorageAccount -ResourceGroupName $ResourceGroupName -Name $StorageName\n}\n\nWrite-Host \"Created StorageAccount successfully: $StorageName\"\n\n## Network\n$global:PublicIpName = $ResourcePrefix + \"PublicIp\"\n$global:InterfaceName = $ResourcePrefix + \"NetworkInterface\"\n$global:SubnetName = $ResourcePrefix + \"Subnet\"\n$global:VNetName = $ResourcePrefix + \"VNet\"\n$global:VNetAddressPrefix = \"10.0.0.0/16\"\n$global:VNetSubnetAddressPrefix = \"10.0.0.0/24\"\n$global:DomainNameLabel = ($ResourcePrefix + \"VM\").ToLower()\n\nif(!$DryRun)\n{\n    $global:PublicIp = New-AzureRmPublicIpAddress -Name $PublicIpName -ResourceGroupName $ResourceGroupName -Location $Location -AllocationMethod Dynamic -DomainNameLabel $DomainNameLabel\n}\nelse\n{\n    $global:PublicIp = Get-AzureRmPublicIpAddress -Name $PublicIpName -ResourceGroupName $ResourceGroupName\n}\n\nWrite-Host \"Created 
PublicIp successfully: \" $PublicIp.DnsSettings.Fqdn.ToString()\n\nif(!$DryRun)\n{\n    $global:SubnetConfig = New-AzureRmVirtualNetworkSubnetConfig -Name $SubnetName -AddressPrefix $VNetSubnetAddressPrefix\n}\n\nWrite-Host \"Created SubnetConfig successfully: $SubnetName\"\n\nif(!$DryRun)\n{\n    $global:VNet = New-AzureRmVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName -Location $Location -AddressPrefix $VNetAddressPrefix -Subnet $SubnetConfig\n}\nelse\n{\n    $global:VNet = Get-AzureRmVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName\n    $global:SubnetConfig = Get-AzureRmVirtualNetworkSubnetConfig -Name $SubnetName -VirtualNetwork $VNet\n}\n\nWrite-Host \"Created AzureRmVirtualNetwork successfully: $VNetName\"\n\nif(!$DryRun)\n{\n    $global:Interface = New-AzureRmNetworkInterface -Name $InterfaceName -ResourceGroupName $ResourceGroupName -Location $Location -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PublicIp.Id\n}\nelse\n{\n    $global:Interface = Get-AzureRmNetworkInterface -Name $InterfaceName -ResourceGroupName $ResourceGroupName\n}\n\nWrite-Host \"Created AzureNetworkInterface successfully: $InterfaceName\"\n\n## Compute\n$global:VMName = $ResourcePrefix + \"VM\"\n$global:ComputerName = $ResourcePrefix + \"VM\"\n$global:OSDiskName = $VMName + \"OsDisk\"\n$global:OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $OSDiskName + \".vhd\"\n$global:DataDisk1Name = $VMName + \"DataDisk1\"\n$global:DataDisk1Uri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $DataDisk1Name + \".vhd\"\n$global:DataDisk2Name = $VMName + \"DataDisk2\"\n$global:DataDisk2Uri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $DataDisk2Name + \".vhd\"\n\n## Setup local VM object\n$SecString = ($Password | ConvertTo-SecureString -AsPlainText -Force)\n$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList @($Username, $SecString)\n\nWrite-Host \"Created credentials successfully\"\n\n$global:VirtualMachine = New-AzureRmVMConfig -VMName $VMName -VMSize $VMSize\n\nWrite-Host \"Created AzureRmVMConfig successfully\"\n\n$VirtualMachine = Set-AzureRmVMOperatingSystem -VM $VirtualMachine -Linux -ComputerName $ComputerName -Credential $Credential\n\nWrite-Host \"Set AzureRmVMOperatingSystem successfully\"\n\n$PublisherName = $GalleryImage.Split(\":\")[0]\n$Offer = $GalleryImage.Split(\":\")[1]\n$Skus = $GalleryImage.Split(\":\")[2]\n\nWrite-Host \"PublisherName: $PublisherName, Offer: $Offer, Skus: $Skus\"\n\n$VirtualMachine = Set-AzureRmVMSourceImage -VM $VirtualMachine -PublisherName $PublisherName -Offer $Offer -Skus $Skus -Version \"latest\"\n\nWrite-Host \"Set AzureVMSourceImage successfully\"\n\n$VirtualMachine = Add-AzureRmVMNetworkInterface -VM $VirtualMachine -Id $Interface.Id\n\nWrite-Host \"Added AzureVMNetworkInterface successfully\"\n\n$VirtualMachine = Set-AzureRmVMOSDisk -VM $VirtualMachine -Name $OSDiskName -VhdUri $OSDiskUri -CreateOption FromImage\n\nWrite-Host \"Created AzureVMOSDisk successfully\"\n\nif ($SshPubKey)\n{\n    $VirtualMachine = Add-AzureRmVMSshPublicKey -VM $VirtualMachine -KeyData $SshPubKey -Path (\"/home/\" + $Username + \"/.ssh/authorized_keys\")\n\n    Write-Host \"Added SSH public key successfully\"\n}\n\n## Create the VM in Azure\nif(!$DryRun)\n{\n    New-AzureRmVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $VirtualMachine\n}\n\nWrite-Host \"Created AzureVM successfully: $VMName\"\n\n$VirtualMachine = Get-AzureRmVM 
-ResourceGroupName $ResourceGroupName -Name $VMName\n\nWrite-Host \"Fetched VM successfully\"\n\nif(!$DryRun)\n{\n    Add-AzureRmVMDataDisk -VM $VirtualMachine -Name $DataDisk1Name -Caching None -DiskSizeInGB 1 -Lun 0 -VhdUri $DataDisk1Uri -CreateOption Empty\n    Add-AzureRmVMDataDisk -VM $VirtualMachine -Name $DataDisk2Name -Caching None -DiskSizeInGB 1 -Lun 1 -VhdUri $DataDisk2Uri -CreateOption Empty\n}\n\nWrite-Host \"Added DataDisks successfully: $DataDisk1Name, $DataDisk2Name\"\n\nif(!$DryRun)\n{\n    Update-AzureRmVM -ResourceGroupName $ResourceGroupName -VM $VirtualMachine\n}\n\nWrite-Host \"Updated VM successfully\"\n\n## SSH preparation\n\n$global:Hostname = $PublicIp.DnsSettings.Fqdn.ToString()\n\nif ($SshPrivKeyPath -and !$DryRun)\n{\n    $commandFileName = $ResourcePrefix + \"Commands.txt\"\n\n    $commands = @\"\nsudo mkdir /root/.ssh\nsudo cp .ssh/authorized_keys /root/.ssh/\nsudo chmod 700 /root/.ssh\nsudo chmod 600 /root/.ssh/authorized_keys \nsudo restorecon -R -v /root/.ssh\nsudo echo \"PermitRootLogin yes\" >>/etc/ssh/sshd_config\nsudo service sshd restart\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -tt -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath ${Username}@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Write-Host \"Copied SSH public key for root\"\n\n    $commands = @\"\n(cat <<EOF\nalias adetail='tail -f /var/log/azure/Microsoft.Azure.Security.A*D*E*ForLinux*/*/extension.log'\nalias adecat='cat /var/log/azure/Microsoft.Azure.Security.A*D*E*ForLinux*/*/extension.log'\nEOF\n) >> /root/.bashrc\n\nparted /dev/sdc\nmklabel msdos\nmkpart pri ext2 0% 100%\nquit\n\nparted /dev/sdd\nmklabel msdos\nmkpart pri ext2 0% 100%\nquit\n\nmkfs.ext4 /dev/sdc1\nmkfs.ext4 /dev/sdd1\n\nUUID1=\"`$(blkid -s UUID -o value /dev/sdc1)\"\nUUID2=\"`$(blkid -s UUID -o value /dev/sdd1)\"\n\necho \"UUID=`$UUID1 /data1 ext4 defaults 0 0\" >>/etc/fstab\necho \"UUID=`$UUID2 /data2 ext4 defaults 0 0\" >>/etc/fstab\n\nmkdir /data1\nmkdir /data2\n\nmount -a\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Write-Host \"Mounted data partitions\"\n\n    $commands = @\"\nsed -i 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config\nreboot\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Start-Sleep 5\n\n    # Poll the SSH port until the VM is reachable again after the reboot.\n    $vmRunning = $false\n\n    while(!$vmRunning)\n    {\n        try\n        {\n            $tcpClient = New-Object System.Net.Sockets.TcpClient\n            $tcpClient.Connect($Hostname, \"22\")\n            $tcpClient.Close()\n            $vmRunning = $true\n        }\n        catch\n        {\n            Write-Host \"VM is not up yet\"\n            Start-Sleep 5\n        }\n    }\n\n    Write-Host \"SELinux disabled\"\n}\n\n## Encryption\n\nif(!$DryRun)\n{\n    $global:EncryptionEnableOutput = Set-AzureRmVMDiskEncryptionExtension `\n        -ExtensionName $ExtensionName `\n        -ResourceGroupName $ResourceGroupName `\n        -VMName $VMName `\n        -AadClientID $AadClientId `\n        -AadClientSecret $AadClientSecret `\n        -DiskEncryptionKeyVaultId $KeyVault.ResourceId `\n        -DiskEncryptionKeyVaultUrl $KeyVault.VaultUri `\n        -KeyEncryptionKeyVaultId $KeyVault.ResourceId `\n        -KeyEncryptionKeyURL $KeyEncryptionKey.Id `\n        -KeyEncryptionAlgorithm \"RSA-OAEP\" `\n        -VolumeType $VolumeType `\n        -SequenceVersion \"1\" `\n        -Force:$Force 3>&1 | Out-String\n\n    Write-Host \"Set AzureRmVMDiskEncryptionExtension successfully\"\n\n    $global:BackupTag = [regex]::match($EncryptionEnableOutput, '(AzureEnc.*?),').Groups[1].Value\n}\n"
  },
  {
    "path": "VMEncryption/Test-AzureRmVMDiskEncryptionExtensionDiskFormat.ps1",
    "content": "﻿Param(\n    [Parameter(Mandatory=$true)]\n    [string] $SubscriptionId,\n    [Parameter(Mandatory=$true)]\n    [string] $AadClientId,\n    [Parameter(Mandatory=$true)]\n    [string] $AadClientSecret,\n    [Parameter(Mandatory=$true)]\n    [string] $ResourcePrefix,\n    [Parameter(Mandatory=$true)]\n    [string] $Username,\n    [Parameter(Mandatory=$true)]\n    [string] $Password,\n    [string] $ExtensionName=\"AzureDiskEncryptionForLinux\",\n    [string] $SshPubKey,\n    [string] $SshPrivKeyPath,\n    [string] $Location=\"eastus\",\n    [string] $VolumeType=\"data\",\n    [string] $GalleryImage=\"RedHat:RHEL:7.2\",\n    [string] $VMSize=\"Standard_D2\"\n)\n\n$ErrorActionPreference = \"Stop\"\n\nSet-AzureRmContext -SubscriptionId $SubscriptionId\n\nWrite-Host \"Set AzureRmContext successfully\"\n\n## Resource Group\n$global:ResourceGroupName = $ResourcePrefix + \"ResourceGroup\"\nNew-AzureRmResourceGroup -Name $ResourceGroupName -Location $Location\n\nWrite-Host \"Created ResourceGroup successfully: $ResourceGroupName\"\n\n## KeyVault\n$global:KeyVaultName = $ResourcePrefix + \"KeyVault\"\n\n$global:KeyVault = New-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -Location $Location\n\nWrite-Host \"Created KeyVault successfully: $KeyVaultName\"\n\nSet-AzureRmKeyVaultAccessPolicy -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -ServicePrincipalName $AadClientId -PermissionsToKeys all -PermissionsToSecrets all\nSet-AzureRmKeyVaultAccessPolicy -VaultName $KeyVaultName -ResourceGroupName $ResourceGroupName -EnabledForDiskEncryption\n\nWrite-Host \"Set AzureRmKeyVaultAccessPolicy successfully\"\n\nAdd-AzureKeyVaultKey -VaultName $KeyVaultName -Name \"keyencryptionkey\" -Destination Software\n\nWrite-Host \"Added AzureRmKeyVaultKey successfully\"\n\n$global:KeyEncryptionKey = Get-AzureKeyVaultKey -VaultName $KeyVault.OriginalVault.Name -Name \"keyencryptionkey\"\n\nWrite-Host \"Fetched KeyEncryptionKey successfully\"\n\n## Storage\n$global:StorageName = ($ResourcePrefix + \"Storage\").ToLower()\n$global:StorageType = \"Standard_GRS\"\n$global:ContainerName = \"vhds\"\n\n$global:StorageAccount = New-AzureRmStorageAccount -ResourceGroupName $ResourceGroupName -Name $StorageName -Type $StorageType -Location $Location\n\nWrite-Host \"Created StorageAccount successfully: $StorageName\"\n\n## Network\n$global:PublicIpName = $ResourcePrefix + \"PublicIp\"\n$global:InterfaceName = $ResourcePrefix + \"NetworkInterface\"\n$global:SubnetName = $ResourcePrefix + \"Subnet\"\n$global:VNetName = $ResourcePrefix + \"VNet\"\n$global:VNetAddressPrefix = \"10.0.0.0/16\"\n$global:VNetSubnetAddressPrefix = \"10.0.0.0/24\"\n$global:DomainNameLabel = ($ResourcePrefix + \"VM\").ToLower()\n\n$global:PublicIp = New-AzureRmPublicIpAddress -Name $PublicIpName -ResourceGroupName $ResourceGroupName -Location $Location -AllocationMethod Dynamic -DomainNameLabel $DomainNameLabel\n\nWrite-Host \"Created PublicIp successfully: \" $PublicIp.DnsSettings.Fqdn.ToString()\n\n$global:SubnetConfig = New-AzureRmVirtualNetworkSubnetConfig -Name $SubnetName -AddressPrefix $VNetSubnetAddressPrefix\n\nWrite-Host \"Created SubnetConfig successfully: $SubnetName\"\n\n$global:VNet = New-AzureRmVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName -Location $Location -AddressPrefix $VNetAddressPrefix -Subnet $SubnetConfig\n\nWrite-Host \"Created AzureRmVirtualNetwork successfully: $VNetName\"\n\n$global:Interface = New-AzureRmNetworkInterface -Name $InterfaceName 
-ResourceGroupName $ResourceGroupName -Location $Location -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PublicIp.Id\n\nWrite-Host \"Created AzureNetworkInterface successfully: $InterfaceName\"\n\n## Compute\n$global:VMName = $ResourcePrefix + \"VM\"\n$global:ComputerName = $ResourcePrefix + \"VM\"\n$global:OSDiskName = $VMName + \"OsDisk\"\n$global:OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $OSDiskName + \".vhd\"\n$global:DataDisk1Name = $VMName + \"DataDisk1\"\n$global:DataDisk1Uri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $DataDisk1Name + \".vhd\"\n$global:DataDisk2Name = $VMName + \"DataDisk2\"\n$global:DataDisk2Uri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + \"vhds/\" + $DataDisk2Name + \".vhd\"\n\n## Setup local VM object\n$SecString = ($Password | ConvertTo-SecureString -AsPlainText -Force)\n$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList @($Username, $SecString)\n\nWrite-Host \"Created credentials successfully\"\n\n$global:VirtualMachine = New-AzureRmVMConfig -VMName $VMName -VMSize $VMSize\n\nWrite-Host \"Created AzureRmVMConfig successfully\"\n\n$VirtualMachine = Set-AzureRmVMOperatingSystem -VM $VirtualMachine -Linux -ComputerName $ComputerName -Credential $Credential\n\nWrite-Host \"Set AzureRmVMOperatingSystem successfully\"\n\n$PublisherName = $GalleryImage.Split(\":\")[0]\n$Offer = $GalleryImage.Split(\":\")[1]\n$Skus = $GalleryImage.Split(\":\")[2]\n\nWrite-Host \"PublisherName: $PublisherName, Offer: $Offer, Skus: $Skus\"\n\n$VirtualMachine = Set-AzureRmVMSourceImage -VM $VirtualMachine -PublisherName $PublisherName -Offer $Offer -Skus $Skus -Version \"latest\"\n\nWrite-Host \"Set AzureVMSourceImage successfully\"\n\n$VirtualMachine = Add-AzureRmVMNetworkInterface -VM $VirtualMachine -Id $Interface.Id\n\nWrite-Host \"Added AzureVMNetworkInterface successfully\"\n\n$VirtualMachine = Set-AzureRmVMOSDisk -VM $VirtualMachine -Name $OSDiskName -VhdUri $OSDiskUri -CreateOption FromImage\n\nWrite-Host \"Created AzureVMOSDisk successfully\"\n\nif ($SshPubKey)\n{\n    $VirtualMachine = Add-AzureRmVMSshPublicKey -VM $VirtualMachine -KeyData $SshPubKey -Path (\"/home/\" + $Username + \"/.ssh/authorized_keys\")\n\n    Write-Host \"Added SSH public key successfully\"\n}\n\n## Create the VM in Azure\nNew-AzureRmVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $VirtualMachine\n\nWrite-Host \"Created AzureVM successfully: $VMName\"\n\n$VirtualMachine = Get-AzureRmVM -ResourceGroupName $ResourceGroupName -Name $VMName\n\nWrite-Host \"Fetched VM successfully\"\n\nAdd-AzureRmVMDataDisk -VM $VirtualMachine -Name $DataDisk1Name -Caching None -DiskSizeInGB 10 -Lun 0 -VhdUri $DataDisk1Uri -CreateOption Empty\nAdd-AzureRmVMDataDisk -VM $VirtualMachine -Name $DataDisk2Name -Caching None -DiskSizeInGB 10 -Lun 1 -VhdUri $DataDisk2Uri -CreateOption Empty\n\nWrite-Host \"Added DataDisks successfully: $DataDisk1Name, $DataDisk2Name\"\n\nUpdate-AzureRmVM -ResourceGroupName $ResourceGroupName -VM $VirtualMachine\n\nWrite-Host \"Updated VM successfully\"\n\n## SSH preparation\n\nif ($SshPrivKeyPath)\n{\n    $global:Hostname = $PublicIp.DnsSettings.Fqdn.ToString()\n    $commandFileName = $ResourcePrefix + \"Commands.txt\"\n\n    $commands = @\"\nsudo mkdir /root/.ssh\nsudo cp .ssh/authorized_keys /root/.ssh/\nsudo chmod 700 /root/.ssh\nsudo chmod 600 /root/.ssh/authorized_keys \nsudo restorecon -R -v /root/.ssh\nsudo echo \"PermitRootLogin yes\" >>/etc/ssh/sshd_config\nsudo service 
sshd restart\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -tt -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath ${Username}@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Write-Host \"Copied SSH public key for root\"\n\n    $commands = @\"\n(cat <<EOF\nalias adetail='tail -f /var/log/azure/Microsoft.Azure.Security.A*D*E*ForLinux*/*/extension.log'\nalias adecat='cat /var/log/azure/Microsoft.Azure.Security.A*D*E*ForLinux*/*/extension.log'\nEOF\n) >> /root/.bashrc\n\napt-get install -yq mdadm\nyum install -y mdadm\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Write-Host \"Installed mdadm\"\n\n    $commands = @\"\nmdadm --create --verbose /dev/md0 --level=0 --raid-devices=2 /dev/sdc /dev/sdd\nmkdir -p /etc/mdadm\nmdadm --detail --scan > /etc/mdadm/mdadm.conf\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Write-Host \"Created RAID array\"\n\n    $commands = @\"\nsed -i 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config\nreboot\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    Start-Sleep 5\n\n    # Poll the SSH port until the VM is reachable again after the reboot.\n    $vmRunning = $false\n\n    while(!$vmRunning)\n    {\n        try\n        {\n            $tcpClient = New-Object System.Net.Sockets.TcpClient\n            $tcpClient.Connect($Hostname, \"22\")\n            $tcpClient.Close()\n            $vmRunning = $true\n        }\n        catch\n        {\n            Write-Host \"VM is not up yet\"\n            Start-Sleep 5\n        }\n    }\n\n    Write-Host \"SELinux disabled\"\n\n    $commands = @\"\nlsblk\nexit\n\"@\n\n    $commands | Out-File -Encoding ascii $commandFileName\n    dos2unix $commandFileName\n    $stdout = cmd /c \"ssh -o UserKnownHostsFile=C:\\Windows\\System32\\NUL -o StrictHostKeyChecking=no -i $SshPrivKeyPath root@${Hostname} <$commandFileName\"\n    Remove-Item $commandFileName\n\n    $global:RaidBlockDevice = \"/dev/\" + [regex]::Match($stdout, '(md\\d+)').Captures.Groups[0].Value\n\n    Write-Host \"Encrypting RAID device: $RaidBlockDevice\"\n}\n\n## Encryption\n\nRead-Host \"Press Enter to continue...\"\n\n$global:Settings = @{\n    \"AADClientID\" = $AadClientId;\n    \"DiskFormatQuery\" = \"[{`\"dev_path`\":`\"$RaidBlockDevice`\",`\"file_system`\":`\"ext4`\",`\"name`\":`\"encryptedraid`\"}]\";\n    \"EncryptionOperation\" = \"EnableEncryptionFormat\";\n    \"KeyEncryptionAlgorithm\" = \"RSA-OAEP\";\n    \"KeyEncryptionKeyURL\" = $KeyEncryptionKey.Id;\n    \"KeyVaultURL\" = $KeyVault.VaultUri;\n    \"SequenceVersion\" = \"1\";\n    \"VolumeType\" = $VolumeType;\n}\n\n$global:ProtectedSettings = @{\n    \"AADClientSecret\" = $AadClientSecret;\n}\n\nSet-AzureRmVMExtension `\n    -ResourceGroupName $ResourceGroupName `\n    -Location $Location `\n    -VMName $VMName `\n    -Name $ExtensionName `\n    -Publisher \"Microsoft.Azure.Security\" `\n    -Type \"AzureDiskEncryptionForLinux\" `\n    -TypeHandlerVersion \"0.1\" `\n    -Settings $Settings `\n    -ProtectedSettings $ProtectedSettings\n\nWrite-Host \"Set AzureRmVMExtension successfully\"\n\n$VirtualMachine = Get-AzureRmVM -ResourceGroupName $ResourceGroupName -Name $VMName\n$global:InstanceView = Get-AzureRmVM -ResourceGroupName $ResourceGroupName -Name $VMName -Status\n\n$KVSecretRef = New-Object Microsoft.Azure.Management.Compute.Models.KeyVaultSecretReference -ArgumentList @($InstanceView.Extensions[0].Statuses[0].Message, $KeyVault.ResourceId)\n$KVKeyRef = New-Object Microsoft.Azure.Management.Compute.Models.KeyVaultKeyReference -ArgumentList @($KeyEncryptionKey.Id, $KeyVault.ResourceId)\n$VirtualMachine.StorageProfile.OsDisk.EncryptionSettings = New-Object Microsoft.Azure.Management.Compute.Models.DiskEncryptionSettings -ArgumentList @($KVSecretRef, $KVKeyRef, $true)\n\nUpdate-AzureRmVM -ResourceGroupName $ResourceGroupName -VM $VirtualMachine\n\nWrite-Host \"Updated VM successfully\"\n"
  },
  {
    "path": "VMEncryption/VMEncryption.pyproj",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project DefaultTargets=\"Build\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\" ToolsVersion=\"4.0\">\n  <PropertyGroup>\n    <Configuration Condition=\" '$(Configuration)' == '' \">Debug</Configuration>\n    <SchemaVersion>2.0</SchemaVersion>\n    <ProjectGuid>334deedb-1c9a-40c8-89f2-a4ae042c18aa</ProjectGuid>\n    <ProjectHome>.</ProjectHome>\n    <SearchPath>\n    </SearchPath>\n    <WorkingDirectory>.</WorkingDirectory>\n    <OutputPath>.</OutputPath>\n    <Name>VMEncryption</Name>\n    <RootNamespace>VMEncryption</RootNamespace>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Debug' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup Condition=\" '$(Configuration)' == 'Release' \">\n    <DebugSymbols>true</DebugSymbols>\n    <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>\n  </PropertyGroup>\n  <PropertyGroup>\n    <VisualStudioVersion Condition=\"'$(VisualStudioVersion)' == ''\">10.0</VisualStudioVersion>\n    <PtvsTargetsFile>$(MSBuildExtensionsPath32)\\Microsoft\\VisualStudio\\v$(VisualStudioVersion)\\Python Tools\\Microsoft.PythonTools.targets</PtvsTargetsFile>\n  </PropertyGroup>\n  <ItemGroup>\n    <Content Include=\"main\\oscrypto\\91ade\\cryptroot-ask-ade.sh\" />\n    <Content Include=\"MANIFEST.in\" />\n    <Content Include=\"Test-AzureRmVMDiskEncryptionExtension.ps1\" />\n    <Content Include=\"main\\oscrypto\\centos_68\\encryptpatches\\centos_68_dracut.patch\" />\n    <Content Include=\"main\\oscrypto\\rhel_68\\encryptpatches\\rhel_68_dracut.patch\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1604\\encryptpatches\\ubuntu_1604_initramfs.patch\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1604\\encryptscripts\\azure_crypt_key.sh\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1604\\encryptscripts\\inject_luks_header.sh\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1404\\encryptpatches\\ubuntu_1404_initramfs.patch\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1404\\encryptscripts\\azure_crypt_key.sh\" />\n    <Content Include=\"main\\oscrypto\\ubuntu_1404\\encryptscripts\\inject_luks_header.sh\" />\n    <Content Include=\"main\\oscrypto\\91ade\\50-udev-ade.rules\" />\n    <Content Include=\"main\\oscrypto\\91ade\\module-setup.sh\" />\n    <Content Include=\"main\\oscrypto\\91ade\\parse-crypt-ade.sh\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Compile Include=\"main\\BekUtil.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\check_util.py\" />\n    <Compile Include=\"main\\CommandExecutor.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\ConfigUtil.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\EncryptionConfig.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\EncryptionEnvironment.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\EncryptionMarkConfig.py\" />\n    <Compile Include=\"main\\HttpUtil.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\DecryptionMarkConfig.py\" />\n    <Compile Include=\"main\\MachineIdentity.py\" />\n    <Compile Include=\"main\\OnGoingItemConfig.py\" />\n    <Compile Include=\"main\\patch\\debianPatching.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\ResourceDiskUtil.py\" />\n    <Compile 
Include=\"main\\TransactionalCopyTask.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\ProcessLock.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\KeyVaultUtil.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\BackupLogger.py\" />\n    <Compile Include=\"main\\Common.py\" />\n    <Compile Include=\"main\\DiskUtil.py\">\n      <SubType>Code</SubType>\n    </Compile>\n    <Compile Include=\"main\\handle.py\" />\n    <Compile Include=\"main\\ExtensionParameter.py\" />\n    <Compile Include=\"main\\oscrypto\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\OSEncryptionState.py\" />\n    <Compile Include=\"main\\oscrypto\\OSEncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\RHEL72LVMEncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\SelinuxState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\StripdownState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\RHEL72EncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\SelinuxState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\StripdownState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_72\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\RHEL68EncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\SelinuxState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\StripdownState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\rhel_68\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\CentOS68EncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\SelinuxState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\StripdownState.py\" />\n    <Compile 
Include=\"main\\oscrypto\\centos_68\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\SplitRootPartitionState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\centos_68\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\Ubuntu1604EncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\StripdownState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\SplitRootPartitionState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\Ubuntu1404EncryptionStateMachine.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\__init__.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\PrereqState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\StripdownState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\UnmountOldrootState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\SplitRootPartitionState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\EncryptBlockDeviceState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\PatchBootSystemState.py\" />\n    <Compile Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\__init__.py\" />\n    <Compile Include=\"main\\patch\\AbstractPatching.py\" />\n    <Compile Include=\"main\\patch\\centosPatching.py\" />\n    <Compile Include=\"main\\patch\\redhatPatching.py\" />\n    <Compile Include=\"main\\patch\\SuSEPatching.py\" />\n    <Compile Include=\"main\\patch\\UbuntuPatching.py\" />\n    <Compile Include=\"main\\patch\\__init__.py\" />\n    <Compile Include=\"main\\Utils\\HandlerUtil.py\" />\n    <Compile Include=\"main\\Utils\\WAAgentUtil.py\" />\n    <Compile Include=\"main\\Utils\\__init__.py\" />\n    <Compile Include=\"main\\__init__.py\" />\n    <Compile Include=\"setup.py\" />\n    <Compile Include=\"test\\console_logger.py\" />\n    <Compile Include=\"test\\test_check_util.py\" />\n    <Compile Include=\"test\\test_resource_disk_util.py\" />\n    <Compile Include=\"test\\__init__.py\" />\n  </ItemGroup>\n  <ItemGroup>\n    <Folder Include=\"main\\\" />\n    <Folder Include=\"main\\oscrypto\\\" />\n    <Folder Include=\"main\\oscrypto\\91ade\" />\n    <Folder Include=\"main\\oscrypto\\rhel_72_lvm\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_72_lvm\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_72\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_72\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_68\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_68\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\rhel_68\\encryptpatches\\\" />\n    <Folder 
Include=\"main\\oscrypto\\centos_68\\\" />\n    <Folder Include=\"main\\oscrypto\\centos_68\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\centos_68\\encryptpatches\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1604\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1604\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1604\\encryptpatches\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1604\\encryptscripts\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1404\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1404\\encryptstates\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1404\\encryptpatches\\\" />\n    <Folder Include=\"main\\oscrypto\\ubuntu_1404\\encryptscripts\\\" />\n    <Folder Include=\"main\\patch\\\" />\n    <Folder Include=\"main\\Utils\\\" />\n    <Folder Include=\"test\\\" />\n  </ItemGroup>\n  <Import Project=\"$(PtvsTargetsFile)\" Condition=\"Exists($(PtvsTargetsFile))\" />\n  <Import Project=\"$(MSBuildToolsPath)\\Microsoft.Common.targets\" Condition=\"!Exists($(PtvsTargetsFile))\" />\n</Project>"
  },
  {
    "path": "VMEncryption/extension_shim.sh",
    "content": "#!/usr/bin/env bash\n\n# Keeping the default command\nCOMMAND=\"\"\nPYTHON=\"\"\n\nUSAGE=\"$(basename \"$0\") [-h] [-i|--install] [-u|--uninstall] [-d|--disable] [-e|--enable] [-p|--update] [-m|--daemon]\n\nProgram to find the installed python on the box and invoke a Python extension script using Python 2.7.\n\nwhere:\n    -h|--help       show this help text\n    -i|--install    install the extension\n    -u|--uninstall  uninstall the extension\n    -d|--disable    disable the extension\n    -e|--enable     enable the extension\n    -p|--update     update the extension\n    -m|--daemon     invoke daemon option\n    -c|--command    command to run\n\nexample:\n# Install usage\n$ bash extension_shim.sh -i\npython ./main/handle.py -install\n\n# Custom executable python file\n$ bash extension_shim.sh -c \"\"hello.py\"\" -i\npython hello.py -install\n\n# Custom executable python file with arguments\n$ bash extension_shim.sh -c \"\"hello.py --install\"\"\npython hello.py --install\n\"\n\nfunction find_python(){\n    local python_exec_command=$1\n\n    # Check if there is python defined.\n    if command -v python >/dev/null 2>&1 ; then\n        eval ${python_exec_command}=\"python\"\n    fi\n}\n\n# Transform long options to short ones for getopts support (getopts doesn't support long args)\nfor arg in \"$@\"; do\n  shift\n  case \"$arg\" in\n    \"--help\")       set -- \"$@\" \"-h\" ;;\n    \"--install\")    set -- \"$@\" \"-i\" ;;\n    \"--update\")     set -- \"$@\" \"-p\" ;;\n    \"--enable\")     set -- \"$@\" \"-e\" ;;\n    \"--disable\")    set -- \"$@\" \"-d\" ;;\n    \"--uninstall\")  set -- \"$@\" \"-u\" ;;\n    \"--daemon\")     set -- \"$@\" \"-m\" ;;\n    *)              set -- \"$@\" \"$arg\"\n  esac\ndone\n\nif [ -z \"$arg\" ]\nthen\n   echo \"$USAGE\" >&2\n   exit 1\nfi\n\n# Get the arguments\nwhile getopts \"iudephc:?\" o; do\n    case \"${o}\" in\n        h|\\?)\n            echo \"$USAGE\"\n            exit 0\n            ;;\n        i)\n            operation=\"-install\"\n            ;;\n        u)\n            operation=\"-uninstall\"\n            ;;\n        d)\n            operation=\"-disable\"\n            ;;\n        e)\n            operation=\"-enable\"\n            ;;\n        p)\n            operation=\"-update\"\n            ;;\n        m)\n            operation=\"-daemon\"\n            ;;\n        c)\n            COMMAND=\"$OPTARG\"\n            ;;\n        *)\n            echo \"$USAGE\" >&2\n            exit 1\n            ;;\n    esac\ndone\n\nshift \"$((OPTIND-1))\"\n\n# If find_python is not able to find a python installed, $PYTHON will be null.\nfind_python PYTHON\n\nif [ -z \"$PYTHON\" ]; then\n   echo \"No Python interpreter found on the box\" >&2\n   exit 51 # Not Supported\nelse\n    PYTHON_VER=`${PYTHON} --version 2>&1`\n    if [[ \"$PYTHON_VER\" =~ \"Python 2.6\" ]] || [[ \"$PYTHON_VER\" =~ \"Python 2.7\" ]]; then\n        echo $PYTHON_VER\n    else\n        echo \"Expected Python 2.7, found $PYTHON_VER\" >&2\n        exit 51 # Not Supported\n    fi\nfi\n\n${PYTHON} ${COMMAND} ${operation} 2>&1\n# DONE"
  },
  {
    "path": "VMEncryption/main/BackupLogger.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\nimport datetime\nimport traceback\nimport urlparse\nimport httplib\nimport os\nimport string\n\nclass BackupLogger(object):\n    def __init__(self, hutil):\n        self.hutil = hutil\n        self.current_process_id = os.getpid()\n\n    \"\"\"description of class\"\"\"\n    def log(self, msg, level='Info'):\n        log_msg = \"{0}: [{1}] {2}\".format(self.current_process_id, level, msg)\n        log_msg = filter(lambda c: c in string.printable, log_msg)\n        log_msg = log_msg.encode('ascii', 'ignore')\n\n        self.hutil.log(log_msg)\n        self.log_to_console(log_msg)\n \n    def log_to_console(self, msg):\n        try:\n            with open('/dev/console', 'w') as f:\n                msg = filter(lambda c: c in string.printable, msg)\n                f.write('[AzureDiskEncryption] ' + msg + '\\n')\n        except IOError as e:\n            pass\n"
  },
  {
    "path": "VMEncryption/main/BekUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom Common import TestHooks\nimport base64\nimport os.path\n\n\"\"\"\nadd retry-logic to the network api call.\n\"\"\"\n\n\nclass BekUtil(object):\n    \"\"\"\n    Utility functions related to the BEK VOLUME and BEK files\n    \"\"\"\n\n    def __init__(self, disk_util, logger):\n        self.disk_util = disk_util\n        self.logger = logger\n        self.bek_filesystem_mount_point = '/mnt/azure_bek_disk'\n\n    def generate_passphrase(self, algorithm):\n        if TestHooks.use_hard_code_passphrase:\n            return TestHooks.hard_code_passphrase\n        else:\n            with open(\"/dev/urandom\", \"rb\") as _random_source:\n                bytes = _random_source.read(127)\n                passphrase_generated = base64.b64encode(bytes)\n            return passphrase_generated\n\n    def get_bek_passphrase_file(self, encryption_config):\n        \"\"\"\n        Returns the LinuxPassPhraseFileName path\n        \"\"\"\n\n        bek_filename = encryption_config.get_bek_filename()\n\n        try:\n            self.disk_util.make_sure_path_exists(self.bek_filesystem_mount_point)\n            self.disk_util.mount_bek_volume(\"BEK VOLUME\", self.bek_filesystem_mount_point, \"fmask=077\")\n\n            if os.path.exists(os.path.join(self.bek_filesystem_mount_point, bek_filename)):\n                return os.path.join(self.bek_filesystem_mount_point, bek_filename)\n\n        except Exception as e:\n            message = \"Failed to get BEK from BEK VOLUME with error: {0}\".format(str(e))\n            self.logger.log(message)\n\n        return None\n\n    def umount_azure_passhprase(self, encryption_config, force=False):\n        passphrase_file = self.get_bek_passphrase_file(encryption_config)\n        if force or (passphrase_file and os.path.exists(passphrase_file)):\n            self.disk_util.umount(self.bek_filesystem_mount_point)\n"
  },
  {
    "path": "VMEncryption/main/CommandExecutor.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport os.path\nimport shlex\nimport sys\n\nfrom subprocess import *\nfrom threading import Timer\n\nclass ProcessCommunicator(object):\n    def __init__(self):\n        self.stdout = None\n        self.stderr = None\n\nclass CommandExecutor(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger):\n        self.logger = logger\n\n    def Execute(self, command_to_execute, raise_exception_on_failure=False, communicator=None, input=None, suppress_logging=False, timeout=0):\n        if type(command_to_execute) == unicode:\n            command_to_execute = command_to_execute.encode('ascii', 'ignore')\n\n        if not suppress_logging:\n            self.logger.log(\"Executing: {0}\".format(command_to_execute))\n        args = shlex.split(command_to_execute)\n        proc = None\n        timer = None\n        return_code = None\n\n        try:\n            proc = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE, close_fds=True)\n        except Exception as e:\n            if raise_exception_on_failure:\n                raise\n            else:\n                if not suppress_logging:\n                    self.logger.log(\"Process creation failed: \" + str(e))\n                return -1\n\n        def timeout_process():\n            proc.kill()\n            self.logger.log(\"Command {0} didn't finish in {1} seconds. Timing it out\".format(command_to_execute, timeout))\n\n        try:\n            if timeout>0:\n                timer = Timer(timeout, timeout_process)\n                timer.start()\n            stdout, stderr = proc.communicate(input=input)\n        finally:\n            if timer is not None:\n                timer.cancel()\n            return_code = proc.returncode\n\n        if isinstance(communicator, ProcessCommunicator):\n            communicator.stdout, communicator.stderr = stdout, stderr\n\n        if int(return_code) != 0:\n            msg = \"Command {0} failed with return code {1}\".format(command_to_execute, return_code)\n            msg += \"\\nstdout:\\n\" + stdout\n            msg += \"\\nstderr:\\n\" + stderr\n\n            if not suppress_logging:\n                self.logger.log(msg)\n\n            if raise_exception_on_failure:\n                raise Exception(msg)\n\n        return return_code\n    \n    def ExecuteInBash(self, command_to_execute, raise_exception_on_failure=False, communicator=None, input=None, suppress_logging=False):\n        command_to_execute = 'bash -c \"{0}{1}\"'.format('set -e; ' if raise_exception_on_failure else '',\n                                                      command_to_execute)\n        \n        return self.Execute(command_to_execute, raise_exception_on_failure, communicator, input, suppress_logging)\n"
  },
  {
    "path": "VMEncryption/main/Common.py",
    "content": "﻿#!/usr/bin/env python\n#\n# Azure Disk Encryption For Linux Extension\n#\n# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass CommonVariables:\n    utils_path_name = 'Utils'\n    extension_name = 'AzureDiskEncryptionForLinux'\n    extension_version = '0.1.0.999345'\n    extension_type = extension_name\n    extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'\n    extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'\n    extension_description = extension_label\n    extension_shim_filename = \"extension_shim.sh\"\n\n    \"\"\"\n    disk/file system related\n    \"\"\"\n    sector_size = 512\n    luks_header_size = 4096 * 512\n    default_block_size = 52428800\n    min_filesystem_size_support = 52428800 * 3\n    #TODO for the sles 11, we should use the ext3\n    default_file_system = 'ext4'\n    format_supported_file_systems = ['ext4', 'ext3', 'ext2', 'xfs', 'btrfs']\n    inplace_supported_file_systems = ['ext4', 'ext3', 'ext2']\n    default_mount_name = 'encrypted_disk'\n    dev_mapper_root = '/dev/mapper/'\n    osmapper_name = 'osencrypt'\n    azure_symlinks_dir = '/dev/disk/azure'\n    disk_by_id_root = '/dev/disk/by-id'\n    disk_by_uuid_root = '/dev/disk/by-uuid'\n    encryption_key_mount_point = '/mnt/azure_bek_disk/'\n    bek_fstab_line_template = 'LABEL=BEK\\\\040VOLUME {0} auto defaults,discard,nofail 0 0\\n'\n    bek_fstab_line_template_ubuntu_14 = 'LABEL=BEK\\\\040VOLUME {0} auto defaults,discard,nobootwait 0 0\\n'\n    etc_defaults_cryptdisks_line = '\\nCRYPTDISKS_MOUNT=\"$CRYPTDISKS_MOUNT {0}\"\\n'\n\n    \"\"\"\n    parameter key names\n    \"\"\"\n    PassphraseFileNameKey = 'BekFileName'\n    KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'\n    KeyVaultURLKey = 'KeyVaultURL'\n    AADClientIDKey = 'AADClientID'\n    AADClientCertThumbprintKey = 'AADClientCertThumbprint'\n    KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'\n    encryption_algorithms = ['RSA-OAEP', 'RSA-OAEP-256', 'RSA1_5']\n    default_encryption_algorithm = 'RSA-OAEP'\n    DiskFormatQuerykey = \"DiskFormatQuery\"\n    PassphraseKey = 'Passphrase'\n\n    \"\"\"\n    value for VolumeType could be OS or Data\n    \"\"\"\n    VolumeTypeKey = 'VolumeType'\n    AADClientSecretKey = 'AADClientSecret'\n    SecretUriKey = 'SecretUri'\n    SecretSeqNum = 'SecretSeqNum'\n\n    VolumeTypeOS = 'OS'\n    VolumeTypeData = 'Data'\n    VolumeTypeAll = 'All'\n\n    SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]\n\n    \"\"\"\n    command types\n    \"\"\"\n    EnableEncryption = 'EnableEncryption'\n    EnableEncryptionFormat = 'EnableEncryptionFormat'\n    EnableEncryptionFormatAll = 'EnableEncryptionFormatAll'\n    UpdateEncryptionSettings = 'UpdateEncryptionSettings'\n    DisableEncryption = 'DisableEncryption'\n    QueryEncryptionStatus = 'QueryEncryptionStatus'\n\n    \"\"\"\n    encryption config keys\n    \"\"\"\n    
    EncryptionEncryptionOperationKey = 'EncryptionOperation'\n    EncryptionDecryptionOperationKey = 'DecryptionOperation'\n    EncryptionVolumeTypeKey = 'VolumeType'\n    EncryptionDiskFormatQueryKey = 'DiskFormatQuery'\n\n    \"\"\"\n    crypt ongoing item config keys\n    \"\"\"\n    OngoingItemMapperNameKey = 'MapperName'\n    OngoingItemHeaderFilePathKey = 'HeaderFilePath'\n    OngoingItemOriginalDevNamePathKey = 'DevNamePath'\n    OngoingItemOriginalDevPathKey = 'DevicePath'\n    OngoingItemPhaseKey = 'Phase'\n    OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'\n    OngoingItemFileSystemKey = 'FileSystem'\n    OngoingItemMountPointKey = 'MountPoint'\n    OngoingItemDeviceSizeKey = 'Size'\n    OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'\n    OngoingItemFromEndKey = 'FromEnd'\n    OngoingItemCurrentDestinationKey = 'CurrentDestination'\n    OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'\n    OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'\n    OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'\n    OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'\n\n    \"\"\"\n    encryption phase definitions\n    \"\"\"\n    EncryptionPhaseBackupHeader = 'BackupHeader'\n    EncryptionPhaseCopyData = 'EncryptingData'\n    EncryptionPhaseRecoverHeader = 'RecoverHeader'\n    EncryptionPhaseEncryptDevice = 'EncryptDevice'\n    EncryptionPhaseDone = 'Done'\n\n    \"\"\"\n    decryption phase constants\n    \"\"\"\n    DecryptionPhaseCopyData = 'DecryptingData'\n    DecryptionPhaseDone = 'Done'\n\n    \"\"\"\n    logs related\n    \"\"\"\n    InfoLevel = 'Info'\n    WarningLevel = 'Warning'\n    ErrorLevel = 'Error'\n\n    \"\"\"\n    error codes\n    \"\"\"\n    extension_success_status = 'success'\n    extension_error_status = 'error'\n    process_success = 0\n    success = 0\n    os_not_supported = 51\n    missing_dependency = 52\n    configuration_error = 53\n    luks_format_error = 2\n    scsi_number_not_found = 3\n    device_not_blank = 4\n    environment_error = 5\n    luks_open_error = 6\n    mkfs_error = 7\n    folder_conflict_error = 8\n    mount_error = 9\n    mount_point_not_exists = 10\n    passphrase_too_long_or_none = 11\n    parameter_error = 12\n    create_encryption_secret_failed = 13\n    encrypttion_already_enabled = 14\n    passphrase_file_not_found = 15\n    command_not_support = 16\n    volue_type_not_support = 17\n    copy_data_error = 18\n    encryption_failed = 19\n    tmpfs_error = 20\n    backup_slice_file_error = 21\n    unmount_oldroot_error = 22\n    operation_lookback_failed = 23\n    unknown_error = 100\n\nclass TestHooks:\n    search_not_only_ide = False\n    use_hard_code_passphrase = False\n    hard_code_passphrase = \"Quattro!\"\n\nclass DeviceItem(object):\n    def __init__(self):\n        #NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN\n        self.name = None\n        self.type = None\n        self.file_system = None\n        self.mount_point = None\n        self.label = None\n        self.uuid = None\n        self.model = None\n        self.size = None\n        self.majmin = None\n        self.device_id = None\n        self.azure_name = None\n    def __str__(self):\n        return (\"name:\" + str(self.name) + \" type:\" + str(self.type) +\n                \" fstype:\" + str(self.file_system) + \" mountpoint:\" + str(self.mount_point) +\n                \" label:\" + str(self.label) + \" model:\" + str(self.model) +\n                \" size:\" + str(self.size) + \" majmin:\" + str(self.majmin) +\n
              \" device_id:\" + str(self.device_id)) + \" azure_name:\" + str(self.azure_name) \n\nclass LvmItem(object):\n    def __init__(self):\n        #lv_name,vg_name,lv_kernel_major,lv_kernel_minor\n        self.lv_name = None\n        self.vg_name = None\n        self.lv_kernel_major = None\n        self.lv_kernel_minor = None\n    def __str__(self):\n        return (\"lv_name:\" + str(self.lv_name) + \" vg_name:\" + str(self.vg_name) +\n                \" lv_kernel_major:\" + str(self.lv_kernel_major) + \" lv_kernel_minor:\" + str(self.lv_kernel_minor))\n\n\nclass CryptItem(object):\n    def __init__(self):\n        self.mapper_name = None\n        self.dev_path = None\n        self.mount_point = None\n        self.file_system = None\n        self.luks_header_path = None\n        self.uses_cleartext_key = None\n        self.current_luks_slot = None\n\n    def __str__(self):\n        return (\"name: \" + str(self.mapper_name) + \" dev_path:\" + str(self.dev_path) +\n                \" mount_point:\" + str(self.mount_point) + \" file_system:\" + str(self.file_system) +\n                \" luks_header_path:\" + str(self.luks_header_path) +\n                \" uses_cleartext_key:\" + str(self.uses_cleartext_key) +\n                \" current_luks_slot:\" + str(self.current_luks_slot))\n\n    def __eq__(self, other):\n        \"\"\"\n        Override method for \"==\" operation, useful for making CryptItem comparison a little logically consistent\n        For example a luks_slot value of \"-1\" and \"None\" are logically equivalent, so this method, treats them the same\n        This is done by \"consolidating\" both values to \"None\".\n        \"\"\"\n        if not isinstance(other, CryptItem):\n            return NotImplemented\n\n        def _consolidate_luks_header_path(crypt_item):\n            \"\"\"\n            if luks_header_path is absent, then it implies that the header is attached so the header path might as well be the device path (dev_path)\n            \"\"\"\n            if crypt_item.luks_header_path and not crypt_item.luks_header_path == \"None\":\n                return crypt_item.luks_header_path\n            return crypt_item.dev_path\n\n        def _consolidate_luks_slot(crypt_item):\n            \"\"\"\n            -1 for luks_slot implies \"None\"\n            \"\"\"\n            if crypt_item.current_luks_slot == -1:\n                return None\n            return crypt_item.current_luks_slot\n\n        def _consolidate_file_system(crypt_item):\n            \"\"\"\n            \"None\" and \"auto\" are functionally identical for \"file_system\" field\n            \"\"\"\n            if not crypt_item.file_system or crypt_item.file_system == \"None\":\n                return \"auto\"\n            return crypt_item.file_system\n\n        def _consolidate_cleartext_key(crypt_item):\n            \"\"\"\n            \"False\", \"None\", \"\" and None are equivalent to False\n            \"\"\"\n            if not crypt_item.uses_cleartext_key or crypt_item.uses_cleartext_key in [\"False\", \"None\"]:\n                return False\n            return True\n\n        return self.mapper_name == other.mapper_name and\\\n            self.dev_path == other.dev_path and\\\n            self.file_system == other.file_system and\\\n            self.mount_point == other.mount_point and\\\n            _consolidate_luks_header_path(self) == _consolidate_luks_header_path(other) and \\\n            _consolidate_luks_slot(self) == _consolidate_luks_slot(other) and\\\n            
_consolidate_file_system(self) == _consolidate_file_system(other) and\\\n            _consolidate_cleartext_key(self) == _consolidate_cleartext_key(other)\n"
  },
  {
    "path": "VMEncryption/main/ConfigUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nfrom Common import *\nfrom ConfigParser import *\n\nclass ConfigKeyValuePair(object):\n    def __init__(self, prop_name, prop_value):\n        self.prop_name = prop_name\n        self.prop_value = prop_value\n\nclass ConfigUtil(object):\n    def __init__(self, config_file_path, section_name, logger):\n        \"\"\"\n        this should not create the config file with path: config_file_path\n        \"\"\"\n        self.config_file_path = config_file_path\n        self.logger = logger\n        self.azure_crypt_config_section = section_name\n    \n    def config_file_exists(self):\n        return os.path.exists(self.config_file_path)\n\n    def save_config(self, prop_name, prop_value):\n        #TODO make the operation an transaction.\n        config = ConfigParser()\n        if os.path.exists(self.config_file_path):\n            config.read(self.config_file_path)\n        # read values from a section\n        if not config.has_section(self.azure_crypt_config_section):\n            config.add_section(self.azure_crypt_config_section)\n        config.set(self.azure_crypt_config_section, prop_name, prop_value)\n        with open(self.config_file_path, 'wb') as configfile:\n            config.write(configfile)\n\n    def save_configs(self, key_value_pairs):\n        config = ConfigParser()\n        if os.path.exists(self.config_file_path):\n            config.read(self.config_file_path)\n        # read values from a section\n        if not config.has_section(self.azure_crypt_config_section):\n            config.add_section(self.azure_crypt_config_section)\n        for key_value_pair in key_value_pairs:\n            if key_value_pair.prop_value is not None:\n                config.set(self.azure_crypt_config_section, key_value_pair.prop_name, key_value_pair.prop_value)\n        with open(self.config_file_path, 'wb') as configfile:\n            config.write(configfile)\n\n    def get_config(self, prop_name):\n        # write the configs, the bek file name and so on.\n        if os.path.exists(self.config_file_path):\n            try:\n                config = ConfigParser()\n                config.read(self.config_file_path)\n                # read values from a section\n                prop_value = config.get(self.azure_crypt_config_section, prop_name)\n                return prop_value\n            except (NoSectionError, NoOptionError) as e:\n                self.logger.log(msg=\"value of prop_name:{0} not found.\".format(prop_name))\n                return None\n        else:\n            self.logger.log(\"the config file {0} not exists.\".format(self.config_file_path))\n            return None"
  },
  {
    "path": "VMEncryption/main/DecryptionMarkConfig.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport traceback\nfrom ConfigUtil import *\nfrom Common import CommonVariables\n\nclass DecryptionMarkConfig(object):\n    def __init__(self, logger, encryption_environment):\n        self.logger = logger\n        self.encryption_environment = encryption_environment\n        self.command = None\n        self.volume_type = None\n        self.decryption_mark_config = ConfigUtil(self.encryption_environment.azure_decrypt_request_queue_path,\n                                                 'decryption_request_queue',\n                                                 self.logger)\n\n    def get_current_command(self):\n        return self.decryption_mark_config.get_config(CommonVariables.EncryptionEncryptionOperationKey)\n\n    def config_file_exists(self):\n        return self.decryption_mark_config.config_file_exists()\n    \n    def commit(self):\n        key_value_pairs = []\n\n        command = ConfigKeyValuePair(CommonVariables.EncryptionEncryptionOperationKey, self.command)\n        key_value_pairs.append(command)\n\n        volume_type = ConfigKeyValuePair(CommonVariables.EncryptionVolumeTypeKey, self.volume_type)\n        key_value_pairs.append(volume_type)\n\n        self.decryption_mark_config.save_configs(key_value_pairs)\n\n    def clear_config(self):\n        try:\n            if os.path.exists(self.encryption_environment.azure_decrypt_request_queue_path):\n                os.remove(self.encryption_environment.azure_decrypt_request_queue_path)\n            return True\n        except OSError as e:\n            self.logger.log(\"Failed to clear_queue with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return False\n"
  },
  {
    "path": "VMEncryption/main/DiskUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport json\nimport os\nimport os.path\nimport re\nfrom subprocess import Popen\nimport shutil\nimport traceback\nimport uuid\nimport glob\nfrom datetime import datetime\n\nfrom EncryptionConfig import EncryptionConfig\nfrom DecryptionMarkConfig import DecryptionMarkConfig\nfrom EncryptionMarkConfig import EncryptionMarkConfig\nfrom TransactionalCopyTask import TransactionalCopyTask\nfrom CommandExecutor import CommandExecutor, ProcessCommunicator\nfrom Common import CommonVariables, CryptItem, LvmItem, DeviceItem\n\n\nclass DiskUtil(object):\n    os_disk_lvm = None\n    sles_cache = {}\n    device_id_cache = {}\n\n    def __init__(self, hutil, patching, logger, encryption_environment):\n        self.encryption_environment = encryption_environment\n        self.hutil = hutil\n        self.distro_patcher = patching\n        self.logger = logger\n        self.ide_class_id = \"{32412632-86cb-44a2-9b5c-50d1417354f5}\"\n        self.vmbus_sys_path = '/sys/bus/vmbus/devices'\n\n        self.command_executor = CommandExecutor(self.logger)\n\n    def copy(self, ongoing_item_config, status_prefix=''):\n        copy_task = TransactionalCopyTask(logger=self.logger,\n                                          disk_util=self,\n                                          hutil=self.hutil,\n                                          ongoing_item_config=ongoing_item_config,\n                                          patching=self.distro_patcher,\n                                          encryption_environment=self.encryption_environment,\n                                          status_prefix=status_prefix)\n        try:\n            mem_fs_result = copy_task.prepare_mem_fs()\n            if mem_fs_result != CommonVariables.process_success:\n                return CommonVariables.tmpfs_error\n            else:\n                return copy_task.begin_copy()\n        except Exception as e:\n            message = \"Failed to perform dd copy: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n            self.logger.log(msg=message, level=CommonVariables.ErrorLevel)\n        finally:\n            copy_task.clear_mem_fs()\n\n    def format_disk(self, dev_path, file_system):\n        mkfs_command = \"\"\n        if file_system in CommonVariables.format_supported_file_systems:\n            mkfs_command = \"mkfs.\" + file_system\n        mkfs_cmd = \"{0} {1}\".format(mkfs_command, dev_path)\n        return self.command_executor.Execute(mkfs_cmd)\n\n    def make_sure_path_exists(self, path):\n        mkdir_cmd = self.distro_patcher.mkdir_path + ' -p ' + path\n        self.logger.log(\"make sure path exists, executing: {0}\".format(mkdir_cmd))\n        return self.command_executor.Execute(mkdir_cmd)\n\n    def touch_file(self, path):\n        mkdir_cmd = self.distro_patcher.touch_path + ' ' + path\n        
self.logger.log(\"touching file, executing: {0}\".format(mkdir_cmd))\n        return self.command_executor.Execute(mkdir_cmd)\n\n    def parse_crypttab_line(self, line):\n        crypttab_parts = line.strip().split()\n\n        if len(crypttab_parts) < 3: # Line should have enough content\n            return None\n\n        if crypttab_parts[0].startswith(\"#\"): # Line should not be a comment\n            return None\n\n        crypt_item = CryptItem()\n        crypt_item.mapper_name = crypttab_parts[0]\n        crypt_item.dev_path = crypttab_parts[1]\n        keyfile_path = crypttab_parts[2]\n        if CommonVariables.encryption_key_mount_point not in keyfile_path and self.encryption_environment.cleartext_key_base_path not in keyfile_path:\n            return None  # if the key_file path doesn't have the encryption key file name, its probably not for us to mess with\n        if self.encryption_environment.cleartext_key_base_path in keyfile_path:\n            crypt_item.uses_cleartext_key = True\n        crypttab_option_string = crypttab_parts[3]\n        crypttab_options = crypttab_option_string.split(',')\n        for option in crypttab_options:\n            option_pair = option.split(\"=\")\n            if len(option_pair) == 2:\n                key = option_pair[0].strip()\n                value = option_pair[1].strip()\n                if key == \"header\":\n                    crypt_item.luks_header_path = value\n        return crypt_item\n\n    def parse_azure_crypt_mount_line(self, line):\n\n        crypt_item = CryptItem()\n\n        crypt_mount_item_properties = line.strip().split()\n\n        crypt_item.mapper_name = crypt_mount_item_properties[0]\n        crypt_item.dev_path = crypt_mount_item_properties[1]\n        crypt_item.luks_header_path = crypt_mount_item_properties[2] if crypt_mount_item_properties[2] and crypt_mount_item_properties[2] != \"None\" else None\n        crypt_item.mount_point = crypt_mount_item_properties[3]\n        crypt_item.file_system = crypt_mount_item_properties[4]\n        crypt_item.uses_cleartext_key = True if crypt_mount_item_properties[5] == \"True\" else False\n        crypt_item.current_luks_slot = int(crypt_mount_item_properties[6]) if len(crypt_mount_item_properties) > 6 else -1\n\n        return crypt_item\n\n    def get_crypt_items(self):\n        crypt_items = []\n        rootfs_crypt_item_found = False\n\n        if self.should_use_azure_crypt_mount():\n            with open(self.encryption_environment.azure_crypt_mount_config_path, 'r') as f:\n                for line in f.readlines():\n                    if not line.strip():\n                        continue\n\n                    crypt_item = self.parse_azure_crypt_mount_line(line)\n\n                    if crypt_item.mount_point == \"/\" or crypt_item.mapper_name == CommonVariables.osmapper_name:\n                        rootfs_crypt_item_found = True\n\n                    crypt_items.append(crypt_item)\n        else:\n            self.logger.log(\"Using crypttab instead of azure_crypt_mount file.\")\n            crypttab_path = \"/etc/crypttab\"\n\n            fstab_items = []\n\n            with open(\"/etc/fstab\", \"r\") as f:\n                for line in f.readlines():\n                    fstab_device, fstab_mount_point = self.parse_fstab_line(line)\n                    if fstab_device is not None:\n                        fstab_items.append((fstab_device, fstab_mount_point))\n\n            if not os.path.exists(crypttab_path):\n                self.logger.log(\"{0} does 
not exist\".format(crypttab_path))\n            else:\n                with open(crypttab_path, 'r') as f:\n                    for line in f.readlines():\n                        if not line.strip():\n                            continue\n\n                        crypt_item = self.parse_crypttab_line(line)\n                        if crypt_item is None:\n                            continue\n\n                        if crypt_item.mapper_name == CommonVariables.osmapper_name:\n                            rootfs_crypt_item_found = True\n\n                        for device_path, mount_path in fstab_items:\n                            if crypt_item.mapper_name in device_path:\n                                crypt_item.mount_point = mount_path\n                        crypt_items.append(crypt_item)\n\n        encryption_status = json.loads(self.get_encryption_status())\n\n        if encryption_status[\"os\"] == \"Encrypted\" and not rootfs_crypt_item_found:\n            crypt_item = CryptItem()\n            crypt_item.mapper_name = CommonVariables.osmapper_name\n\n            proc_comm = ProcessCommunicator()\n            grep_result = self.command_executor.ExecuteInBash(\"cryptsetup status {0} | grep device:\".format(crypt_item.mapper_name), communicator=proc_comm)\n\n            if grep_result == 0:\n                crypt_item.dev_path = proc_comm.stdout.strip().split()[1]\n            else:\n                proc_comm = ProcessCommunicator()\n                self.command_executor.Execute(\"dmsetup table --target crypt\", communicator=proc_comm)\n\n                for line in proc_comm.stdout.splitlines():\n                    if crypt_item.mapper_name in line:\n                        majmin = filter(lambda p: re.match(r'\\d+:\\d+', p), line.split())[0]\n                        src_device = filter(lambda d: d.majmin == majmin, self.get_device_items(None))[0]\n                        crypt_item.dev_path = '/dev/' + src_device.name\n                        break\n\n            rootfs_dev = next((m for m in self.get_mount_items() if m[\"dest\"] == \"/\"))\n            crypt_item.file_system = rootfs_dev[\"fs\"]\n\n            if not crypt_item.dev_path:\n                raise Exception(\"Could not locate block device for rootfs\")\n\n            crypt_item.luks_header_path = \"/boot/luks/osluksheader\"\n\n            if not os.path.exists(crypt_item.luks_header_path):\n                crypt_item.luks_header_path = crypt_item.dev_path\n\n            crypt_item.mount_point = \"/\"\n            crypt_item.uses_cleartext_key = False\n            crypt_item.current_luks_slot = -1\n\n            crypt_items.append(crypt_item)\n\n        return crypt_items\n\n    def should_use_azure_crypt_mount(self):\n        if not os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):\n            return False\n\n        non_os_entry_found = False\n        with open(self.encryption_environment.azure_crypt_mount_config_path, 'r') as f:\n            for line in f.readlines():\n                if not line.strip():\n                    continue\n\n                parsed_crypt_item = self.parse_azure_crypt_mount_line(line)\n                if parsed_crypt_item.mapper_name != CommonVariables.osmapper_name:\n                    non_os_entry_found = True\n\n        # if there is a non_os_entry found we should use azure_crypt_mount. 
 Otherwise we shouldn't\n        return non_os_entry_found\n\n    def add_crypt_item(self, crypt_item, key_file_path):\n        if self.should_use_azure_crypt_mount():\n            return self.add_crypt_item_to_azure_crypt_mount(crypt_item)\n        else:\n            return self.add_crypt_item_to_crypttab(crypt_item, key_file_path)\n\n    def add_crypt_item_to_crypttab(self, crypt_item, key_file):\n        if key_file is None and crypt_item.uses_cleartext_key:\n            line_key_file = self.encryption_environment.cleartext_key_base_path + crypt_item.mapper_name\n        else:\n            line_key_file = key_file\n\n        crypttab_line = \"\\n{0} {1} {2} luks,nofail\".format(crypt_item.mapper_name, crypt_item.dev_path, line_key_file)\n        if crypt_item.luks_header_path:\n            crypttab_line += \",header=\" + crypt_item.luks_header_path\n\n        with open(\"/etc/crypttab\", \"a\") as wf:\n            wf.write(crypttab_line + \"\\n\")\n\n        return True\n\n    def add_crypt_item_to_azure_crypt_mount(self, crypt_item):\n        \"\"\"\n        TODO we should judge that the second time.\n        each line of the azure_crypt_mount file has the format:\n        <mapper name> <source device> <luks header path> <mount point> <file system> <uses cleartext key> <current luks slot>\n        \"\"\"\n        try:\n            if not crypt_item.luks_header_path:\n                crypt_item.luks_header_path = \"None\"\n\n            mount_content_item = (crypt_item.mapper_name + \" \" +\n                                  crypt_item.dev_path + \" \" +\n                                  crypt_item.luks_header_path + \" \" +\n                                  crypt_item.mount_point + \" \" +\n                                  crypt_item.file_system + \" \" +\n                                  str(crypt_item.uses_cleartext_key) + \" \" +\n                                  str(crypt_item.current_luks_slot))\n\n            if os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):\n                with open(self.encryption_environment.azure_crypt_mount_config_path, 'r') as f:\n                    existing_content = f.read()\n                    if existing_content is not None and existing_content.strip() != \"\":\n                        new_mount_content = existing_content + \"\\n\" + mount_content_item\n                    else:\n                        new_mount_content = mount_content_item\n            else:\n                new_mount_content = mount_content_item\n\n            with open(self.encryption_environment.azure_crypt_mount_config_path, 'w') as wf:\n                wf.write('\\n')\n                wf.write(new_mount_content)\n                wf.write('\\n')\n            return True\n        except Exception:\n            return False\n\n    def remove_crypt_item(self, crypt_item):\n        try:\n            if self.should_use_azure_crypt_mount():\n                crypt_file_path = self.encryption_environment.azure_crypt_mount_config_path\n                crypt_line_parser = self.parse_azure_crypt_mount_line\n            elif os.path.exists(\"/etc/crypttab\"):\n                crypt_file_path = \"/etc/crypttab\"\n                crypt_line_parser = self.parse_crypttab_line\n            else:\n                return True\n\n            filtered_mount_lines = []\n            with open(crypt_file_path, 'r') as f:\n                self.logger.log(\"removing an entry from {0}\".format(crypt_file_path))\n                for line in f:\n                    if not line.strip():\n                        continue\n\n                    parsed_crypt_item =
 crypt_line_parser(line)\n                    if parsed_crypt_item is not None and parsed_crypt_item.mapper_name == crypt_item.mapper_name:\n                        self.logger.log(\"Removing crypt mount entry: {0}\".format(line))\n                        continue\n\n                    filtered_mount_lines.append(line)\n\n            with open(crypt_file_path, 'w') as wf:\n                wf.write(''.join(filtered_mount_lines))\n\n            return True\n\n        except Exception:\n            return False\n\n    def update_crypt_item(self, crypt_item, key_file_path):\n        self.logger.log(\"Updating entry for crypt item {0}\".format(crypt_item))\n        self.remove_crypt_item(crypt_item)\n        self.add_crypt_item(crypt_item, key_file_path)\n\n    def migrate_crypt_items(self, passphrase_file):\n        crypt_items = self.get_crypt_items()\n        # Archive azure_crypt_mount file\n        try:\n            if os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):\n                self.logger.log(msg=\"archiving azure crypt mount file: {0}\".format(self.encryption_environment.azure_crypt_mount_config_path))\n                time_stamp = datetime.now()\n                new_name = \"{0}_{1}\".format(self.encryption_environment.azure_crypt_mount_config_path, time_stamp)\n                os.rename(self.encryption_environment.azure_crypt_mount_config_path, new_name)\n            else:\n                self.logger.log(msg=(\"the azure crypt mount file does not exist: {0}\".format(self.encryption_environment.azure_crypt_mount_config_path)), level=CommonVariables.InfoLevel)\n        except OSError as e:\n            self.logger.log(\"Failed to archive encryption mount file with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n\n        for crypt_item in crypt_items:\n            self.logger.log(\"Migrating crypt item: {0}\".format(crypt_item))\n            if crypt_item.mount_point == \"/\" or CommonVariables.osmapper_name == crypt_item.mapper_name:\n                self.logger.log(\"Skipping OS disk\")\n                continue\n\n            if crypt_item.mount_point and crypt_item.mount_point != \"None\":\n                self.logger.log(msg=\"restoring entry for {0} drive in fstab\".format(crypt_item.mount_point), level=CommonVariables.InfoLevel)\n                self.restore_mount_info(crypt_item.mount_point)\n            elif crypt_item.mapper_name:\n                self.logger.log(msg=\"restoring entry for {0} drive in fstab\".format(crypt_item.mapper_name), level=CommonVariables.InfoLevel)\n                self.restore_mount_info(crypt_item.mapper_name)\n            else:\n                self.logger.log(msg=crypt_item.dev_path + \" was not in fstab when encryption was enabled, no need to restore\",\n                                level=CommonVariables.InfoLevel)\n            self.modify_fstab_entry_encrypt(crypt_item.mount_point, os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name))\n            self.add_crypt_item_to_crypttab(crypt_item, passphrase_file)\n\n    def is_luks_device(self, device_path, device_header_path):\n        \"\"\" checks if the device is set up with a luks header \"\"\"\n        path_var = device_header_path if device_header_path else device_path\n        cmd = 'cryptsetup isLuks ' + path_var\n        return int(self.command_executor.Execute(cmd, suppress_logging=True)) == CommonVariables.process_success\n\n    def create_luks_header(self, mapper_name):\n        luks_header_file_path =
 self.encryption_environment.luks_header_base_path + mapper_name\n        if not os.path.exists(luks_header_file_path):\n            dd_command = self.distro_patcher.dd_path + ' if=/dev/zero bs=33554432 count=1 > ' + luks_header_file_path\n            self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True)\n        return luks_header_file_path\n\n    def create_cleartext_key(self, mapper_name):\n        cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name\n        if not os.path.exists(cleartext_key_file_path):\n            dd_command = self.distro_patcher.dd_path + ' if=/dev/urandom bs=128 count=1 > ' + cleartext_key_file_path\n            self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True)\n        return cleartext_key_file_path\n\n    def encrypt_disk(self, dev_path, passphrase_file, mapper_name, header_file):\n        return_code = self.luks_format(passphrase_file=passphrase_file, dev_path=dev_path, header_file=header_file)\n        if return_code != CommonVariables.process_success:\n            self.logger.log(msg=('cryptsetup luksFormat failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel)\n            return return_code\n        else:\n            return_code = self.luks_open(passphrase_file=passphrase_file,\n                                         dev_path=dev_path,\n                                         mapper_name=mapper_name,\n                                         header_file=header_file,\n                                         uses_cleartext_key=False)\n            if return_code != CommonVariables.process_success:\n                self.logger.log(msg=('cryptsetup luksOpen failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel)\n            return return_code\n\n    def check_fs(self, dev_path):\n        self.logger.log(\"checking fs:\" + str(dev_path))\n        check_fs_cmd = self.distro_patcher.e2fsck_path + \" -f -y \" + dev_path\n        return self.command_executor.Execute(check_fs_cmd)\n\n    def expand_fs(self, dev_path):\n        expandfs_cmd = self.distro_patcher.resize2fs_path + \" \" + str(dev_path)\n        return self.command_executor.Execute(expandfs_cmd)\n\n    def shrink_fs(self, dev_path, size_shrink_to):\n        \"\"\"\n        size_shrink_to is in sectors (512 byte)\n        \"\"\"\n        shrinkfs_cmd = self.distro_patcher.resize2fs_path + ' ' + str(dev_path) + ' ' + str(size_shrink_to) + 's'\n        return self.command_executor.Execute(shrinkfs_cmd)\n\n    def check_shrink_fs(self, dev_path, size_shrink_to):\n        return_code = self.check_fs(dev_path)\n        if return_code == CommonVariables.process_success:\n            return_code = self.shrink_fs(dev_path=dev_path, size_shrink_to=size_shrink_to)\n            return return_code\n        else:\n            return return_code\n\n    def luks_format(self, passphrase_file, dev_path, header_file):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        self.hutil.log(\"dev path to cryptsetup luksFormat {0}\".format(dev_path))\n        # workaround for sles sp3\n        if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11':\n            proc_comm = ProcessCommunicator()\n            passphrase_cmd = self.distro_patcher.cat_path + ' ' + passphrase_file\n            self.command_executor.Execute(passphrase_cmd, communicator=proc_comm)\n
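            # the key file contents captured above are fed to luksFormat on stdin below, instead of being passed with -d\n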
            passphrase = proc_comm.stdout\n\n            cryptsetup_cmd = \"{0} luksFormat {1} -q\".format(self.distro_patcher.cryptsetup_path, dev_path)\n            return self.command_executor.Execute(cryptsetup_cmd, input=passphrase)\n        else:\n            if header_file is not None:\n                cryptsetup_cmd = \"{0} luksFormat {1} --header {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, header_file, passphrase_file)\n            else:\n                cryptsetup_cmd = \"{0} luksFormat {1} -d {2} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, passphrase_file)\n\n            return self.command_executor.Execute(cryptsetup_cmd)\n\n    def luks_add_key(self, passphrase_file, dev_path, mapper_name, header_file, new_key_path):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        self.hutil.log(\"new key path: \" + new_key_path)\n\n        if not os.path.exists(new_key_path):\n            self.hutil.error(\"new key does not exist\")\n            return None\n\n        if header_file:\n            cryptsetup_cmd = \"{0} luksAddKey {1} {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, header_file, new_key_path, passphrase_file)\n        else:\n            cryptsetup_cmd = \"{0} luksAddKey {1} {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, new_key_path, passphrase_file)\n\n        return self.command_executor.Execute(cryptsetup_cmd)\n\n    def luks_remove_key(self, passphrase_file, dev_path, header_file):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        self.hutil.log(\"removing the key that matches passphrase file: {0}\".format(passphrase_file))\n\n        if header_file:\n            cryptsetup_cmd = \"{0} luksRemoveKey {1} -d {2} -q\".format(self.distro_patcher.cryptsetup_path, header_file, passphrase_file)\n        else:\n            cryptsetup_cmd = \"{0} luksRemoveKey {1} -d {2} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, passphrase_file)\n\n        return self.command_executor.Execute(cryptsetup_cmd)\n\n    def luks_kill_slot(self, passphrase_file, dev_path, header_file, keyslot):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        self.hutil.log(\"killing keyslot: {0}\".format(keyslot))\n\n        if header_file:\n            cryptsetup_cmd = \"{0} luksKillSlot {1} {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, header_file, keyslot, passphrase_file)\n        else:\n            cryptsetup_cmd = \"{0} luksKillSlot {1} {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, keyslot, passphrase_file)\n\n        return self.command_executor.Execute(cryptsetup_cmd)\n\n    def luks_add_cleartext_key(self, passphrase_file, dev_path, mapper_name, header_file):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name\n\n        self.hutil.log(\"cleartext key path: \" + cleartext_key_file_path)\n\n        return self.luks_add_key(passphrase_file, dev_path, mapper_name, header_file, cleartext_key_file_path)\n\n    def luks_dump_keyslots(self, dev_path, header_file):\n        cryptsetup_cmd = \"\"\n        if header_file:\n            cryptsetup_cmd = \"{0} luksDump {1}\".format(self.distro_patcher.cryptsetup_path, header_file)\n        else:\n
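            # no detached header was supplied, so dump the keyslots directly from the device\n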
cryptsetup_cmd = \"{0} luksDump {1}\".format(self.distro_patcher.cryptsetup_path, dev_path)\n\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(cryptsetup_cmd, communicator=proc_comm)\n\n        lines = filter(lambda l: \"key slot\" in l.lower(), proc_comm.stdout.split(\"\\n\"))\n        keyslots = map(lambda l: \"enabled\" in l.lower(), lines)\n\n        return keyslots\n\n    def luks_open(self, passphrase_file, dev_path, mapper_name, header_file, uses_cleartext_key):\n        \"\"\"\n        return the return code of the process for error handling.\n        \"\"\"\n        self.hutil.log(\"dev mapper name to cryptsetup luksOpen \" + (mapper_name))\n\n        if uses_cleartext_key:\n            passphrase_file = self.encryption_environment.cleartext_key_base_path + mapper_name\n\n        self.hutil.log(\"keyfile: \" + (passphrase_file))\n\n        if header_file:\n            cryptsetup_cmd = \"{0} luksOpen {1} {2} --header {3} -d {4} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, mapper_name, header_file, passphrase_file)\n        else:\n            cryptsetup_cmd = \"{0} luksOpen {1} {2} -d {3} -q\".format(self.distro_patcher.cryptsetup_path, dev_path, mapper_name, passphrase_file)\n\n        return self.command_executor.Execute(cryptsetup_cmd)\n\n    def luks_close(self, mapper_name):\n        \"\"\"\n        returns the exit code for cryptsetup process.\n        \"\"\"\n        self.hutil.log(\"dev mapper name to cryptsetup luksOpen \" + (mapper_name))\n        cryptsetup_cmd = \"{0} luksClose {1} -q\".format(self.distro_patcher.cryptsetup_path, mapper_name)\n\n        return self.command_executor.Execute(cryptsetup_cmd)\n\n    # TODO error handling.\n    def append_mount_info(self, dev_path, mount_point):\n        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(str(uuid.uuid4())))\n        mount_content_item = dev_path + \" \" + mount_point + \"  auto defaults 0 0\"\n        new_mount_content = \"\"\n        with open(\"/etc/fstab\", 'r') as f:\n            existing_content = f.read()\n            new_mount_content = existing_content + \"\\n\" + mount_content_item\n        with open(\"/etc/fstab\", 'w') as wf:\n            wf.write(new_mount_content)\n\n    def is_bek_in_fstab_file(self, lines):\n        for line in lines:\n            fstab_device, fstab_mount_point = self.parse_fstab_line(line)\n            if fstab_mount_point == CommonVariables.encryption_key_mount_point:\n                return True\n        return False\n\n    def parse_fstab_line(self, line):\n        fstab_parts = line.strip().split()\n\n        if len(fstab_parts) < 2:  # Line should have enough content\n            return None, None\n\n        if fstab_parts[0].startswith(\"#\"):  # Line should not be a comment\n            return None, None\n\n        fstab_device = fstab_parts[0]\n        fstab_mount_point = fstab_parts[1]\n        return fstab_device, fstab_mount_point\n\n    def modify_fstab_entry_encrypt(self, mount_point, mapper_path):\n        self.logger.log(\"modify_fstab_entry_encrypt called with mount_point={0}, mapper_path={1}\".format(mount_point, mapper_path))\n\n        if not mount_point:\n            self.logger.log(\"modify_fstab_entry_encrypt: mount_point is empty\")\n            return\n\n        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' 
+ str(str(uuid.uuid4())))\n\n        with open('/etc/fstab', 'r') as f:\n            lines = f.readlines()\n\n        relevant_line = None\n        for i in range(len(lines)):\n            line = lines[i]\n            fstab_device, fstab_mount_point = self.parse_fstab_line(line)\n            if fstab_mount_point != mount_point:  # Not the line we are looking for\n                continue\n\n            self.logger.log(\"Found the relevant fstab line: \" + line)\n            relevant_line = line\n\n            if self.should_use_azure_crypt_mount():\n                # in this case we just remove the line\n                lines.pop(i)\n                break\n            else:\n                new_line = relevant_line.replace(fstab_device, mapper_path)\n                self.logger.log(\"Replacing that line with: \" + new_line)\n                lines[i] = new_line\n                break\n\n        if not self.is_bek_in_fstab_file(lines):\n            lines.append(self.get_fstab_bek_line())\n\n        with open('/etc/fstab', 'w') as f:\n            f.writelines(lines)\n\n        if relevant_line is not None:\n            with open('/etc/fstab.azure.backup', 'a+') as f:\n                f.write(\"\\n\" + relevant_line)\n\n    def get_fstab_bek_line(self):\n        if self.distro_patcher.distro_info[0].lower() == 'ubuntu' and self.distro_patcher.distro_info[1].startswith('14'):\n            return CommonVariables.bek_fstab_line_template_ubuntu_14.format(CommonVariables.encryption_key_mount_point)\n        else:\n            return CommonVariables.bek_fstab_line_template.format(CommonVariables.encryption_key_mount_point)\n\n    def add_bek_to_default_cryptdisks(self):\n        if os.path.exists(\"/etc/default/cryptdisks\"):\n            with open(\"/etc/default/cryptdisks\", 'r') as f:\n                lines = f.readlines()\n            if not any([\"azure_bek_disk\" in line for line in lines]):\n                with open(\"/etc/default/cryptdisks\", 'a') as f:\n                    f.write('\\n' + CommonVariables.etc_defaults_cryptdisks_line.format(CommonVariables.encryption_key_mount_point))\n\n    def remove_mount_info(self, mount_point):\n        if not mount_point:\n            self.logger.log(\"remove_mount_info: mount_point is empty\")\n            return\n\n        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' 
+ str(str(uuid.uuid4())))\n\n        filtered_contents = []\n        removed_lines = []\n\n        with open('/etc/fstab', 'r') as f:\n            for line in f.readlines():\n                line = line.strip()\n                pattern = '\\s' + re.escape(mount_point) + '\\s'\n\n                if re.search(pattern, line):\n                    self.logger.log(\"removing fstab line: {0}\".format(line))\n                    removed_lines.append(line)\n                    continue\n\n                filtered_contents.append(line)\n\n        with open('/etc/fstab', 'w') as f:\n            f.write('\\n')\n            f.write('\\n'.join(filtered_contents))\n            f.write('\\n')\n\n        self.logger.log(\"fstab updated successfully\")\n\n        with open('/etc/fstab.azure.backup', 'a+') as f:\n            f.write('\\n')\n            f.write('\\n'.join(removed_lines))\n            f.write('\\n')\n\n        self.logger.log(\"fstab.azure.backup updated successfully\")\n\n    def restore_mount_info(self, mount_point_or_mapper_name):\n        if not mount_point_or_mapper_name:\n            self.logger.log(\"restore_mount_info: mount_point_or_mapper_name is empty\")\n            return\n\n        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(str(uuid.uuid4())))\n\n        lines_to_keep_in_backup_fstab = []\n        lines_to_put_back_to_fstab = []\n\n        with open('/etc/fstab.azure.backup', 'r') as f:\n            for line in f.readlines():\n                line = line.strip() + '\\n'\n                pattern = '\\s' + re.escape(mount_point_or_mapper_name) + '\\s'\n\n                if re.search(pattern, line):\n                    self.logger.log(\"removing fstab.azure.backup line: {0}\".format(line))\n                    lines_to_put_back_to_fstab.append(line)\n                    continue\n\n                lines_to_keep_in_backup_fstab.append(line)\n\n        with open('/etc/fstab.azure.backup', 'w') as f:\n            f.writelines(lines_to_keep_in_backup_fstab)\n\n        self.logger.log(\"fstab.azure.backup updated successfully\")\n\n        lines_that_remain_in_fstab = []\n        with open('/etc/fstab', 'r') as f:\n            for line in f.readlines():\n                line = line.strip() + '\\n'\n                pattern = '\\s' + re.escape(mount_point_or_mapper_name) + '\\s'\n                if re.search(pattern, line):\n                    # This line should not remain in the fstab.\n                    self.logger.log(\"removing fstab line: {0}\".format(line))\n                    continue\n                lines_that_remain_in_fstab.append(line)\n\n        with open('/etc/fstab', 'w') as f:\n            f.writelines(lines_that_remain_in_fstab + lines_to_put_back_to_fstab)\n\n        self.logger.log(\"fstab updated successfully\")\n\n    def mount_bek_volume(self, bek_label, mount_point, option_string):\n        \"\"\"\n        mount the BEK volume\n        \"\"\"\n        self.make_sure_path_exists(mount_point)\n        mount_cmd = self.distro_patcher.mount_path + ' -L \"' + bek_label + '\" ' + mount_point + ' -o ' + option_string\n        return self.command_executor.Execute(mount_cmd)\n\n    def mount_auto(self, dev_path_or_mount_point):\n        \"\"\"\n        mount the file system via fstab entry\n        \"\"\"\n        mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path_or_mount_point\n        return self.command_executor.Execute(mount_cmd)\n\n    def mount_filesystem(self, dev_path, mount_point, file_system=None):\n        \"\"\"\n        mount the file 
system.\n        \"\"\"\n        self.make_sure_path_exists(mount_point)\n        if file_system is None:\n            mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point\n        else: \n            mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point + ' -t ' + file_system\n\n        return self.command_executor.Execute(mount_cmd)\n\n    def mount_crypt_item(self, crypt_item, passphrase):\n        self.logger.log(\"trying to mount the crypt item:\" + str(crypt_item))\n        self.logger.log(msg=('First trying to auto mount for the item'))\n        mount_filesystem_result = self.mount_auto(os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name))\n        if str(crypt_item.mount_point) != 'None' and mount_filesystem_result != CommonVariables.process_success:\n            self.logger.log(msg=('mount_point is not None and auto mount failed. Trying manual mount.'), level=CommonVariables.WarningLevel)\n            mount_filesystem_result = self.mount_filesystem(os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name), crypt_item.mount_point, crypt_item.file_system)\n            self.logger.log(\"mount file system result:{0}\".format(mount_filesystem_result))\n\n    def swapoff(self):\n        return self.command_executor.Execute('swapoff -a')\n\n    def umount(self, path):\n        umount_cmd = self.distro_patcher.umount_path + ' ' + path\n        return self.command_executor.Execute(umount_cmd)\n\n    def umount_all_crypt_items(self):\n        for crypt_item in self.get_crypt_items():\n            self.logger.log(\"Unmounting {0}\".format(os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name)))\n            self.umount(os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name))\n\n    def mount_all(self):\n        mount_all_cmd = self.distro_patcher.mount_path + ' -a'\n        return self.command_executor.Execute(mount_all_cmd)\n\n    def get_mount_items(self):\n        items = []\n\n        for line in file('/proc/mounts'):\n            line = [s.decode('string_escape') for s in line.split()]\n            item = {\n                \"src\": line[0],\n                \"dest\": line[1],\n                \"fs\": line[2]\n            }\n            items.append(item)\n\n        return items\n\n    def get_encryption_status(self):\n        encryption_status = {\n            \"data\": \"NotEncrypted\",\n            \"os\": \"NotEncrypted\"\n        }\n\n        mount_items = self.get_mount_items()\n        device_items = self.get_device_items(None)\n        device_items_dict = dict([(device_item.mount_point, device_item) for device_item in device_items])\n\n        os_drive_encrypted = False\n        data_drives_found = False\n        all_data_drives_encrypted = True\n\n        osmapper_path = os.path.join(CommonVariables.dev_mapper_root, CommonVariables.osmapper_name)\n\n        if self.is_os_disk_lvm():\n            grep_result = self.command_executor.ExecuteInBash('pvdisplay | grep {0}'.format(osmapper_path),\n                                                              suppress_logging=True)\n            if grep_result == 0 and not os.path.exists('/volumes.lvm'):\n                self.logger.log(\"OS PV is encrypted\")\n                os_drive_encrypted = True\n\n        special_azure_devices_to_skip = self.get_azure_devices()\n\n        for mount_item in mount_items:\n            device_item = device_items_dict.get(mount_item[\"dest\"])\n\n            if device_item is not None and 
\\\n               mount_item[\"fs\"] in CommonVariables.format_supported_file_systems and \\\n               self.is_data_disk(device_item, special_azure_devices_to_skip):\n                data_drives_found = True\n\n                if not device_item.type == \"crypt\":\n                    self.logger.log(\"Data volume {0} is mounted from {1}\".format(mount_item[\"dest\"], mount_item[\"src\"]))\n                    all_data_drives_encrypted = False\n\n            if mount_item[\"dest\"] == \"/\" and \\\n               not self.is_os_disk_lvm() and \\\n               (CommonVariables.dev_mapper_root in mount_item[\"src\"] or \\\n               \"/dev/dm\" in mount_item[\"src\"]):\n                self.logger.log(\"OS volume {0} is mounted from {1}\".format(mount_item[\"dest\"], mount_item[\"src\"]))\n                os_drive_encrypted = True\n\n        if not data_drives_found:\n            encryption_status[\"data\"] = \"NotMounted\"\n        elif all_data_drives_encrypted:\n            encryption_status[\"data\"] = \"Encrypted\"\n        if os_drive_encrypted:\n            encryption_status[\"os\"] = \"Encrypted\"\n\n        encryption_marker = EncryptionMarkConfig(self.logger, self.encryption_environment)\n        decryption_marker = DecryptionMarkConfig(self.logger, self.encryption_environment)\n        if decryption_marker.config_file_exists():\n            encryption_status[\"data\"] = \"DecryptionInProgress\"\n        elif encryption_marker.config_file_exists():\n            encryption_config = EncryptionConfig(self.encryption_environment, self.logger)\n            volume_type = encryption_config.get_volume_type().lower()\n\n            if volume_type == CommonVariables.VolumeTypeData.lower() or \\\n                volume_type == CommonVariables.VolumeTypeAll.lower():\n                encryption_status[\"data\"] = \"EncryptionInProgress\"\n\n            if volume_type == CommonVariables.VolumeTypeOS.lower() or \\\n                volume_type == CommonVariables.VolumeTypeAll.lower():\n                if not os_drive_encrypted:\n                    encryption_status[\"os\"] = \"EncryptionInProgress\"\n        elif os.path.exists(osmapper_path) and not os_drive_encrypted:\n            encryption_status[\"os\"] = \"VMRestartPending\"\n\n        return json.dumps(encryption_status)\n\n    def query_dev_sdx_path_by_scsi_id(self, scsi_number):\n        p = Popen([self.distro_patcher.lsscsi_path, scsi_number], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        identity, err = p.communicate()\n        # identity sample: [5:0:0:0] disk Msft Virtual Disk 1.0 /dev/sdc\n        self.logger.log(\"lsscsi output is: {0}\\n\".format(identity))\n        vals = identity.split()\n        if vals is None or len(vals) == 0:\n            return None\n        sdx_path = vals[len(vals) - 1]\n        return sdx_path\n\n    def query_dev_sdx_path_by_uuid(self, uuid):\n        \"\"\"\n        return the device path that the /dev/disk/by-uuid symlink for this uuid resolves to, otherwise return the by-uuid path itself\n        \"\"\"\n        desired_uuid_path = os.path.join(CommonVariables.disk_by_uuid_root, uuid)\n        for disk_by_uuid in os.listdir(CommonVariables.disk_by_uuid_root):\n            disk_by_uuid_path = os.path.join(CommonVariables.disk_by_uuid_root, disk_by_uuid)\n\n            if disk_by_uuid_path == desired_uuid_path:\n                return os.path.realpath(disk_by_uuid_path)\n\n        return desired_uuid_path\n\n    def query_dev_id_path_by_sdx_path(self, sdx_path):\n        \"\"\"\n        return /dev/disk/by-id that 
maps to the sdx_path, otherwise return the original path\n        Update: we have since realised that by-id is not a good way to refer to devices (they can change on reallocations or resizes).\n        Try not to use this; use get_persistent_path_by_sdx_path instead\n        \"\"\"\n        for disk_by_id in os.listdir(CommonVariables.disk_by_id_root):\n            disk_by_id_path = os.path.join(CommonVariables.disk_by_id_root, disk_by_id)\n            if os.path.realpath(disk_by_id_path) == sdx_path:\n                return disk_by_id_path\n\n        return sdx_path\n\n    def get_persistent_path_by_sdx_path(self, sdx_path):\n        \"\"\"\n        return a stable path for this /dev/sdx device\n        \"\"\"\n        sdx_realpath = os.path.realpath(sdx_path)\n\n        # First try finding an Azure symlink\n        azure_name_table = self.get_block_device_to_azure_udev_table()\n        if sdx_realpath in azure_name_table:\n            return azure_name_table[sdx_realpath]\n\n        # A mapper path is also pretty good (especially for raid or lvm)\n        for mapper_name in os.listdir(CommonVariables.dev_mapper_root):\n            mapper_path = os.path.join(CommonVariables.dev_mapper_root, mapper_name)\n            if os.path.realpath(mapper_path) == sdx_realpath:\n                return mapper_path\n\n        # Then try matching a uuid symlink. Those are probably the best\n        for disk_by_uuid in os.listdir(CommonVariables.disk_by_uuid_root):\n            disk_by_uuid_path = os.path.join(CommonVariables.disk_by_uuid_root, disk_by_uuid)\n\n            if os.path.realpath(disk_by_uuid_path) == sdx_realpath:\n                return disk_by_uuid_path\n\n        # Found nothing very persistent. Just return the original sdx path, and log it.\n        self.logger.log(msg=\"Failed to find a persistent path for [{0}].\".format(sdx_path), level=CommonVariables.WarningLevel)\n        return sdx_path\n\n    def get_device_path(self, dev_name):\n        device_path = None\n\n        if os.path.exists(\"/dev/\" + dev_name):\n            device_path = \"/dev/\" + dev_name\n        elif os.path.exists(\"/dev/mapper/\" + dev_name):\n            device_path = \"/dev/mapper/\" + dev_name\n\n        return device_path\n\n    def get_device_id(self, dev_path):\n        if dev_path in DiskUtil.device_id_cache:\n            return DiskUtil.device_id_cache[dev_path]\n\n        udev_cmd = \"udevadm info -a -p $(udevadm info -q path -n {0}) | grep device_id\".format(dev_path)\n        proc_comm = ProcessCommunicator()\n        self.command_executor.ExecuteInBash(udev_cmd, communicator=proc_comm, suppress_logging=True)\n        match = re.findall(r'\"{(.*)}\"', proc_comm.stdout.strip())\n        DiskUtil.device_id_cache[dev_path] = match[0] if match else \"\"\n\n        return DiskUtil.device_id_cache[dev_path]\n\n    def get_device_items_property(self, dev_name, property_name):\n        if (dev_name, property_name) in DiskUtil.sles_cache:\n            return DiskUtil.sles_cache[(dev_name, property_name)]\n\n        self.logger.log(\"getting property of device {0}\".format(dev_name))\n\n        device_path = self.get_device_path(dev_name)\n        property_value = \"\"\n\n        if property_name == \"SIZE\":\n            get_property_cmd = self.distro_patcher.blockdev_path + \" --getsize64 \" + device_path\n            proc_comm = ProcessCommunicator()\n            self.command_executor.Execute(get_property_cmd, communicator=proc_comm, suppress_logging=True)\n            property_value = 
proc_comm.stdout.strip()\n        elif property_name == \"DEVICE_ID\":\n            property_value = self.get_device_id(device_path)\n        else:\n            get_property_cmd = self.distro_patcher.lsblk_path + \" \" + device_path + \" -b -nl -o NAME,\" + property_name\n            proc_comm = ProcessCommunicator()\n            self.command_executor.Execute(get_property_cmd, communicator=proc_comm, raise_exception_on_failure=True, suppress_logging=True)\n            for line in proc_comm.stdout.splitlines():\n                if line.strip():\n                    disk_info_item_array = line.strip().split()\n                    if dev_name == disk_info_item_array[0]:\n                        if len(disk_info_item_array) > 1:\n                            property_value = disk_info_item_array[1]\n\n        DiskUtil.sles_cache[(dev_name, property_name)] = property_value\n        return property_value\n\n    def get_block_device_to_azure_udev_table(self):\n        table = {}\n        \n        if not os.path.exists(CommonVariables.azure_symlinks_dir):\n            return table\n\n        for top_level_item in os.listdir(CommonVariables.azure_symlinks_dir):\n            top_level_item_full_path = os.path.join(CommonVariables.azure_symlinks_dir, top_level_item)\n            if os.path.isdir(top_level_item_full_path):\n                scsi_path = os.path.join(CommonVariables.azure_symlinks_dir, top_level_item)\n                for symlink in os.listdir(scsi_path):\n                    symlink_full_path = os.path.join(scsi_path, symlink)\n                    table[os.path.realpath(symlink_full_path)] = symlink_full_path\n            else:\n                table[os.path.realpath(top_level_item_full_path)] = top_level_item_full_path\n        return table\n\n    def get_azure_symlinks(self):\n        azure_udev_links = {}\n\n        if os.path.exists(CommonVariables.azure_symlinks_dir):\n            wdbackup = os.getcwd()\n            os.chdir(CommonVariables.azure_symlinks_dir)\n            for symlink in os.listdir(CommonVariables.azure_symlinks_dir):\n                azure_udev_links[os.path.basename(symlink)] = os.path.realpath(symlink)\n            os.chdir(wdbackup)\n\n        return azure_udev_links\n\n    def log_lsblk_output(self):\n        lsblk_command = 'lsblk -o NAME,TYPE,FSTYPE,LABEL,SIZE,RO,MOUNTPOINT'\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(lsblk_command, communicator=proc_comm)\n        self.logger.log('\\n' + str(proc_comm.stdout) + '\\n')\n\n    def get_device_items_sles(self, dev_path):\n        if dev_path:\n            self.logger.log(msg=(\"getting blk info for: {0}\".format(dev_path)))\n        device_items_to_return = []\n        device_items = []\n\n        #first get all the device names\n        if dev_path is None:\n            lsblk_command = 'lsblk -b -nl -o NAME'\n        else:\n            lsblk_command = 'lsblk -b -nl -o NAME ' + dev_path\n\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True)\n\n        for line in proc_comm.stdout.splitlines():\n            item_value_str = line.strip()\n            if item_value_str:\n                device_item = DeviceItem()\n                device_item.name = item_value_str.split()[0]\n                device_items.append(device_item)\n\n        for device_item in device_items:\n            device_item.file_system = self.get_device_items_property(dev_name=device_item.name, 
property_name='FSTYPE')\n            device_item.mount_point = self.get_device_items_property(dev_name=device_item.name, property_name='MOUNTPOINT')\n            device_item.label = self.get_device_items_property(dev_name=device_item.name, property_name='LABEL')\n            device_item.uuid = self.get_device_items_property(dev_name=device_item.name, property_name='UUID')\n            device_item.majmin = self.get_device_items_property(dev_name=device_item.name, property_name='MAJ:MIN')\n            device_item.device_id = self.get_device_items_property(dev_name=device_item.name, property_name='DEVICE_ID')\n\n            device_item.azure_name = ''\n            for symlink, target in self.get_azure_symlinks().items():\n                if device_item.name in target:\n                    device_item.azure_name = symlink\n\n            # get the type of device\n            model_file_path = '/sys/block/' + device_item.name + '/device/model'\n\n            if os.path.exists(model_file_path):\n                with open(model_file_path, 'r') as f:\n                    device_item.model = f.read().strip()\n            else:\n                self.logger.log(msg=(\"no model file found for device {0}\".format(device_item.name)))\n\n            if device_item.model == 'Virtual Disk':\n                self.logger.log(msg=\"model is virtual disk\")\n                device_item.type = 'disk'\n            else:\n                partition_files = glob.glob('/sys/block/*/' + device_item.name + '/partition')\n                self.logger.log(msg=\"found {0} partition file(s)\".format(len(partition_files)))\n                if partition_files is not None and len(partition_files) > 0:\n                    device_item.type = 'part'\n\n            size_string = self.get_device_items_property(dev_name=device_item.name, property_name='SIZE')\n\n            if size_string is not None and size_string != \"\":\n                device_item.size = int(size_string)\n\n            if device_item.type is None:\n                device_item.type = ''\n\n            if device_item.size is not None:\n                device_items_to_return.append(device_item)\n            else:\n                self.logger.log(msg=(\"skip the device {0} because we could not get its size.\".format(device_item.name)))\n\n        return device_items_to_return\n\n    def get_device_items(self, dev_path):\n        if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11':\n            return self.get_device_items_sles(dev_path)\n        else:\n            if dev_path:\n                self.logger.log(msg=(\"getting blk info for: \" + str(dev_path)))\n\n            if dev_path is None:\n                lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN'\n            else:\n                lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN ' + dev_path\n\n            proc_comm = ProcessCommunicator()\n            self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True, suppress_logging=True)\n\n            device_items = []\n            lvm_items = self.get_lvm_items()\n            for line in proc_comm.stdout.splitlines():\n                if line:\n                    device_item = DeviceItem()\n\n                    for disk_info_property in line.split():\n                        property_item_pair = disk_info_property.split('=')\n                        if property_item_pair[0] == 
'SIZE':\n                            device_item.size = int(property_item_pair[1].strip('\"'))\n\n                        if property_item_pair[0] == 'NAME':\n                            device_item.name = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'TYPE':\n                            device_item.type = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'FSTYPE':\n                            device_item.file_system = property_item_pair[1].strip('\"')\n                        \n                        if property_item_pair[0] == 'MOUNTPOINT':\n                            device_item.mount_point = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'LABEL':\n                            device_item.label = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'UUID':\n                            device_item.uuid = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'MODEL':\n                            device_item.model = property_item_pair[1].strip('\"')\n\n                        if property_item_pair[0] == 'MAJ:MIN':\n                            device_item.majmin = property_item_pair[1].strip('\"')\n\n                    device_item.device_id = self.get_device_id(self.get_device_path(device_item.name))\n\n                    if device_item.type is None:\n                        device_item.type = ''\n\n                    if device_item.type.lower() == 'lvm':\n                        for lvm_item in lvm_items:\n                            majmin = lvm_item.lv_kernel_major + ':' + lvm_item.lv_kernel_minor\n\n                            if majmin == device_item.majmin:\n                                device_item.name = lvm_item.vg_name + '/' + lvm_item.lv_name\n\n                    device_item.azure_name = ''\n                    for symlink, target in self.get_azure_symlinks().items():\n                        if device_item.name in target:\n                            device_item.azure_name = symlink\n\n                    device_items.append(device_item)\n\n            return device_items\n\n    def get_lvm_items(self):\n        lvs_command = 'lvs --noheadings --nameprefixes --unquoted -o lv_name,vg_name,lv_kernel_major,lv_kernel_minor'\n        proc_comm = ProcessCommunicator()\n\n        if self.command_executor.Execute(lvs_command, communicator=proc_comm):\n            return []\n\n        lvm_items = []\n\n        for line in proc_comm.stdout.splitlines():\n            if not line:\n                continue\n\n            lvm_item = LvmItem()\n\n            for pair in line.strip().split():\n                if len(pair.split('=')) != 2:\n                    continue\n\n                key, value = pair.split('=')\n\n                if key == 'LVM2_LV_NAME':\n                    lvm_item.lv_name = value\n\n                if key == 'LVM2_VG_NAME':\n                    lvm_item.vg_name = value\n\n                if key == 'LVM2_LV_KERNEL_MAJOR':\n                    lvm_item.lv_kernel_major = value\n\n                if key == 'LVM2_LV_KERNEL_MINOR':\n                    lvm_item.lv_kernel_minor = value\n\n            lvm_items.append(lvm_item)\n\n        return lvm_items\n\n    def is_os_disk_lvm(self):\n        if DiskUtil.os_disk_lvm is not None:\n            return DiskUtil.os_disk_lvm\n\n        device_items = self.get_device_items(None)\n\n        if not any([item.type.lower() == 'lvm' 
for item in device_items]):\n            DiskUtil.os_disk_lvm = False\n            return False\n\n        lvm_items = filter(lambda item: item.vg_name == \"rootvg\", self.get_lvm_items())\n\n        current_lv_names = set([item.lv_name for item in lvm_items])\n\n        DiskUtil.os_disk_lvm = False\n\n        expected_lv_names = set(['homelv', 'optlv', 'rootlv', 'swaplv', 'tmplv', 'usrlv', 'varlv'])\n        if expected_lv_names == current_lv_names:\n            DiskUtil.os_disk_lvm = True\n\n        expected_lv_names = set(['homelv', 'optlv', 'rootlv', 'tmplv', 'usrlv', 'varlv'])\n        if expected_lv_names == current_lv_names:\n            DiskUtil.os_disk_lvm = True\n\n        return DiskUtil.os_disk_lvm\n\n    def is_data_disk(self, device_item, azure_devices):\n        # Root disk\n        if device_item.device_id.startswith('00000000-0000'):\n            self.logger.log(msg=\"skipping root disk\", level=CommonVariables.WarningLevel)\n            return False\n        # Resource Disk. Not considered a \"data disk\" exactly (it is not attached via the portal and we have a separate code path for encrypting it)\n        if device_item.device_id.startswith('00000000-0001'):\n            self.logger.log(msg=\"skipping resource disk\", level=CommonVariables.WarningLevel)\n            return False\n\n        for azure_blk_item in azure_devices:\n            if azure_blk_item.name == device_item.name:\n                self.logger.log(msg=\"the mountpoint is the azure disk root or resource, so skip it.\")\n                return False\n\n        return True\n\n    def should_skip_for_inplace_encryption(self, device_item, special_azure_devices_to_skip, encrypt_volume_type):\n        \"\"\"\n        TYPE=\"raid0\"\n        TYPE=\"part\"\n        TYPE=\"crypt\"\n\n        first check whether there is a file system on the device.\n        if the type is disk, check whether it has child items (part, lvm, or crypt luks).\n        if it does, skip it.\n        \"\"\"\n\n        if encrypt_volume_type.lower() == 'data' and not self.is_data_disk(device_item, special_azure_devices_to_skip):\n            return True  # skip devices that are not data disks\n\n        if device_item.file_system is None or device_item.file_system == \"\":\n            self.logger.log(msg=(\"there's no file system on this device: {0}, so skip it.\").format(device_item))\n            return True\n        else:\n            if device_item.size < CommonVariables.min_filesystem_size_support:\n                self.logger.log(msg=\"the device size is too small, \" + str(device_item.size) + \", so skip it.\", level=CommonVariables.WarningLevel)\n                return True\n\n            supported_device_type = [\"disk\", \"part\", \"raid0\", \"raid1\", \"raid5\", \"raid10\", \"lvm\"]\n            if device_item.type not in supported_device_type:\n                self.logger.log(msg=\"the device type: \" + str(device_item.type) + \" is not supported yet, so skip it.\", level=CommonVariables.WarningLevel)\n                return True\n\n            if device_item.uuid is None or device_item.uuid == \"\":\n                self.logger.log(msg=\"the device does not have a related uuid, so skip it.\", level=CommonVariables.WarningLevel)\n                return True\n\n            sub_items = self.get_device_items(\"/dev/\" + device_item.name)\n            if len(sub_items) > 1:\n                self.logger.log(msg=(\"there are sub items for the device: {0}, so skip it.\".format(device_item.name)), level=CommonVariables.WarningLevel)\n                
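# a device that already carries child items (partitions, lvm volumes, or a crypt layer) cannot be encrypted in place\n                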
return True\n\n            if device_item.type == \"crypt\":\n                self.logger.log(msg=(\"device_item.type is:{0}, so skip it.\".format(device_item.type)), level=CommonVariables.WarningLevel)\n                return True\n\n            if device_item.mount_point == \"/\":\n                self.logger.log(msg=(\"the mountpoint is root:{0}, so skip it.\".format(device_item)), level=CommonVariables.WarningLevel)\n                return True\n\n            for azure_blk_item in special_azure_devices_to_skip:\n                if azure_blk_item.name == device_item.name:\n                    self.logger.log(msg=\"the mountpoint is the azure disk root or resource, so skip it.\")\n                    return True\n            return False\n\n    def get_azure_devices(self):\n        ide_devices = self.get_ide_devices()\n        blk_items = []\n        for ide_device in ide_devices:\n            current_blk_items = self.get_device_items(\"/dev/\" + ide_device)\n            for current_blk_item in current_blk_items:\n                blk_items.append(current_blk_item)\n        return blk_items\n\n    def get_ide_devices(self):\n        \"\"\"\n        return only the device names of IDE-attached devices.\n        \"\"\"\n        ide_devices = []\n        for vmbus in os.listdir(self.vmbus_sys_path):\n            with open('%s/%s/%s' % (self.vmbus_sys_path, vmbus, 'class_id'), 'r') as f:\n                class_id = f.read()\n            if class_id.strip() == self.ide_class_id:\n                device_sdx_path = self.find_block_sdx_path(vmbus)\n                self.logger.log(\"found one ide with vmbus: {0} and the sdx path is: {1}\".format(vmbus, device_sdx_path))\n                ide_devices.append(device_sdx_path)\n        return ide_devices\n\n    def find_block_sdx_path(self, vmbus):\n        device = None\n        for root, dirs, files in os.walk(os.path.join(self.vmbus_sys_path, vmbus)):\n            if root.endswith(\"/block\"):\n                device = dirs[0]\n            else:  # older distros\n                for d in dirs:\n                    if ':' in d and \"block\" == d.split(':')[0]:\n                        device = d.split(':')[1]\n                        break\n        return device\n"
  },
  {
    "path": "VMEncryption/main/EncryptionConfig.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport datetime\nimport os.path\n\nfrom Common import CommonVariables\nfrom ConfigParser import ConfigParser\nfrom ConfigUtil import ConfigUtil\nfrom ConfigUtil import ConfigKeyValuePair\n\nclass EncryptionConfig(object):\n    def __init__(self, encryption_environment, logger):\n        self.encryption_environment = encryption_environment\n        self.passphrase_file_name = None\n        self.volume_type = None\n        self.secret_id = None\n        self.secret_seq_num = None\n        self.encryption_config = ConfigUtil(encryption_environment.encryption_config_file_path,\n                                            'azure_crypt_config',\n                                            logger)\n        self.logger = logger\n\n    def config_file_exists(self):\n        return self.encryption_config.config_file_exists()\n\n    def get_bek_filename(self):\n        return self.encryption_config.get_config(CommonVariables.PassphraseFileNameKey)\n\n    def get_volume_type(self):\n        return self.encryption_config.get_config(CommonVariables.VolumeTypeKey)\n\n    def get_secret_id(self):\n        return self.encryption_config.get_config(CommonVariables.SecretUriKey)\n\n    def get_secret_seq_num(self):\n        return self.encryption_config.get_config(CommonVariables.SecretSeqNum)\n\n    def commit(self):\n        key_value_pairs = []\n        command = ConfigKeyValuePair(CommonVariables.PassphraseFileNameKey, self.passphrase_file_name)\n        key_value_pairs.append(command)\n        volume_type = ConfigKeyValuePair(CommonVariables.VolumeTypeKey, self.volume_type)\n        key_value_pairs.append(volume_type)\n        parameters = ConfigKeyValuePair(CommonVariables.SecretUriKey, self.secret_id)\n        key_value_pairs.append(parameters)\n        parameters = ConfigKeyValuePair(CommonVariables.SecretSeqNum, self.secret_seq_num)\n        key_value_pairs.append(parameters)\n        self.encryption_config.save_configs(key_value_pairs)\n\n    def clear_config(self):\n        try:\n            if os.path.exists(self.encryption_environment.encryption_config_file_path):\n                self.logger.log(msg=\"archiving the encryption config file: {0}\".format(self.encryption_environment.encryption_config_file_path))\n                time_stamp = datetime.datetime.now()\n                new_name = \"{0}_{1}\".format(self.encryption_environment.encryption_config_file_path, time_stamp)\n                os.rename(self.encryption_environment.encryption_config_file_path, new_name)\n            else:\n                self.logger.log(msg=(\"the config file not exist: {0}\".format(self.encryption_environment.encryption_config_file_path)), level = CommonVariables.WarningLevel)\n            return True\n        except OSError as e:\n            self.logger.log(\"Failed to archive encryption config with error: {0}, stack trace: {1}\".format(e, 
traceback.format_exc()))\n            return False\n"
  },
  {
    "path": "VMEncryption/main/EncryptionEnvironment.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport subprocess\nfrom subprocess import *\n\nclass EncryptionEnvironment(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, patching, logger):\n        self.patching = patching\n        self.logger = logger\n        self.encryption_config_path = '/var/lib/azure_disk_encryption_config/'\n        # Lock file for daemon.\n        self.daemon_lock_file_path = os.path.join(self.encryption_config_path, 'daemon_lock_file.lck')\n        self.encryption_config_file_path = os.path.join(self.encryption_config_path, 'azure_crypt_config.ini')\n        self.extension_parameter_file_path = os.path.join(self.encryption_config_path, 'azure_crypt_params.ini')\n        self.azure_crypt_mount_config_path = os.path.join(self.encryption_config_path, 'azure_crypt_mount')\n        self.azure_crypt_request_queue_path = os.path.join(self.encryption_config_path, 'azure_crypt_request_queue.ini')\n        self.azure_decrypt_request_queue_path = os.path.join(self.encryption_config_path, 'azure_decrypt_request_queue.ini')\n        self.azure_crypt_ongoing_item_config_path = os.path.join(self.encryption_config_path, 'azure_crypt_ongoing_item.ini')\n        self.azure_crypt_current_transactional_copy_path = os.path.join(self.encryption_config_path, 'azure_crypt_copy_progress.ini')\n        self.luks_header_base_path = os.path.join(self.encryption_config_path, 'azureluksheader')\n        self.cleartext_key_base_path = os.path.join(self.encryption_config_path, 'cleartext_key')\n        self.copy_header_slice_file_path = os.path.join(self.encryption_config_path, 'copy_header_slice_file')\n        self.copy_slice_item_backup_file = os.path.join(self.encryption_config_path, 'copy_slice_item.bak')\n        self.os_encryption_markers_path = os.path.join(self.encryption_config_path, 'os_encryption_markers')\n        self.bek_backup_path = os.path.join(self.encryption_config_path, 'bek_backup')\n\n    def get_se_linux(self):\n        proc = Popen([self.patching.getenforce_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        identity, err = proc.communicate()\n        return identity.strip().lower()\n\n    def disable_se_linux(self):\n        self.logger.log(\"disabling se linux\")\n        proc = Popen([self.patching.setenforce_path,'0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        return_code = proc.wait()\n        return return_code\n\n    def enable_se_linux(self):\n        self.logger.log(\"enabling se linux\")\n        proc = Popen([self.patching.setenforce_path,'1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        return_code = proc.wait()\n        return return_code\n"
  },
  {
    "path": "VMEncryption/main/EncryptionMarkConfig.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport traceback\nfrom ConfigUtil import *\nfrom Common import CommonVariables\n\nclass EncryptionMarkConfig(object):\n    def __init__(self, logger, encryption_environment):\n        self.logger = logger\n        self.encryption_environment = encryption_environment\n        self.command = None\n        self.volume_type = None\n        self.diskFormatQuery = None\n        self.encryption_mark_config = ConfigUtil(self.encryption_environment.azure_crypt_request_queue_path,\n                                                 'encryption_request_queue',\n                                                 self.logger)\n\n    def get_volume_type(self):\n        return self.encryption_mark_config.get_config(CommonVariables.EncryptionVolumeTypeKey)\n\n    def get_current_command(self):\n        return self.encryption_mark_config.get_config(CommonVariables.EncryptionEncryptionOperationKey)\n\n    def get_encryption_disk_format_query(self):\n        return self.encryption_mark_config.get_config(CommonVariables.EncryptionDiskFormatQueryKey)\n\n    def config_file_exists(self):\n        \"\"\"\n        we should compare the timestamp of the file with the current system time\n        if not match (in 30 minutes, then should skip the file)\n        \"\"\"\n        return self.encryption_mark_config.config_file_exists()\n    \n    def commit(self):\n        key_value_pairs = []\n        command = ConfigKeyValuePair(CommonVariables.EncryptionEncryptionOperationKey, self.command)\n        key_value_pairs.append(command)\n        volume_type = ConfigKeyValuePair(CommonVariables.EncryptionVolumeTypeKey, self.volume_type)\n        key_value_pairs.append(volume_type)\n        disk_format_query = ConfigKeyValuePair(CommonVariables.EncryptionDiskFormatQueryKey, self.diskFormatQuery)\n        key_value_pairs.append(disk_format_query)\n        self.encryption_mark_config.save_configs(key_value_pairs)\n\n    def clear_config(self):\n        try:\n            if os.path.exists(self.encryption_environment.azure_crypt_request_queue_path):\n                os.remove(self.encryption_environment.azure_crypt_request_queue_path)\n            return True\n        except OSError as e:\n            self.logger.log(\"Failed to clear_queue with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return False"
  },
  {
    "path": "VMEncryption/main/ExtensionParameter.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nimport xml.parsers.expat\n\nfrom DiskUtil import DiskUtil\nfrom BekUtil import BekUtil\nfrom EncryptionConfig import EncryptionConfig\nfrom Utils import HandlerUtil\nfrom Common import *\nfrom ConfigParser import ConfigParser\nfrom ConfigUtil import ConfigUtil\nfrom ConfigUtil import ConfigKeyValuePair\nimport os.path\n\n# parameter format should be like this:\n#{\"command\":\"enableencryption\",\"query\":[{\"source_scsi_number\":\"[5:0:0:0]\",\"target_scsi_number\":\"[5:0:0:2]\"},{\"source_scsi_number\":\"[5:0:0:1]\",\"target_scsi_number\":\"[5:0:0:3]\"}],\n#\"force\":\"true\", \"passphrase\":\"User@123\"}\nclass ExtensionParameter(object):\n    def __init__(self, hutil, logger, distro_patcher, encryption_environment, protected_settings, public_settings):\n        \"\"\"\n        TODO: we should validate the parameter first\n        \"\"\"\n        self.hutil = hutil\n        self.logger = logger\n        self.distro_patcher = distro_patcher\n        self.encryption_environment = encryption_environment\n\n        self.disk_util = DiskUtil(hutil=hutil, patching=distro_patcher, logger=logger, encryption_environment=encryption_environment)\n        self.bek_util = BekUtil(self.disk_util, logger)\n        self.encryption_config = EncryptionConfig(encryption_environment, logger)\n\n        self.command = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        self.KeyEncryptionKeyURL = public_settings.get(CommonVariables.KeyEncryptionKeyURLKey)\n        self.KeyVaultURL = public_settings.get(CommonVariables.KeyVaultURLKey)\n        self.AADClientID = public_settings.get(CommonVariables.AADClientIDKey)\n        self.AADClientCertThumbprint = public_settings.get(CommonVariables.AADClientCertThumbprintKey)\n\n        keyEncryptionAlgorithm = public_settings.get(CommonVariables.KeyEncryptionAlgorithmKey)\n        if keyEncryptionAlgorithm is not None and keyEncryptionAlgorithm !=\"\":\n            self.KeyEncryptionAlgorithm = keyEncryptionAlgorithm\n        else:\n            self.KeyEncryptionAlgorithm = 'RSA-OAEP'\n\n        self.VolumeType = public_settings.get(CommonVariables.VolumeTypeKey)\n        self.DiskFormatQuery = public_settings.get(CommonVariables.DiskFormatQuerykey)\n\n        \"\"\"\n        private settings\n        \"\"\"\n        self.AADClientSecret = protected_settings.get(CommonVariables.AADClientSecretKey)\n\n        if self.AADClientSecret is None:\n            self.AADClientSecret = ''\n\n        self.passphrase = protected_settings.get(CommonVariables.PassphraseKey)\n\n        self.DiskEncryptionKeyFileName = \"LinuxPassPhraseFileName\"\n        # parse the query from the array\n\n        self.params_config = ConfigUtil(encryption_environment.extension_parameter_file_path,\n                                        'azure_extension_params',\n                                
        logger)\n\n    def config_file_exists(self):\n        return self.params_config.config_file_exists()\n\n    def get_command(self):\n        return self.params_config.get_config(CommonVariables.EncryptionEncryptionOperationKey)\n\n    def get_kek_url(self):\n        return self.params_config.get_config(CommonVariables.KeyEncryptionKeyURLKey)\n\n    def get_keyvault_url(self):\n        return self.params_config.get_config(CommonVariables.KeyVaultURLKey)\n\n    def get_aad_client_id(self):\n        return self.params_config.get_config(CommonVariables.AADClientIDKey)\n\n    def get_aad_client_secret(self):\n        return self.params_config.get_config(CommonVariables.AADClientSecretKey)\n\n    def get_aad_client_cert(self):\n        return self.params_config.get_config(CommonVariables.AADClientCertThumbprintKey)\n\n    def get_kek_algorithm(self):\n        return self.params_config.get_config(CommonVariables.KeyEncryptionAlgorithmKey)\n\n    def get_volume_type(self):\n        return self.params_config.get_config(CommonVariables.VolumeTypeKey)\n\n    def get_disk_format_query(self):\n        return self.params_config.get_config(CommonVariables.DiskFormatQuerykey)\n\n    def get_bek_filename(self):\n        return self.DiskEncryptionKeyFileName\n\n    def commit(self):\n        key_value_pairs = []\n\n        command = ConfigKeyValuePair(CommonVariables.EncryptionEncryptionOperationKey, self.command)\n        key_value_pairs.append(command)\n\n        KeyEncryptionKeyURL = ConfigKeyValuePair(CommonVariables.KeyEncryptionKeyURLKey, self.KeyEncryptionKeyURL)\n        key_value_pairs.append(KeyEncryptionKeyURL)\n\n        KeyVaultURL = ConfigKeyValuePair(CommonVariables.KeyVaultURLKey, self.KeyVaultURL)\n        key_value_pairs.append(KeyVaultURL)\n\n        AADClientID = ConfigKeyValuePair(CommonVariables.AADClientIDKey, self.AADClientID)\n        key_value_pairs.append(AADClientID)\n\n        AADClientSecret = ConfigKeyValuePair(CommonVariables.AADClientSecretKey, hashlib.sha256(self.AADClientSecret.encode(\"utf-8\")).hexdigest())\n        key_value_pairs.append(AADClientSecret)\n\n        AADClientCertThumbprint = ConfigKeyValuePair(CommonVariables.AADClientCertThumbprintKey, self.AADClientCertThumbprint)\n        key_value_pairs.append(AADClientCertThumbprint)\n\n        KeyEncryptionAlgorithm = ConfigKeyValuePair(CommonVariables.KeyEncryptionAlgorithmKey, self.KeyEncryptionAlgorithm)\n        key_value_pairs.append(KeyEncryptionAlgorithm)\n\n        VolumeType = ConfigKeyValuePair(CommonVariables.VolumeTypeKey, self.VolumeType)\n        key_value_pairs.append(VolumeType)\n\n        DiskFormatQuery = ConfigKeyValuePair(CommonVariables.DiskFormatQuerykey, self.DiskFormatQuery)\n        key_value_pairs.append(DiskFormatQuery)\n\n        self.params_config.save_configs(key_value_pairs)\n\n    def clear_config(self):\n        try:\n            if os.path.exists(self.encryption_environment.encryption_config_file_path):\n                self.logger.log(msg=\"archiving the encryption config file: {0}\".format(self.encryption_environment.encryption_config_file_path))\n                time_stamp = datetime.datetime.now()\n                new_name = \"{0}_{1}\".format(self.encryption_environment.encryption_config_file_path, time_stamp)\n                os.rename(self.encryption_environment.encryption_config_file_path, new_name)\n            else:\n                self.logger.log(msg=(\"the config file does not exist: {0}\".format(self.encryption_environment.encryption_config_file_path)), level = 
CommonVariables.WarningLevel)\n            return True\n        except OSError as e:\n            self.logger.log(\"Failed to archive encryption config with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return False\n\n    def _is_encrypt_command(self, command):\n        return command in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]\n\n    def config_changed(self):\n        # Even if the commands are not exactly the same, if they're both encrypt commands, don't consider this a change\n        if (self.command or self.get_command()) and \\\n           (self.command != self.get_command() and \\\n           not (self._is_encrypt_command(self.command) and self._is_encrypt_command(self.get_command()))):\n            self.logger.log('Current config command {0} differs from effective config command {1}'.format(self.command, self.get_command()))\n            return True\n\n        if (self.KeyEncryptionKeyURL or self.get_kek_url()) and \\\n           (self.KeyEncryptionKeyURL != self.get_kek_url()):\n            self.logger.log('Current config KeyEncryptionKeyURL {0} differs from effective config KeyEncryptionKeyURL {1}'.format(self.KeyEncryptionKeyURL, self.get_kek_url()))\n            return True\n\n        if (self.KeyVaultURL or self.get_keyvault_url()) and \\\n           (self.KeyVaultURL != self.get_keyvault_url()):\n            self.logger.log('Current config KeyVaultURL {0} differs from effective config KeyVaultURL {1}'.format(self.KeyVaultURL, self.get_keyvault_url()))\n            return True\n\n        if (self.AADClientID or self.get_aad_client_id()) and \\\n           (self.AADClientID != self.get_aad_client_id()):\n            self.logger.log('Current config AADClientID {0} differs from effective config AADClientID {1}'.format(self.AADClientID, self.get_aad_client_id()))\n            return True\n\n        if (self.AADClientSecret or self.get_aad_client_secret()) and \\\n           (hashlib.sha256(self.AADClientSecret.encode(\"utf-8\")).hexdigest() != self.get_aad_client_secret()):\n            self.logger.log('Current config AADClientSecret {0} differs from effective config AADClientSecret {1}'.format(hashlib.sha256(self.AADClientSecret.encode(\"utf-8\")).hexdigest(),\n                                                                                                                          self.get_aad_client_secret()))\n            return True\n\n        if (self.AADClientCertThumbprint or self.get_aad_client_cert()) and \\\n           (self.AADClientCertThumbprint != self.get_aad_client_cert()):\n            self.logger.log('Current config AADClientCertThumbprint {0} differs from effective config AADClientCertThumbprint {1}'.format(self.AADClientCertThumbprint, self.get_aad_client_cert()))\n            return True\n\n        if (self.KeyEncryptionAlgorithm or self.get_kek_algorithm()) and \\\n           (self.KeyEncryptionAlgorithm != self.get_kek_algorithm()):\n            self.logger.log('Current config KeyEncryptionAlgorithm {0} differs from effective config KeyEncryptionAlgorithm {1}'.format(self.KeyEncryptionAlgorithm, self.get_kek_algorithm()))\n            return True\n\n        bek_passphrase_file_name = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        bek_passphrase = None\n        if bek_passphrase_file_name is not None and os.path.exists(bek_passphrase_file_name):\n            bek_passphrase = file(bek_passphrase_file_name).read()\n\n        
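# compare the passphrase supplied in protected settings with the one cached in the BEK file\n        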
if (self.passphrase and bek_passphrase) and \\\n           (self.passphrase != bek_passphrase):\n            self.logger.log('Current config passphrase differs from effective config passphrase')\n            return True\n\n        self.logger.log('Current config is not different from effective config')\n        return False\n"
  },
  {
    "path": "VMEncryption/main/HttpUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport datetime\nimport traceback\nimport urlparse\nimport httplib\nimport shlex\nimport subprocess\nfrom Common import CommonVariables\nfrom subprocess import *\nfrom Utils.WAAgentUtil import waagent\n\nclass HttpUtil(object):\n    \"\"\"description of class\"\"\"\n    def __init__(self, logger):\n        self.logger = logger\n        try:\n            waagent.MyDistro = waagent.GetMyDistro()\n            Config = waagent.ConfigurationProvider(None)\n        except Exception as e:\n            errorMsg = \"Failed to construct ConfigurationProvider, which may due to the old wala code.\"\n            self.logger.log(errorMsg)\n            Config = waagent.ConfigurationProvider()\n        self.proxyHost = Config.get(\"HttpProxy.Host\")\n        self.proxyPort = Config.get(\"HttpProxy.Port\")\n        self.connection = None\n\n    \"\"\"\n    snapshot also called this. so we should not write the file/read the file in this method.\n    \"\"\"\n\n    def Call(self, method, http_uri, data, headers):\n        try:\n            uri_obj = urlparse.urlparse(http_uri)\n            #parse the uri str here\n            if self.proxyHost is None or self.proxyPort is None:\n                self.connection = httplib.HTTPSConnection(uri_obj.hostname, timeout = 10)\n                if uri_obj.query is not None:\n                    self.connection.request(method = method, url=(uri_obj.path +'?'+ uri_obj.query), body = data, headers = headers)\n                else:\n                    self.connection.request(method = method, url=(uri_obj.path), body = data, headers = headers)\n                resp = self.connection.getresponse()\n            else:\n                self.logger.log(\"proxyHost is not empty, so use the proxy to call the http.\")\n                self.connection = httplib.HTTPSConnection(self.proxyHost, self.proxyPort, timeout = 10)\n                if uri_obj.scheme.lower() == \"https\":\n                    self.connection.set_tunnel(uri_obj.hostname, 443)\n                else:\n                    self.connection.set_tunnel(uri_obj.hostname, 80)\n                self.connection.request(method = method, url = (http_uri), body = data, headers = headers)\n                resp = self.connection.getresponse()\n            return resp\n        except Exception as e:\n            errorMsg = \"Failed to call http with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n            self.logger.log(errorMsg)\n            return None"
  },
  {
    "path": "VMEncryption/main/KeyVaultUtil.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport httplib\nimport urllib\nimport json\nimport uuid\nimport base64\nimport traceback\nimport re\nimport os\nimport subprocess\n\nfrom tempfile import mkstemp \nfrom HttpUtil import HttpUtil\nfrom urlparse import urlparse\n\nclass KeyVaultUtil(object):\n    def __init__(self, logger):\n        self.api_version = \"2015-06-01\"\n        self.logger = logger\n\n    def urljoin(self,*args):\n        \"\"\"\n        Joins given arguments into a url. Trailing but not leading slashes are\n        stripped for each argument.\n        \"\"\"\n        return \"/\".join(map(lambda x: str(x).rstrip('/'), args))\n\n    \"\"\"\n    The Passphrase is a plain encoded string. before the encryption it would be base64encoding.\n    return the secret uri if creation successfully.\n    \"\"\"\n    def create_kek_secret(self, Passphrase, KeyVaultURL, KeyEncryptionKeyURL, AADClientID, AADClientCertThumbprint, KeyEncryptionAlgorithm, AADClientSecret, DiskEncryptionKeyFileName):\n        try:\n            self.logger.log(\"start creating kek secret\")\n            passphrase_encoded = base64.standard_b64encode(Passphrase)\n            keys_uri = self.urljoin(KeyVaultURL, \"keys\")\n\n            http_util = HttpUtil(self.logger)\n            headers = {}\n            result = http_util.Call(method='GET', http_uri=keys_uri, data=None, headers=headers)\n            http_util.connection.close()\n            \"\"\"\n            get the access token \n            \"\"\"\n            self.logger.log(\"getting the access token.\")\n            bearerHeader = result.getheader(\"www-authenticate\")\n\n            authorize_uri = self.get_authorize_uri(bearerHeader)\n            if authorize_uri is None:\n                self.logger.log(\"the authorize uri is None\")\n                return None\n\n            parsed_url = urlparse(KeyVaultURL)\n            vault_domain = re.findall(r\".*(vault.*)\", parsed_url.netloc)[0]\n            kv_resource_name = parsed_url.scheme + '://' + vault_domain\n\n            access_token = self.get_access_token(kv_resource_name, authorize_uri, AADClientID, AADClientCertThumbprint, AADClientSecret)\n            if access_token is None:\n                self.logger.log(\"the access token is None\")\n                return None\n\n            \"\"\"\n            we should skip encrypting the passphrase if the KeyVaultURL and KeyEncryptionKeyURL is empty\n            \"\"\"\n            if KeyEncryptionKeyURL is None or KeyEncryptionKeyURL == \"\":\n                secret_value = passphrase_encoded\n            else:\n                secret_value = self.encrypt_passphrase(access_token, passphrase_encoded, KeyVaultURL, KeyEncryptionKeyURL, AADClientID, KeyEncryptionAlgorithm, AADClientSecret)\n            if secret_value is None:\n                self.logger.log(\"secret value is None\")\n                return None\n\n            
secret_id = self.create_secret(access_token, KeyVaultURL, secret_value, KeyEncryptionAlgorithm, DiskEncryptionKeyFileName)\n\n            return secret_id\n        except Exception as e:\n            self.logger.log(\"Failed to create_kek_secret with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            raise\n\n    def is_adal_available(self):\n        try:\n            import adal\n            self.logger.log('Python ADAL library is natively available on the system')\n            return True\n        except:            \n            self.logger.log('Python ADAL library is not natively available on the system')\n            return False\n\n    def is_scl_adal_available(self):\n        try:\n            subprocess.check_call(['scl', 'enable', 'python27', \"python -c 'import adal'\"])\n            self.logger.log('Python ADAL library is available on the system via SCL')\n            return True\n        except:\n            self.logger.log('Python ADAL library is not available on the system via SCL')\n            return False\n\n    def get_access_token_with_certificate(self, KeyVaultResourceName, AuthorizeUri, AADClientID, AADClientCertThumbprint):\n        # construct path to the private key file which is stored and managed by waagent inside of the lib directory\n        import waagent\n        prv_path = os.path.join(waagent.LibDir, AADClientCertThumbprint.upper() + '.prv')\n\n        if self.is_adal_available():\n            import adal\n            prv_data = waagent.GetFileContents(prv_path)\n            context = adal.AuthenticationContext(AuthorizeUri)\n            result_json = context.acquire_token_with_client_certificate(KeyVaultResourceName, AADClientID, prv_data, AADClientCertThumbprint)\n            access_token = result_json[\"accessToken\"]\n            return access_token\n        elif self.is_scl_adal_available():\n            # On RHEL, support for python-pip and the adal library are made available outside of default python via SCL \n            tmp_data = { \"auth\": AuthorizeUri, \"resource\": KeyVaultResourceName, \"client\": AADClientID, \"certificate\": prv_path, \"thumbprint\": AADClientCertThumbprint}\n            tmp_fd, tmp_path = mkstemp()\n            with open(tmp_path,'w') as tmp_file:\n                json.dump(tmp_data,tmp_file)\n            os.close(tmp_fd)\n            tok_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'TokenUtil.py')\n            scl_args = 'python ' + tok_path + ' ' + tmp_path\n            access_token = subprocess.check_output(['scl', 'enable', 'python27', scl_args]).rstrip()\n            if os.path.isfile(tmp_path): \n                os.remove(tmp_path)\n            return access_token\n        else:\n            raise Exception('Python ADAL library required for client certificate authentication was not found')\n\n    def get_access_token(self, KeyVaultResourceName, AuthorizeUri, AADClientID, AADClientCertThumbprint, AADClientSecret):\n        if not AADClientSecret and not AADClientCertThumbprint:\n            raise ValueError(\"Missing Credentials.  
Either AADClientSecret or AADClientCertThumbprint must be specified\")\n\n        if AADClientSecret and AADClientCertThumbprint:\n            raise ValueError(\"Both AADClientSecret and AADClientCertThumbprint were supplied, when only one of these was expected.\")\n\n        if AADClientCertThumbprint:\n            return self.get_access_token_with_certificate(KeyVaultResourceName, AuthorizeUri, AADClientID, AADClientCertThumbprint)\n        else:\n            # retrieve access token directly, adal library not required\n            token_uri = AuthorizeUri + \"/oauth2/token\"\n            request_content = \"resource=\" + urllib.quote(KeyVaultResourceName) + \"&client_id=\" + AADClientID + \"&client_secret=\" + urllib.quote(AADClientSecret) + \"&grant_type=client_credentials\"\n            headers = {}\n            http_util = HttpUtil(self.logger)\n            result = http_util.Call(method='POST', http_uri=token_uri, data=request_content, headers=headers)\n\n            self.logger.log(\"{0} {1}\".format(result.status, result.getheaders()))\n            result_content = result.read()\n            if result.status != httplib.OK and result.status != httplib.ACCEPTED:\n                self.logger.log(str(result_content))\n                return None\n            http_util.connection.close()\n\n            result_json = json.loads(result_content)\n            access_token = result_json[\"access_token\"]\n            return access_token\n\n    \"\"\"\n    return the encrypted secret value on success, else return None\n    \"\"\"\n    def encrypt_passphrase(self, AccessToken, Passphrase, KeyVaultURL, KeyEncryptionKeyURL, AADClientID, KeyEncryptionAlgorithm, AADClientSecret):\n        try:\n            \"\"\"\n            wrap our passphrase using the encryption key\n            api ref for wrapkey: https://msdn.microsoft.com/en-us/library/azure/dn878066.aspx\n            \"\"\"\n            self.logger.log(\"encrypting the secret using key: \" + KeyEncryptionKeyURL)\n\n            request_content = '{\"alg\":\"' + str(KeyEncryptionAlgorithm) + '\",\"value\":\"' + str(Passphrase) + '\"}'\n            headers = {}\n            headers[\"Content-Type\"] = \"application/json\"\n            headers[\"Authorization\"] = \"Bearer \" + str(AccessToken)\n            relative_path = KeyEncryptionKeyURL + \"/wrapkey\" + '?api-version=' + self.api_version\n            http_util = HttpUtil(self.logger)\n            result = http_util.Call(method='POST', http_uri=relative_path, data=request_content, headers=headers)\n\n            result_content = result.read()\n            self.logger.log(\"result_content is: {0}\".format(result_content))\n            self.logger.log(\"{0} {1}\".format(result.status, result.getheaders()))\n            if result.status != httplib.OK and result.status != httplib.ACCEPTED:\n                return None\n            http_util.connection.close()\n            result_json = json.loads(result_content)\n            secret_value = result_json[u'value']\n            return secret_value\n        except Exception as e:\n            self.logger.log(\"Failed to encrypt_passphrase with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return None\n\n    def create_secret(self, AccessToken, KeyVaultURL, secret_value, KeyEncryptionAlgorithm, DiskEncryptionKeyFileName):\n        \"\"\"\n        create secret api https://msdn.microsoft.com/en-us/library/azure/dn903618.aspx\n        https://mykeyvault.vault.azure.net/secrets/{secret-name}?api-version={api-version}\n   
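     (the secret name is a random uuid; the value is the base64-encoded or KEK-wrapped passphrase)\n   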
     \"\"\"\n        try:\n            secret_name = str(uuid.uuid4())\n            secret_keyvault_uri = self.urljoin(KeyVaultURL, \"secrets\", secret_name)\n            self.logger.log(\"secret_keyvault_uri is: {0} and keyvault_uri is:{1}\".format(secret_keyvault_uri, KeyVaultURL))\n            if KeyEncryptionAlgorithm is None:\n                request_content = '{{\"value\":\"{0}\",\"attributes\":{{\"enabled\":\"true\"}},\"tags\":{{\"DiskEncryptionKeyFileName\":\"{1}\"}}}}'\\\n                    .format(str(secret_value), DiskEncryptionKeyFileName)\n            else:\n                request_content = '{{\"value\":\"{0}\",\"attributes\":{{\"enabled\":\"true\"}},\"tags\":{{\"DiskEncryptionKeyEncryptionAlgorithm\":\"{1}\",\"DiskEncryptionKeyFileName\":\"{2}\"}}}}'\\\n                    .format(str(secret_value), KeyEncryptionAlgorithm, DiskEncryptionKeyFileName)\n            http_util = HttpUtil(self.logger)\n            headers = {}\n            headers[\"Content-Type\"] = \"application/json\"\n            headers[\"Authorization\"] = \"Bearer \" + AccessToken\n            result = http_util.Call(method='PUT', http_uri=secret_keyvault_uri + '?api-version=' + self.api_version, data=request_content, headers=headers)\n\n            self.logger.log(\"{0} {1}\".format(result.status, result.getheaders()))\n            result_content = result.read()\n            # Do NOT log the result_content. It contains the uploaded secret and we don't want that in the logs.\n            result_json = json.loads(result_content)\n            secret_id = result_json[\"id\"]\n            http_util.connection.close()\n            if result.status != httplib.OK and result.status != httplib.ACCEPTED:\n                self.logger.log(\"the result status failed.\")\n                return None\n            return secret_id\n        except Exception as e:\n            self.logger.log(\"Failed to create_secret with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return None\n\n    def get_authorize_uri(self, bearerHeader):\n        \"\"\"\n        Bearer authorization=\"https://login.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47\", resource=\"https://vault.azure.net\"\n        \"\"\"\n        try:\n            self.logger.log(\"trying to get the authorize uri from: \" + str(bearerHeader))\n            bearerString = str(bearerHeader)\n            authorization_key = 'authorization=\"'\n            authoirzation_index = bearerString.index(authorization_key)\n            bearerString = bearerString[(authoirzation_index + len(authorization_key)):]\n            bearerString = bearerString[0:bearerString.index('\"')]\n\n            return bearerString\n        except Exception as e:\n            self.logger.log(\"Failed to get_authorize_uri with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return None"
  },
  {
    "path": "VMEncryption/main/MachineIdentity.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport xml\nimport xml.dom.minidom\n\n\nclass MachineIdentity:\n    def __init__(self):\n        self.store_identity_file = './machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123365C'\n\n    def current_identity(self):\n        with open(\"/var/lib/waagent/HostingEnvironmentConfig.xml\",'r') as file:\n            xmlText = file.read()\n            dom = xml.dom.minidom.parseString(xmlText)\n            deployment = dom.getElementsByTagName(\"Role\")\n            identity = deployment[0].getAttribute(\"guid\")\n            return identity\n\n    def save_identity(self):\n        with open(self.store_identity_file,'w') as file:\n            machine_identity = self.current_identity()\n            file.write(machine_identity)\n\n    def stored_identity(self):\n        identity_stored = None\n        if os.path.exists(self.store_identity_file):\n            with open(self.store_identity_file,'r') as file:\n                identity_stored = file.read()\n        return identity_stored\n\n"
  },
  {
    "path": "VMEncryption/main/OnGoingItemConfig.py",
    "content": "#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path\nimport uuid\nimport time\nimport datetime\nfrom Common import CommonVariables\nfrom ConfigParser import ConfigParser\nfrom ConfigUtil import ConfigUtil\nfrom ConfigUtil import ConfigKeyValuePair\n\n\nclass OnGoingItemConfig(object):\n    def __init__(self, encryption_environment, logger):\n        self.encryption_environment = encryption_environment\n        self.logger = logger\n        self.original_dev_name_path = None\n        self.original_dev_path = None\n        self.mapper_name = None\n        self.luks_header_file_path = None\n        self.phase = None\n        self.file_system = None\n        self.mount_point = None \n        self.device_size = None\n        self.from_end = None\n        self.header_slice_file_path = None\n        self.current_block_size = None\n        self.current_source_path = None\n        self.current_total_copy_size = None\n        self.current_slice_index = None\n        self.current_destination = None\n        self.ongoing_item_config = ConfigUtil(encryption_environment.azure_crypt_ongoing_item_config_path, 'azure_crypt_ongoing_item_config', logger)\n\n    def config_file_exists(self):\n        return self.ongoing_item_config.config_file_exists()\n\n    def get_original_dev_name_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemOriginalDevNamePathKey)\n\n    def get_original_dev_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemOriginalDevPathKey)\n\n    def get_mapper_name(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemMapperNameKey)\n\n    def get_header_file_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemHeaderFilePathKey)\n\n    def get_phase(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemPhaseKey)\n\n    def get_header_slice_file_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemHeaderSliceFilePathKey)\n\n    def get_file_system(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemFileSystemKey)\n\n    def get_mount_point(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemMountPointKey)\n\n    def get_device_size(self):\n        device_size_value = self.ongoing_item_config.get_config(CommonVariables.OngoingItemDeviceSizeKey)\n        if device_size_value is None or device_size_value == \"\":\n            return None\n        else:\n            return long(device_size_value)\n\n    def get_current_slice_index(self):\n        current_slice_index_value = self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentSliceIndexKey)\n        if current_slice_index_value is None or current_slice_index_value == \"\":\n            return None\n        
else:\n            return long(current_slice_index_value)\n\n    def get_from_end(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemFromEndKey)\n\n    def get_current_block_size(self):\n        block_size_value = self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentBlockSizeKey)\n        if block_size_value is None or block_size_value == \"\":\n            return None\n        else:\n            return long(block_size_value)\n\n    def get_current_source_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentSourcePathKey)\n\n    def get_current_destination(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentDestinationKey)\n    \n    def get_current_total_copy_size(self):\n        total_copy_size_value = self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentTotalCopySizeKey)\n        if total_copy_size_value is None or total_copy_size_value == \"\":\n            return None\n        else:\n            return long(total_copy_size_value)\n\n    def get_luks_header_file_path(self):\n        return self.ongoing_item_config.get_config(CommonVariables.OngoingItemCurrentLuksHeaderFilePathKey)\n\n    def load_value_from_file(self):\n        self.original_dev_name_path = self.get_original_dev_name_path()\n        self.original_dev_path = self.get_original_dev_path()\n        self.mapper_name = self.get_mapper_name()\n        self.luks_header_file_path = self.get_luks_header_file_path()\n        self.phase = self.get_phase()\n        self.file_system = self.get_file_system()\n        self.mount_point = self.get_mount_point() \n        self.device_size = self.get_device_size()\n        self.from_end = self.get_from_end()\n        self.header_slice_file_path = self.get_header_slice_file_path()\n        self.current_block_size = self.get_current_block_size()\n        self.current_source_path = self.get_current_source_path()\n        self.current_total_copy_size = self.get_current_total_copy_size()\n        self.current_slice_index = self.get_current_slice_index()\n        self.current_destination = self.get_current_destination()\n\n    def commit(self):\n        key_value_pairs = []\n        original_dev_name_path_pair = ConfigKeyValuePair(CommonVariables.OngoingItemOriginalDevNamePathKey, self.original_dev_name_path)\n        key_value_pairs.append(original_dev_name_path_pair)\n\n        original_dev_path_pair = ConfigKeyValuePair(CommonVariables.OngoingItemOriginalDevPathKey, self.original_dev_path)\n        key_value_pairs.append(original_dev_path_pair)\n\n        mapper_name_pair = ConfigKeyValuePair(CommonVariables.OngoingItemMapperNameKey, self.mapper_name)\n        key_value_pairs.append(mapper_name_pair)\n\n        header_file_pair = ConfigKeyValuePair(CommonVariables.OngoingItemHeaderFilePathKey, self.luks_header_file_path)\n        key_value_pairs.append(header_file_pair)\n\n        phase_pair = ConfigKeyValuePair(CommonVariables.OngoingItemPhaseKey, self.phase)\n        key_value_pairs.append(phase_pair)\n\n        header_slice_file_pair = ConfigKeyValuePair(CommonVariables.OngoingItemHeaderSliceFilePathKey, self.header_slice_file_path)\n        key_value_pairs.append(header_slice_file_pair)\n\n        file_system_pair = ConfigKeyValuePair(CommonVariables.OngoingItemFileSystemKey, self.file_system)\n        key_value_pairs.append(file_system_pair)\n\n        mount_point_pair = ConfigKeyValuePair(CommonVariables.OngoingItemMountPointKey, 
self.mount_point)\n        key_value_pairs.append(mount_point_pair)\n\n        device_size_pair = ConfigKeyValuePair(CommonVariables.OngoingItemDeviceSizeKey, self.device_size)\n        key_value_pairs.append(device_size_pair)\n\n        current_slice_index_pair = ConfigKeyValuePair(CommonVariables.OngoingItemCurrentSliceIndexKey, self.current_slice_index)\n        key_value_pairs.append(current_slice_index_pair)\n\n        from_end_pair = ConfigKeyValuePair(CommonVariables.OngoingItemFromEndKey, self.from_end)\n        key_value_pairs.append(from_end_pair)\n\n        current_source_path_pair = ConfigKeyValuePair(CommonVariables.OngoingItemCurrentSourcePathKey, self.current_source_path)\n        key_value_pairs.append(current_source_path_pair)\n\n        current_destination_pair = ConfigKeyValuePair(CommonVariables.OngoingItemCurrentDestinationKey, self.current_destination)\n        key_value_pairs.append(current_destination_pair)\n\n        current_total_copy_size_pair = ConfigKeyValuePair(CommonVariables.OngoingItemCurrentTotalCopySizeKey, self.current_total_copy_size)\n        key_value_pairs.append(current_total_copy_size_pair)\n\n        current_block_size_pair = ConfigKeyValuePair(CommonVariables.OngoingItemCurrentBlockSizeKey, self.current_block_size)\n        key_value_pairs.append(current_block_size_pair)\n\n        self.ongoing_item_config.save_configs(key_value_pairs)\n\n    def clear_config(self):\n        try:\n            if os.path.exists(self.encryption_environment.azure_crypt_ongoing_item_config_path):\n                self.logger.log(msg=\"archive the config file: {0}\".format(self.encryption_environment.azure_crypt_ongoing_item_config_path))\n                time_stamp = datetime.datetime.now()\n                new_name = \"{0}_{1}\".format(self.encryption_environment.azure_crypt_ongoing_item_config_path, time_stamp)\n                os.rename(self.encryption_environment.azure_crypt_ongoing_item_config_path, new_name)\n            else:\n                self.logger.log(msg=(\"the config file does not exist: {0}\".format(self.encryption_environment.azure_crypt_ongoing_item_config_path)), level=CommonVariables.WarningLevel)\n            return True\n        except OSError as e:\n            self.logger.log(\"Failed to archive the ongoing item config with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n            return False\n\n    def __str__(self):\n        return \"dev_uuid_path is {0}, mapper_name is {1}, luks_header_file_path is {2}, phase is {3}, header_slice_file_path is {4}, file system is {5}, mount_point is {6}, device size is {7}\"\\\n                .format(self.original_dev_path, self.mapper_name, self.luks_header_file_path, self.phase, self.header_slice_file_path, self.file_system, self.mount_point, self.device_size)\n"
  },
  {
    "path": "VMEncryption/main/ProcessLock.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport fcntl\nfrom Common import CommonVariables\n\n\nclass ProcessLock(object):\n    def __init__(self, logger, lock_file_path):\n        self.logger = logger\n        self.lock_file_path = lock_file_path\n        self.fd = None\n\n    def try_lock(self):\n        try:\n            self.fd = open(self.lock_file_path, \"w\") \n            fcntl.flock(self.fd, fcntl.LOCK_EX)\n            return True\n        except Exception as e:\n            self.logger.log(\"could not acquire a lock, error: {0}\".format(str(e)))\n            return False\n\n    def release_lock(self):\n        fcntl.flock(self.fd, fcntl.LOCK_UN)\n        self.fd.close()"
  },
  {
    "path": "VMEncryption/main/ResourceDiskUtil.py",
    "content": "#!/usr/bin/env python\n#\n# *********************************************************\n# Copyright (c) Microsoft. All rights reserved.\n#\n# Apache 2.0 License\n#\n# You may obtain a copy of the License at\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n# *********************************************************\n\n\"\"\" Functionality to encrypt the Azure resource disk\"\"\"\n\nimport time\nimport os\n\nfrom CommandExecutor import CommandExecutor\nfrom Common import CommonVariables, CryptItem\n\n\nclass ResourceDiskUtil(object):\n    \"\"\" Resource Disk Encryption Utilities \"\"\"\n\n    RD_MOUNT_POINT = '/mnt/resource'\n    RD_BASE_DEV_PATH = os.path.join(CommonVariables.azure_symlinks_dir, 'resource')\n    RD_DEV_PATH = os.path.join(CommonVariables.azure_symlinks_dir, 'resource-part1')\n    DEV_DM_PREFIX = '/dev/dm-'\n    # todo: consolidate this and other key file path references\n    # (BekUtil.py, ExtensionParameter.py, and dracut patches)\n    RD_MAPPER_NAME = 'resourceencrypt'\n    RD_MAPPER_PATH = os.path.join(CommonVariables.dev_mapper_root, RD_MAPPER_NAME)\n\n    def __init__(self, logger, disk_util, passphrase_filename, public_settings, distro_info):\n        self.logger = logger\n        self.executor = CommandExecutor(self.logger)\n        self.disk_util = disk_util\n        self.passphrase_filename = passphrase_filename  # WARNING: This may be null, in which case we mount the resource disk if its unencrypted and do nothing if it is.\n        self.public_settings = public_settings\n        self.distro_info = distro_info\n\n    def _is_encrypt_format_all(self):\n        \"\"\" return true if current encryption operation is EncryptFormatAll \"\"\"\n        encryption_operation = self.public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation in [CommonVariables.EnableEncryptionFormatAll]:\n            return True\n        self.logger.log(\"Current encryption operation is not EnableEncryptionFormatAll\")\n        return False\n\n    def _is_luks_device(self):\n        \"\"\" checks if the device is set up with a luks header \"\"\"\n        if not self._resource_disk_partition_exists():\n            return False\n        cmd = 'cryptsetup isLuks ' + self.RD_DEV_PATH\n        return (int)(self.executor.Execute(cmd, suppress_logging=True)) == CommonVariables.process_success\n\n    def _resource_disk_partition_exists(self):\n        \"\"\" true if udev name for resource disk partition exists \"\"\"\n        cmd = 'test -b ' + self.RD_DEV_PATH\n        return (int)(self.executor.Execute(cmd, suppress_logging=True)) == CommonVariables.process_success\n\n    def _encrypt(self):\n        \"\"\" use disk util with the appropriate device mapper \"\"\"\n        return (int)(self.disk_util.encrypt_disk(dev_path=self.RD_DEV_PATH,\n                                                 passphrase_file=self.passphrase_filename,\n                                                 mapper_name=self.RD_MAPPER_NAME,\n                                                 header_file=None)) == CommonVariables.process_success\n\n    def _format_encrypted_partition(self):\n        \"\"\" make a default file system on top of the crypt 
layer \"\"\"\n        make_result = self.disk_util.format_disk(dev_path=self.RD_MAPPER_PATH, file_system=CommonVariables.default_file_system)\n        if make_result != CommonVariables.process_success:\n            self.logger.log(msg=\"Failed to make file system on ephemeral disk\", level=CommonVariables.ErrorLevel)\n            return False\n        # todo - drop DATALOSS_WARNING_README.txt file to disk\n        return True\n\n    def _mount_resource_disk(self, dev_path):\n        \"\"\" mount the file system previously made on top of the crypt layer \"\"\"\n        # ensure that resource disk mount point directory has been created\n        cmd = 'mkdir -p ' + self.RD_MOUNT_POINT\n        if self.executor.Execute(cmd, suppress_logging=True) != CommonVariables.process_success:\n            self.logger.log(msg='Failed to precreate mount point directory: ' + cmd, level=CommonVariables.ErrorLevel)\n            return False\n\n        # mount to mount point directory\n        mount_result = self.disk_util.mount_filesystem(dev_path=dev_path, mount_point=self.RD_MOUNT_POINT)\n        if mount_result != CommonVariables.process_success:\n            self.logger.log(msg=\"Failed to mount file system on resource disk\", level=CommonVariables.ErrorLevel)\n            return False\n        return True\n\n    def _configure_waagent(self):\n        \"\"\" turn off waagent.conf resource disk management  \"\"\"\n        # set ResourceDisk.MountPoint to standard mount point\n        cmd = \"sed -i.rdbak1 's|ResourceDisk.MountPoint=.*|ResourceDisk.MountPoint=\" + self.RD_MOUNT_POINT + \"|' /etc/waagent.conf\"\n        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:\n            self.logger.log(msg=\"Failed to change ResourceDisk.MountPoint in /etc/waagent.conf\", level=CommonVariables.WarningLevel)\n            return False\n        # set ResourceDiskFormat=n to ensure waagent does not attempt a simultaneous format\n        cmd = \"sed -i.rdbak2 's|ResourceDisk.Format=y|ResourceDisk.Format=n|' /etc/waagent.conf\"\n        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:\n            self.logger.log(msg=\"Failed to set ResourceDiskFormat in /etc/waagent.conf\", level=CommonVariables.WarningLevel)\n            return False\n        # todo: restart waagent if necessary to ensure changes are picked up?\n        return True\n\n    def _configure_fstab(self):\n        \"\"\" remove resource disk from /etc/fstab if present \"\"\"\n        cmd = \"sed -i.bak '/azure_resource-part1/d' /etc/fstab\"\n        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:\n            self.logger.log(msg=\"Failed to configure resource disk entry of /etc/fstab\", level=CommonVariables.WarningLevel)\n            return False\n        return True\n\n    def _unmount_resource_disk(self):\n        \"\"\" unmount resource disk \"\"\"\n        self.disk_util.umount(self.RD_MOUNT_POINT)\n        self.disk_util.umount(CommonVariables.encryption_key_mount_point)\n        self.disk_util.umount('/mnt')\n        self.disk_util.make_sure_path_exists(CommonVariables.encryption_key_mount_point)\n        self.disk_util.mount_bek_volume(\"BEK VOLUME\", CommonVariables.encryption_key_mount_point, \"fmask=077\")\n\n    def _is_plain_mounted(self):\n        \"\"\" return true if mount point is mounted from a non-crypt layer \"\"\"\n        mount_items = self.disk_util.get_mount_items()\n        for mount_item in mount_items:\n            if mount_item[\"dest\"] == self.RD_MOUNT_POINT 
and not (mount_item[\"src\"].startswith(CommonVariables.dev_mapper_root) or mount_item[\"src\"].startswith(self.DEV_DM_PREFIX)):\n                return True\n        return False\n\n    def _is_crypt_mounted(self):\n        \"\"\" return true if mount point is already on a crypt layer \"\"\"\n        mount_items = self.disk_util.get_mount_items()\n        for mount_item in mount_items:\n            if mount_item[\"dest\"] == self.RD_MOUNT_POINT and (mount_item[\"src\"].startswith(CommonVariables.dev_mapper_root) or mount_item[\"src\"].startswith(self.DEV_DM_PREFIX)):\n                return True\n        return False\n\n    def _get_rd_device_mappers(self):\n        \"\"\"\n        Retrieve any device mapper device on the resource disk (e.g. /dev/dm-0).\n        Can't imagine why there would be multiple device mappers here, but it doesn't hurt to handle the case\n        \"\"\"\n        device_items = self.disk_util.get_device_items(self.RD_DEV_PATH)\n        device_mappers = []\n        mapper_device_types = [\"raid0\", \"raid1\", \"raid5\", \"raid10\", \"lvm\", \"crypt\"]\n        for device_item in device_items:\n            # fstype should be crypto_LUKS\n            dev_path = self.disk_util.get_device_path(device_item.name)\n            if device_item.type in mapper_device_types:\n                device_mappers.append(device_item)\n                self.logger.log('Found device mapper: ' + dev_path, level='Info')\n        return device_mappers\n\n    def _remove_device_mappers(self):\n        \"\"\"\n        Use dmsetup to remove the resource disk device mapper if it exists.\n        This is to allow us to make sure that the resource disk is not being used by anything and we can\n        safely luksFormat it.\n        \"\"\"\n        something_closed = True\n        while something_closed is True:\n            # The mappers might be dependent on each other, like a crypt on an LVM.\n            # Instead of trying to figure out the dependency tree we will try to close anything we can\n            # and if anything does get closed we will refresh the list of devices and try to close everything again.\n            # In effect we repeat until we either close everything or we reach a point where we can't close anything.\n            dm_items = self._get_rd_device_mappers()\n            something_closed = False\n\n            if len(dm_items) == 0:\n                self.logger.log('no resource disk device mapper found')\n            for dm_item in dm_items:\n                # try luksClose\n                cmd = 'cryptsetup luksClose ' + dm_item.name\n                if self.executor.Execute(cmd) == CommonVariables.process_success:\n                    self.logger.log('Successfully closed cryptlayer: ' + dm_item.name)\n                    something_closed = True\n                else:\n                    # try a dmsetup remove, in case it's a non-crypt device mapper (lvm, raid, something we don't know)\n                    cmd = 'dmsetup remove ' + self.disk_util.get_device_path(dm_item.name)\n                    if self.executor.Execute(cmd) == CommonVariables.process_success:\n                        something_closed = True\n                    else:\n                        self.logger.log('failed to remove ' + dm_item.name)\n\n    def _prepare_partition(self):\n        \"\"\" create partition on resource disk if missing \"\"\"\n        if self._resource_disk_partition_exists():\n            return True\n        self.logger.log(\"resource 
disk partition does not exist\", level='Info')\n        cmd = 'parted ' + self.RD_BASE_DEV_PATH + ' mkpart primary ext4 0% 100%'\n        if self.executor.ExecuteInBash(cmd) == CommonVariables.process_success:\n            # wait for the corresponding udev name to become available\n            for i in range(0, 10):\n                time.sleep(i)\n                if self._resource_disk_partition_exists():\n                    return True\n        self.logger.log('unable to make resource disk partition')\n        return False\n\n    def _wipe_partition_header(self):\n        \"\"\" clear any possible header (luks or filesystem) by overwriting with 10MB of entropy \"\"\"\n        if not self._resource_disk_partition_exists():\n            self.logger.log(\"resource partition does not exist, no header to clear\")\n            return True\n        cmd = 'dd if=/dev/urandom of=' + self.RD_DEV_PATH + ' bs=512 count=20480'\n        return self.executor.Execute(cmd) == CommonVariables.process_success\n\n    def try_remount(self):\n        \"\"\"\n        Mount the resource disk if not already mounted\n        Returns true if the resource disk is mounted, false otherwise\n        Throws an exception if anything goes wrong\n        \"\"\"\n        self.logger.log(\"In try_remount\")\n\n        if self.passphrase_filename:\n            self.logger.log(\"passphrase_filename(value={0}) is not null, so trying to mount encrypted Resource Disk\".format(self.passphrase_filename))\n\n            if self._is_crypt_mounted():\n                self.logger.log(\"Resource disk already encrypted and mounted\")\n                # Add resource disk to crypttab if crypt mount is used\n                # Scenario: RD is already crypt mounted and crypt mount to crypttab migration is initiated\n                if not self.disk_util.should_use_azure_crypt_mount():\n                    self.add_resource_disk_to_crypttab()\n                return True\n\n            if self._resource_disk_partition_exists() and self._is_luks_device():\n                self.disk_util.luks_open(passphrase_file=self.passphrase_filename, dev_path=self.RD_DEV_PATH, mapper_name=self.RD_MAPPER_NAME, header_file=None, uses_cleartext_key=False)\n                self.logger.log(\"Trying to mount resource disk.\")\n                mount_retval = self._mount_resource_disk(self.RD_MAPPER_PATH)\n                if mount_retval:\n                    # We successfully mounted the RD but\n                    # the RD was not auto-mounted, so trying to enable auto-unlock for RD\n                    self.add_resource_disk_to_crypttab()\n                return mount_retval\n\n        else:\n            self.logger.log(\"passphrase_filename(value={0}) is null, so trying to mount plain Resource Disk\".format(self.passphrase_filename))\n            if self._is_plain_mounted():\n                self.logger.log(\"Resource disk already mounted\")\n                return True\n            return self._mount_resource_disk(self.RD_DEV_PATH)\n\n        # conditions required to re-mount were not met\n        return False\n\n    def prepare(self):\n        \"\"\" prepare a non-encrypted resource disk to be encrypted \"\"\"\n        self._configure_waagent()\n        self._configure_fstab()\n        if self._resource_disk_partition_exists():\n            self.disk_util.swapoff()\n            self._unmount_resource_disk()\n            self._remove_device_mappers()\n            self._wipe_partition_header()\n        self._prepare_partition()\n        return True\n
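\n    # The fstab entry appended below is of the form (illustrative, assuming\n    # CommonVariables.dev_mapper_root is '/dev/mapper'):\n    #   /dev/mapper/resourceencrypt /mnt/resource auto defaults,discard,nofail 0 0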
\n    def add_to_fstab(self):\n        with open(\"/etc/fstab\") as f:\n            lines = f.readlines()\n\n        if not self.disk_util.is_bek_in_fstab_file(lines):\n            lines.append(self.disk_util.get_fstab_bek_line())\n            self.disk_util.add_bek_to_default_cryptdisks()\n\n        if not any([line.startswith(self.RD_MAPPER_PATH) for line in lines]):\n            if self.distro_info[0].lower() == 'ubuntu' and self.distro_info[1].startswith('14'):\n                lines.append('{0} {1} auto defaults,discard,nobootwait 0 0\\n'.format(self.RD_MAPPER_PATH, self.RD_MOUNT_POINT))\n            else:\n                lines.append('{0} {1} auto defaults,discard,nofail 0 0\\n'.format(self.RD_MAPPER_PATH, self.RD_MOUNT_POINT))\n\n        with open('/etc/fstab', 'w') as f:\n            f.writelines(lines)\n\n    def encrypt_format_mount(self):\n        if not self.prepare():\n            self.logger.log(\"Failed to prepare VM for Resource Disk Encryption\", level=CommonVariables.ErrorLevel)\n            return False\n        if not self._encrypt():\n            self.logger.log(\"Failed to encrypt the Resource Disk\", level=CommonVariables.ErrorLevel)\n            return False\n        if not self._format_encrypted_partition():\n            self.logger.log(\"Failed to format the encrypted Resource Disk\", level=CommonVariables.ErrorLevel)\n            return False\n        if not self._mount_resource_disk(self.RD_MAPPER_PATH):\n            self.logger.log(\"Failed to mount the Resource Disk after encrypting and formatting it\", level=CommonVariables.ErrorLevel)\n            return False\n        # We haven't failed so far, let's just add the RD to crypttab\n        self.add_resource_disk_to_crypttab()\n        return True\n\n    def add_resource_disk_to_crypttab(self):\n        self.logger.log(\"Adding resource disk to the crypttab file\")\n        crypt_item = CryptItem()\n        crypt_item.dev_path = self.RD_DEV_PATH\n        crypt_item.mapper_name = self.RD_MAPPER_NAME\n        crypt_item.uses_cleartext_key = False\n        self.disk_util.remove_crypt_item(crypt_item)  # Remove old item in case it was already there\n        self.disk_util.add_crypt_item_to_crypttab(crypt_item, self.passphrase_filename)\n        self.add_to_fstab()\n\n    def automount(self):\n        \"\"\"\n        Mount the resource disk (encrypted or not)\n        or\n        encrypt the resource disk and mount it if enable was called with EncryptFormatAll\n\n        If False is returned, the resource disk is not mounted.\n        \"\"\"\n        # try to remount if the disk was previously encrypted and is still valid\n        if self.try_remount():\n            return True\n        # unencrypted or unusable\n        elif self._is_encrypt_format_all():\n            return self.encrypt_format_mount()\n        else:\n            self.logger.log('EncryptionFormatAll not in use, resource disk will not be automatically formatted and encrypted.')\n\n        return self._is_crypt_mounted() or self._is_plain_mounted()\n"
  },
  {
    "path": "VMEncryption/main/SupportedOS.json",
    "content": "{\n  \"redhat\": [\n        {\n          \"Version\" : \"7.7\"\n        },\n        {\n          \"Version\" : \"7.6\"\n        },\n        {\n          \"Version\" : \"7.5\"\n        },\n        {\n          \"Version\" : \"7.4\"\n        },\n        {\n          \"Version\" : \"7.3\"\n        },\n        {\n          \"Version\" : \"7.2\"\n        },\n        {\n          \"Version\" : \"6.8\"\n        }\n    ],\n    \"Ubuntu\" : [\n        {\n          \"Version\" : \"16.04\"\n        },\n        {\n          \"Version\" : \"18.04\"\n        },\n        {\n          \"Version\" : \"14.04\",\n          \"Kernel\": \"4.15\"\n        }\n    ],\n    \"centos\" : [\n        {\n          \"Version\" : \"7.7\"\n        },\n        {\n          \"Version\" : \"7.6\"\n        },\n        {\n          \"Version\" : \"7.5\"\n        },\n        {\n          \"Version\" : \"7.4\"\n        },\n        {\n          \"Version\" : \"7.3.1611\"\n        },\n        {\n          \"Version\" : \"7.2.1511\"\n        },\n        {\n          \"Version\" : \"6.9\"\n        },\n        {\n          \"Version\" : \"6.8\"\n        }\n    ]\n}"
  },
  {
    "path": "VMEncryption/main/TokenUtil.py",
    "content": "# Copyright (C) Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport adal\nimport json\nimport sys\nimport base64\n\ndef get_key(filename):\n    with open(filename, 'r') as key_file:\n        private_key = key_file.read()\n    return private_key\n\ntry:\n    with open(sys.argv[1]) as json_file:\n        d = json.load(json_file)\n    key = get_key(d['certificate'])\n    context = adal.AuthenticationContext(d['auth'])\n    token = context.acquire_token_with_client_certificate(d['resource'],d['client'],key,d['thumbprint'])\n    if token and 'accessToken' in token:\n        print(token['accessToken'])\nexcept:\n    exit(1)"
  },
  {
    "path": "VMEncryption/main/TransactionalCopyTask.py",
    "content": "﻿#!/usr/bin/env python\n#\n# VMEncryption extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nimport os\nimport os.path\nimport sys\nimport shlex\nfrom subprocess import *\nfrom CommandExecutor import CommandExecutor\nfrom Common import CommonVariables\nfrom ConfigUtil import ConfigUtil\nfrom OnGoingItemConfig import *\n\n\nclass TransactionalCopyTask(object):\n    \"\"\"\n    copy_total_size is in byte, skip_target_size is also in byte\n    slice_size is in byte 50M\n    \"\"\"\n    def __init__(self, logger, hutil, disk_util, ongoing_item_config, patching, encryption_environment, status_prefix=''):\n        \"\"\"\n        copy_total_size is in bytes.\n        \"\"\"\n        self.command_executer = CommandExecutor(logger)\n        self.ongoing_item_config = ongoing_item_config\n        self.total_size = self.ongoing_item_config.get_current_total_copy_size()\n        self.block_size = self.ongoing_item_config.get_current_block_size()\n        self.source_dev_full_path = self.ongoing_item_config.get_current_source_path()\n        self.destination = self.ongoing_item_config.get_current_destination()\n        self.current_slice_index = self.ongoing_item_config.get_current_slice_index()\n        self.from_end = self.ongoing_item_config.get_from_end()\n\n        self.last_slice_size = self.total_size % self.block_size\n        # we add 1 even the last_slice_size is zero.\n        self.total_slice_size = ((self.total_size - self.last_slice_size) / self.block_size) + 1\n\n        self.status_prefix = status_prefix\n        self.encryption_environment = encryption_environment\n        self.logger = logger\n        self.patching = patching\n        self.disk_util = disk_util\n        self.hutil = hutil\n        self.tmpfs_mount_point = \"/mnt/azure_encrypt_tmpfs\"\n        self.slice_file_path = self.tmpfs_mount_point + \"/slice_file\"\n        self.copy_command = self.patching.dd_path\n\n    def resume_copy_internal(self, copy_slice_item_backup_file_size, skip_block, original_total_copy_size):\n        block_size_of_slice_item_backup = 512\n        #copy the left slice\n        if copy_slice_item_backup_file_size <= original_total_copy_size:\n            skip_of_slice_item_backup_file = copy_slice_item_backup_file_size / block_size_of_slice_item_backup\n            left_count = ((original_total_copy_size - copy_slice_item_backup_file_size) / block_size_of_slice_item_backup)\n            total_count = original_total_copy_size / block_size_of_slice_item_backup\n            original_device_skip_count = (self.block_size * skip_block) / block_size_of_slice_item_backup \n            if left_count != 0:\n                dd_cmd = str(self.copy_command) \\\n                       + ' if=' + self.source_dev_full_path \\\n                       + ' of=' + self.encryption_environment.copy_slice_item_backup_file \\\n                       + ' bs=' + str(block_size_of_slice_item_backup) \\\n                       + ' 
\n        self.status_prefix = status_prefix\n        self.encryption_environment = encryption_environment\n        self.logger = logger\n        self.patching = patching\n        self.disk_util = disk_util\n        self.hutil = hutil\n        self.tmpfs_mount_point = \"/mnt/azure_encrypt_tmpfs\"\n        self.slice_file_path = self.tmpfs_mount_point + \"/slice_file\"\n        self.copy_command = self.patching.dd_path\n\n    def resume_copy_internal(self, copy_slice_item_backup_file_size, skip_block, original_total_copy_size):\n        block_size_of_slice_item_backup = 512\n        # copy the remaining part of the slice\n        if copy_slice_item_backup_file_size <= original_total_copy_size:\n            skip_of_slice_item_backup_file = copy_slice_item_backup_file_size / block_size_of_slice_item_backup\n            left_count = ((original_total_copy_size - copy_slice_item_backup_file_size) / block_size_of_slice_item_backup)\n            total_count = original_total_copy_size / block_size_of_slice_item_backup\n            original_device_skip_count = (self.block_size * skip_block) / block_size_of_slice_item_backup\n            if left_count != 0:\n                dd_cmd = str(self.copy_command) \\\n                       + ' if=' + self.source_dev_full_path \\\n                       + ' of=' + self.encryption_environment.copy_slice_item_backup_file \\\n                       + ' bs=' + str(block_size_of_slice_item_backup) \\\n                       + ' skip=' + str(original_device_skip_count + skip_of_slice_item_backup_file) \\\n                       + ' seek=' + str(skip_of_slice_item_backup_file) \\\n                       + ' count=' + str(left_count)\n\n                return_code = self.command_executer.Execute(dd_cmd)\n                if return_code != CommonVariables.process_success:\n                    return return_code\n\n            dd_cmd = str(self.copy_command) \\\n                   + ' if=' + self.encryption_environment.copy_slice_item_backup_file \\\n                   + ' of=' + self.destination \\\n                   + ' bs=' + str(block_size_of_slice_item_backup) \\\n                   + ' seek=' + str(original_device_skip_count) \\\n                   + ' count=' + str(total_count)\n\n            return_code = self.command_executer.Execute(dd_cmd)\n            if return_code != CommonVariables.process_success:\n                return return_code\n            else:\n                self.current_slice_index += 1\n                self.ongoing_item_config.current_slice_index = self.current_slice_index\n                self.ongoing_item_config.commit()\n                if os.path.exists(self.encryption_environment.copy_slice_item_backup_file):\n                    os.remove(self.encryption_environment.copy_slice_item_backup_file)\n                return return_code\n        else:\n            self.logger.log(msg=\"copy_slice_item_backup_file_size is bigger than original_total_copy_size\",\n                            level=CommonVariables.ErrorLevel)\n            return CommonVariables.backup_slice_file_error\n\n    def resume_copy(self):\n        if self.from_end.lower() == 'true':\n            skip_block = (self.total_slice_size - self.current_slice_index - 1)\n        else:\n            skip_block = self.current_slice_index\n\n        return_code = CommonVariables.process_success\n\n        if self.current_slice_index == 0:\n            if self.last_slice_size > 0:\n                if os.path.exists(self.encryption_environment.copy_slice_item_backup_file):\n                    copy_slice_item_backup_file_size = os.path.getsize(self.encryption_environment.copy_slice_item_backup_file)\n                    return_code = self.resume_copy_internal(copy_slice_item_backup_file_size=copy_slice_item_backup_file_size,\n                                                            skip_block=skip_block,\n                                                            original_total_copy_size=self.last_slice_size)\n                else:\n                    self.logger.log(msg=\"1. the slice item backup file does not exist.\",\n                                    level=CommonVariables.WarningLevel)\n            else:\n                self.logger.log(msg=\"the last slice size is zero, nothing to resume\",\n                                level=CommonVariables.WarningLevel)\n        else:\n            if os.path.exists(self.encryption_environment.copy_slice_item_backup_file):\n                copy_slice_item_backup_file_size = os.path.getsize(self.encryption_environment.copy_slice_item_backup_file)\n                return_code = self.resume_copy_internal(copy_slice_item_backup_file_size, skip_block=skip_block, original_total_copy_size=self.block_size)\n            else:\n                self.logger.log(msg=\"2. the slice item backup file does not exist.\",\n                                level=CommonVariables.WarningLevel)\n        return return_code\n\n    def copy_last_slice(self, skip_block):\n        block_size_of_last_slice = 512\n        skip_of_last_slice = (skip_block * self.block_size) / block_size_of_last_slice\n        count_of_last_slice = self.last_slice_size / block_size_of_last_slice\n\n        copy_result = self.copy_internal(from_device=self.source_dev_full_path,\n                                         to_device=self.destination,\n                                         skip=skip_of_last_slice,\n                                         seek=skip_of_last_slice,\n                                         block_size=block_size_of_last_slice,\n                                         count=count_of_last_slice)\n        return copy_result\n\n    def begin_copy(self):\n        \"\"\"\n        copy the device slice by slice, resuming any partially copied slice first\n        \"\"\"\n        self.resume_copy()\n        if self.from_end.lower() == 'true':\n            while self.current_slice_index < self.total_slice_size:\n                skip_block = (self.total_slice_size - self.current_slice_index - 1)\n\n                if self.current_slice_index == 0:\n                    if self.last_slice_size > 0:\n                        copy_result = self.copy_last_slice(skip_block)\n                        if copy_result != CommonVariables.process_success:\n                            return copy_result\n                    else:\n                        self.logger.log(msg=\"the last slice size is zero, so skip the 0 index.\")\n                else:\n                    copy_result = self.copy_internal(from_device=self.source_dev_full_path,\n                                                     to_device=self.destination,\n                                                     skip=skip_block,\n                                                     seek=skip_block,\n                                                     block_size=self.block_size)\n\n                    if copy_result != CommonVariables.process_success:\n                        return copy_result\n\n                self.current_slice_index += 1\n\n                if self.status_prefix:\n                    msg = self.status_prefix + ': ' \\\n                        + str(int(self.current_slice_index / (float)(self.total_slice_size) * 100.0)) \\\n                        + '%'\n\n                    self.hutil.do_status_report(operation='DataCopy',\n                                                status=CommonVariables.extension_success_status,\n                                                status_code=str(CommonVariables.success),\n                                                message=msg)\n\n                self.ongoing_item_config.current_slice_index = self.current_slice_index\n                self.ongoing_item_config.commit()\n\n            return CommonVariables.process_success\n        else:\n            while self.current_slice_index < self.total_slice_size:\n                skip_block = self.current_slice_index\n\n                if self.current_slice_index == (self.total_slice_size - 1):\n                    if self.last_slice_size > 0:\n                        copy_result = self.copy_last_slice(skip_block)\n                        if copy_result != CommonVariables.process_success:\n                            return copy_result\n                    else:\n                        self.logger.log(msg=\"the last slice size is zero, so skip the last slice index.\")\n                else:\n                    copy_result = self.copy_internal(from_device=self.source_dev_full_path,\n                                                     to_device=self.destination,\n                                                     skip=skip_block,\n                                                     seek=skip_block,\n                                                     block_size=self.block_size)\n\n                    if copy_result != CommonVariables.process_success:\n                        return copy_result\n\n                self.current_slice_index += 1\n\n                if self.status_prefix:\n                    msg = self.status_prefix + ': ' \\\n                        + str(int(self.current_slice_index / (float)(self.total_slice_size) * 100.0)) \\\n                        + '%'\n\n                    self.hutil.do_status_report(operation='DataCopy',\n                                                status=CommonVariables.extension_success_status,\n                                                status_code=str(CommonVariables.success),\n                                                message=msg)\n\n                self.ongoing_item_config.current_slice_index = self.current_slice_index\n                self.ongoing_item_config.commit()\n            return CommonVariables.process_success\n\n    \"\"\"\n    TODO: what if the copy failed?\n    \"\"\"\n    def copy_internal(self, from_device, to_device, block_size, skip=0, seek=0, count=1):\n        \"\"\"\n        first, copy the data to the middle cache\n        \"\"\"\n        dd_cmd = str(self.copy_command) \\\n               + ' if=' + from_device \\\n               + ' of=' + self.slice_file_path \\\n               + ' bs=' + str(block_size) \\\n               + ' skip=' + str(skip) \\\n               + ' count=' + str(count)\n\n        return_code = self.command_executer.Execute(dd_cmd)\n        if return_code != CommonVariables.process_success:\n            self.logger.log(msg=\"{0} is {1}\".format(dd_cmd, return_code), level=CommonVariables.ErrorLevel)\n            return return_code\n        else:\n            slice_file_size = os.path.getsize(self.slice_file_path)\n            self.logger.log(msg=(\"slice_file_size is: {0}\".format(slice_file_size)))\n            \"\"\"\n            second, copy the data in the middle cache to the backup slice.\n            \"\"\"\n            backup_slice_item_cmd = str(self.copy_command) \\\n                                  + ' if=' + self.slice_file_path \\\n                                  + ' of=' + self.encryption_environment.copy_slice_item_backup_file \\\n                                  + ' bs=' + str(block_size) \\\n                                  + ' count=' + str(count)\n            backup_slice_args = shlex.split(backup_slice_item_cmd)\n            backup_process = Popen(backup_slice_args)\n            self.logger.log(\"backup_slice_item_cmd is:{0}\".format(backup_slice_item_cmd))\n\n            \"\"\"\n            third, copy the data in the middle cache to the target device.\n            \"\"\"\n            dd_cmd = str(self.copy_command) + ' if=' + self.slice_file_path + ' of=' + to_device + ' bs=' + str(block_size) + ' seek=' + str(seek) + ' count=' + str(count)\n            return_code = self.command_executer.Execute(dd_cmd)\n            if return_code != CommonVariables.process_success:\n                self.logger.log(msg=(\"{0} is: {1}\".format(dd_cmd, return_code)), level=CommonVariables.ErrorLevel)\n            else:\n                # the copy completed correctly, so clear the backup slice file item.\n                backup_process.kill()\n                if os.path.exists(self.encryption_environment.copy_slice_item_backup_file):\n                    self.logger.log(msg=\"clean up the backup file\")\n                    os.remove(self.encryption_environment.copy_slice_item_backup_file)\n                if os.path.exists(self.slice_file_path):\n                    self.logger.log(msg=\"clean up the slice file\")\n                    os.remove(self.slice_file_path)\n            return return_code\n\n    def prepare_mem_fs(self):\n        self.disk_util.make_sure_path_exists(self.tmpfs_mount_point)\n        commandToExecute = self.patching.mount_path + \" -t tmpfs -o size=\" + str(self.block_size + 1024) + \" tmpfs \" + self.tmpfs_mount_point\n        self.logger.log(\"prepare mem fs script is: {0}\".format(commandToExecute))\n        return_code = self.command_executer.Execute(commandToExecute)\n        return return_code\n\n    def clear_mem_fs(self):\n        commandToExecute = self.patching.umount_path + \" \" + self.tmpfs_mount_point\n        return_code = self.command_executer.Execute(commandToExecute)\n        return return_code\n"
  },
  {
    "path": "VMEncryption/main/Utils/HandlerUtil.py",
    "content": "#\n# Handler library for Linux IaaS\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nJSON def:\nHandlerEnvironment.json\n[{\n  \"name\": \"ExampleHandlerLinux\",\n  \"seqNo\": \"seqNo\",\n  \"version\": \"1.0\",\n  \"handlerEnvironment\": {\n    \"logFolder\": \"<your log folder location>\",\n    \"configFolder\": \"<your config folder location>\",\n    \"statusFolder\": \"<your status folder location>\",\n    \"heartbeatFile\": \"<your heartbeat file location>\",\n    \n  }\n}]\n\nExample ./config/1.settings\n\"{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"1BE9A13AA1321C7C515EF109746998BAB6D86FD1\",\"protectedSettings\":\n\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==\",\"publicSettings\":{\"port\":\"3000\"}}}]}\"\n\n\nExample HeartBeat\n{\n\"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. 
Waiting for a new configuration from user.\"\n    }\n}\nExample Status Report:\n[{\"version\":\"1.0\",\"timestampUTC\":\"2014-05-29T04:20:13Z\",\"status\":{\"name\":\"Chef Extension Handler\",\"operation\":\"chef-client-run\",\"status\":\"success\",\"code\":0,\"formattedMessage\":{\"lang\":\"en-US\",\"message\":\"Chef-client run success\"}}}]\n\n\"\"\"\n\nimport fnmatch\nimport glob\nimport os\nimport os.path\nimport re\nimport shutil\nimport string\nimport subprocess\nimport sys\nimport imp\nimport base64\nimport json\nimport tempfile\nimport time\n\nfrom Common import *\nfrom os.path import join\nfrom Utils.WAAgentUtil import waagent\nfrom waagent import LoggerInit\nimport logging\nimport logging.handlers\n\nDateTimeFormat = \"%Y-%m-%dT%H:%M:%SZ\"\n\nclass HandlerContext:\n    def __init__(self, name):\n        self._name = name\n        self._version = '0.0'\n        return\n\nclass HandlerUtility:\n    def __init__(self, log, error, short_name):\n        self._log = log\n        self._error = error\n        self._short_name = short_name\n        self.patching = None\n        self.disk_util = None\n        self.find_last_nonquery_operation = False\n        self.config_archive_folder = '/var/lib/azure_disk_encryption_archive'\n        self._context = HandlerContext(self._short_name)\n\n    def _get_log_prefix(self):\n        return '[%s-%s]' % (self._context._name, self._context._version)\n\n    def _get_current_seq_no(self, config_folder):\n        seq_no = -1\n        cur_seq_no = -1\n        freshest_time = None\n        for subdir, dirs, files in os.walk(config_folder):\n            for file in files:\n                try:\n                    if file.endswith('.settings'):\n                        cur_seq_no = int(os.path.basename(file).split('.')[0])\n                        if freshest_time == None:\n                            freshest_time = os.path.getmtime(join(config_folder, file))\n                            seq_no = cur_seq_no\n                        else:\n                            current_file_m_time = os.path.getmtime(join(config_folder, file))\n                            if current_file_m_time > freshest_time:\n                                freshest_time = current_file_m_time\n                                seq_no = cur_seq_no\n                except ValueError:\n                    continue\n\n        if seq_no < 0: \n            # guest agent is expected to provide at least one settings file to extension\n            self.error(\"unable to get current sequence number from config folder\")\n                    \n        return seq_no\n\n    def get_last_seq(self):\n        if os.path.isfile('mrseq'):\n            seq = waagent.GetFileContents('mrseq')\n            if seq:\n                return int(seq)\n        return -1\n\n    def get_latest_seq(self):\n        settings_files = glob.glob(os.path.join(self._context._config_dir, '*.settings'))\n        settings_files = [os.path.basename(f) for f in settings_files]\n        seq_nums = [int(re.findall(r'(\\d+)\\.settings', f)[0]) for f in settings_files]\n\n        if seq_nums: \n            return max(seq_nums) \n        else:  \n            # guest agent is expected to provide at least one settings file to the extension\n            self.log(\"unable to get latest sequence number from config folder\")\n            return -1\n\n    def get_current_seq(self):\n        return int(self._context._seq_no)\n\n    def same_seq_as_last_run(self):\n        return self.get_current_seq() == self.get_last_seq()\n\n    def 
exit_if_same_seq(self, exit_status=None):\n        current_seq = int(self._context._seq_no)\n        last_seq = self.get_last_seq()\n        if current_seq == last_seq:\n            self.log(\"the sequence numbers are the same, so skipping daemon\" +\n                     \", current=\" +\n                     str(current_seq) +\n                     \", last=\" +\n                     str(last_seq))\n\n            if exit_status:\n                self.do_status_report(exit_status['operation'],\n                                      exit_status['status'],\n                                      exit_status['status_code'],\n                                      exit_status['message'])\n\n            sys.exit(0)\n\n    def log(self, message):\n        # write message to stderr for inclusion in QOS telemetry\n        sys.stderr.write(message)\n        self._log(self._get_log_prefix() + ': ' + message)\n\n    def error(self, message):\n        # write message to stderr for inclusion in QOS telemetry\n        sys.stderr.write(message)\n        self._error(self._get_log_prefix() + ': ' + message)\n\n    def _parse_config(self, config_txt):\n        # pre : config_txt is a text string containing JSON configuration settings\n        # post: handlerSettings is initialized with these settings and the config\n        #       object is returned.  If an error occurs, None is returned.\n        if not config_txt:\n            self.error('empty config, nothing to parse')\n            return None\n\n        config = None\n        try:\n            config = json.loads(config_txt)\n        except Exception:\n            self.error('invalid config, could not parse: ' + str(config_txt))\n\n        if config:\n            handlerSettings = config['runtimeSettings'][0]['handlerSettings']\n\n            # skip unnecessary decryption of protected settings for query status\n            # operations, to avoid timeouts in case of multiple settings files\n            if handlerSettings.has_key('publicSettings'):\n                ps = handlerSettings.get('publicSettings')\n                op = ps.get(CommonVariables.EncryptionEncryptionOperationKey)\n                if op == CommonVariables.QueryEncryptionStatus:\n                    return config\n\n            if handlerSettings.has_key('protectedSettings') and \\\n                    handlerSettings.has_key(\"protectedSettingsCertThumbprint\") and \\\n                    handlerSettings['protectedSettings'] is not None and \\\n                    handlerSettings[\"protectedSettingsCertThumbprint\"] is not None:\n                thumb = handlerSettings['protectedSettingsCertThumbprint']\n                cert = waagent.LibDir + '/' + thumb + '.crt'\n                pkey = waagent.LibDir + '/' + thumb + '.prv'\n                f = tempfile.NamedTemporaryFile(delete=False)\n                f.close()\n                waagent.SetFileContents(f.name, config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])\n                cleartxt = waagent.RunGetOutput(self.patching.base64_path + \" -d \" + f.name + \" | \" + self.patching.openssl_path + \" smime  -inform DER -decrypt -recip \" + cert + \"  -inkey \" + pkey)[1]\n                if cleartxt is None:\n                    self.error(\"OpenSSL decode error using thumbprint \" + thumb)\n                    self.do_exit(1, self.operation, 'error', '1', self.operation + ' Failed')\n                jctxt = ''\n                try:\n                    jctxt = json.loads(cleartxt)\n  
              except:\n                    self.error('JSON exception loading protected settings')\n                handlerSettings['protectedSettings'] = jctxt\n        return config\n\n    def do_parse_context(self, operation):\n        self.operation = operation\n        _context = self.try_parse_context()\n        if not _context:\n            self.log(\"no settings file found\")\n\n            self.do_exit(0,\n                         'QueryEncryptionStatus',\n                         CommonVariables.extension_success_status,\n                         str(CommonVariables.success),\n                         'No operation found, find_last_nonquery_operation={0}'.format(self.find_last_nonquery_operation))\n\n        return _context\n\n    def is_valid_nonquery(self, settings_file_path):\n        # note: the nonquery operations list includes update and disable \n        nonquery_ops = [ CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll, CommonVariables.UpdateEncryptionSettings, CommonVariables.DisableEncryption ] \n\n        if settings_file_path and os.path.exists(settings_file_path):\n            # open file and look for presence of nonquery operation \n            config_txt = waagent.GetFileContents(settings_file_path)\n            config_obj = self._parse_config(config_txt)\n            public_settings_str = config_obj['runtimeSettings'][0]['handlerSettings'].get('publicSettings')\n\n            # if not json already, load string as json \n            if isinstance(public_settings_str, basestring):\n                public_settings = json.loads(public_settings_str)\n            else:\n                public_settings = public_settings_str\n\n            operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n            if operation and (operation in nonquery_ops):\n                return True\n\n        # invalid input, or not recognized as a valid nonquery operation \n        return False\n\n\n    def get_last_nonquery_config_path(self):\n        # pre: internal self._context._config_dir and _seq_no, _settings_file must be set prior to call\n        # post: returns path to last nonquery settings file in current config, archived folder, or None\n        \n        # validate that internal preconditions are satisfied and internal variables are initialized\n        if self._context._seq_no < 0:\n            self.error(\"current context sequence number must be initialized and non-negative\")\n        if not self._context._config_dir or not os.path.isdir(self._context._config_dir):\n            self.error(\"current context config dir must be initialized and point to a path that exists\")\n        if not self._context._settings_file or not os.path.exists(self._context._settings_file):\n            self.error(\"current context settings file variable must be initialized and point to a file that exists\")\n\n        # check timestamp of pointer to last archived settings file \n        curr_path = self._context._settings_file\n        last_path = os.path.join(self.config_archive_folder, \"lnq.settings\")\n        \n        # if an archived nonquery settings file exists, use it if no current settings file exists, or it is newer than current settings\n        if os.path.exists(last_path) and ((not os.path.exists(curr_path)) or (os.path.exists(curr_path) and (os.stat(last_path).st_mtime > os.stat(curr_path).st_mtime))):\n            return last_path\n        else:\n            # reverse iterate through 
numbered settings files in config dir\n            # and return path to the first nonquery settings file found\n            for i in range(self._context._seq_no,-1,-1):\n                curr_path = os.path.join(self._context._config_dir, str(i) + '.settings')\n                if self.is_valid_nonquery(curr_path):                    \n                    return curr_path\n            \n            # nothing was found in the current config settings, check the archived settings\n            if os.path.exists(last_path):\n                return last_path\n            else:\n                if os.path.exists(self.config_archive_folder):                        \n                    # walk through any archived [n].settings files found in archived settings folder \n                    # sorted by reverse timestamp (processing newest to oldest) until a nonquery settings file is found \n                    # note: os.listdir returns bare names, so build full paths for both the sort key and the checks\n                    files = sorted(os.listdir(self.config_archive_folder), key=lambda f: os.path.getctime(os.path.join(self.config_archive_folder, f)), reverse=True)\n                    for f in files:\n                        curr_path = os.path.join(self.config_archive_folder, f)\n                        # TODO: check that file name matches the [n].settings format\n                        if self.is_valid_nonquery(curr_path):\n                            # found, copy to last_nonquery_settings in archived settings\n                            return curr_path\n\n        # unable to find any nonquery settings file \n        return None \n        \n    def get_last_config(self, nonquery):\n        # precondition:  self._context._config_dir, self._context._seq_no are already set and valid \n        # postcondition: a configuration object from the last configuration settings file is returned \n        # if nonquery flag is true, search for the last settings file that was not a query status operation\n        # if nonquery is false, return the current settings file \n        if nonquery:\n            last_config_path = self.get_last_nonquery_config_path()\n        else:\n            # retrieve the settings file corresponding to the current sequence number \n            last_config_path = os.path.join(self._context._config_dir, str(self._context._seq_no) + '.settings')\n\n        # if not found, attempt to fall back to an archived settings file\n        if not os.path.isfile(last_config_path):\n            self.log('settings file not found, checking for archived settings')\n            last_config_path = os.path.join(self.config_archive_folder, \"lnq.settings\")\n            if not os.path.isfile(last_config_path):\n                self.error('archived settings file not found, unable to get last config')\n                return None\n            \n        # settings file was found, parse config and return config object \n        config_txt = waagent.GetFileContents(last_config_path)\n        if not config_txt:\n            self.error('configuration settings empty, unable to get last config')\n            return None\n            \n        config_obj = self._parse_config(config_txt)\n        if not config_obj:\n            self.error('failed to parse configuration settings, unable to get last config')\n            return None  \n        else:\n            return config_obj\n\n    def get_handler_env(self):\n        # load environment variables from HandlerEnvironment.json \n        # according to spec, it is always in the ./ directory\n        #self.log('cwd is ' + os.path.realpath(os.path.curdir))\n        handler_env_file = './HandlerEnvironment.json'\n        if not 
os.path.isfile(handler_env_file):\n            self.error(\"Unable to locate \" + handler_env_file)\n            return None\n        handler_env_json_str = waagent.GetFileContents(handler_env_file)\n\n        if handler_env_json_str is None:\n            self.error(\"Unable to read \" + handler_env_file)\n            return None\n        handler_env = None\n        try:\n            handler_env = json.loads(handler_env_json_str)\n        except:\n            pass\n\n        if handler_env is None:\n            # TODO - treat this as a telemetry error indicating an agent bug, as this file should always be available and readable \n            self.log(\"JSON error processing \" + str(handler_env_file))\n            return None\n        if type(handler_env) == list:\n            handler_env = handler_env[0]\n        return handler_env\n\n    def try_parse_context(self):        \n        # precondition: agent is in a properly running state with at least one settings file in config folder\n        #               any archived settings from prior instances of the extension were saved to archive folder\n        # postcondition: context variables initialized to reflect current handler environment and prior call history \n                \n        # initialize handler environment context variables\n        handler_env = self.get_handler_env()\n        self._context._name = handler_env['name']\n        self._context._version = str(handler_env['version'])\n        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']\n        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']\n        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')\n        self._change_log_file()\n        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']\n        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']\n\n        # initialize the current sequence number corresponding to settings files in config folder\n        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)\n        self._context._settings_file = os.path.join(self._context._config_dir, str(self._context._seq_no) + '.settings')\n        \n        # get a config object corresponding to the last settings file, skipping QueryEncryptionStatus settings \n        # files when find_last_nonquery_operation is True, falling back to archived settings if necessary\n        # note - in the case of nonquery settings file retrieval, when preceded by one or more query settings \n        # files that are more recent, the config object will not match the active settings file or sequence number\n        self._context._config = self.get_last_config(self.find_last_nonquery_operation)\n\n        return self._context\n\n    def _change_log_file(self):\n        #self.log(\"Change log file to \" + self._context._log_file)\n        LoggerInit(self._context._log_file,'/dev/stdout')\n        self._log = waagent.Log\n        self._error = waagent.Error\n\n    def save_seq(self):\n        self.set_last_seq(self._context._seq_no)\n        self.log(\"set most recent sequence number to \" + str(self._context._seq_no))\n\n    def set_last_seq(self, seq):\n        waagent.SetFileContents('mrseq', str(seq))\n\n    def redo_last_status(self):\n        latest_sequence_num = self.get_latest_seq()\n        if (latest_sequence_num > 0):\n            latest_seq = str(latest_sequence_num)\n            self._context._status_file = os.path.join(self._context._status_dir, latest_seq 
+ '.status')\n\n            previous_seq = str(latest_sequence_num - 1)\n            previous_status_file = os.path.join(self._context._status_dir, previous_seq + '.status')\n\n            shutil.copy2(previous_status_file, self._context._status_file)\n            self.log(\"[StatusReport ({0})] Copied {1} to {2}\".format(latest_seq, previous_status_file, self._context._status_file))\n        else: \n            self.log(\"unable to redo last status, no prior status found\")\n\n    def redo_current_status(self):\n        stat_rept = waagent.GetFileContents(self._context._status_file)\n        stat = json.loads(stat_rept)\n\n        self.do_status_report(stat[0][\"status\"][\"operation\"],\n                              stat[0][\"status\"][\"status\"],\n                              stat[0][\"status\"][\"code\"],\n                              stat[0][\"status\"][\"formattedMessage\"][\"message\"])\n\n    def do_status_report(self, operation, status, status_code, message):\n        latest_seq_num = self.get_latest_seq()\n        if (latest_seq_num >= 0): \n            latest_seq = str(latest_seq_num)\n        else:\n            self.log(\"sequence number could not be derived from settings files, using 0.status\")\n            latest_seq = \"0\"\n\n        self._context._status_file = os.path.join(self._context._status_dir, latest_seq + '.status')\n\n        if message is None:\n            message = \"\"\n\n        message = filter(lambda c: c in string.printable, message)\n        message = message.encode('ascii', 'ignore')\n\n        self.log(\"[StatusReport ({0})] op: {1}\".format(latest_seq, operation))\n        self.log(\"[StatusReport ({0})] status: {1}\".format(latest_seq, status))\n        self.log(\"[StatusReport ({0})] code: {1}\".format(latest_seq, status_code))\n        self.log(\"[StatusReport ({0})] msg: {1}\".format(latest_seq, message))\n\n        tstamp = time.strftime(DateTimeFormat, time.gmtime())\n        stat = [{\n            \"version\" : self._context._version,\n            \"timestampUTC\" : tstamp,\n            \"status\" : {\n                \"name\" : self._context._name,\n                \"operation\" : operation,\n                \"status\" : status,\n                \"code\" : status_code,\n                \"formattedMessage\" : {\n                    \"lang\" : \"en-US\",\n                    \"message\" : message\n                }\n            }\n        }]\n\n        if self.disk_util:\n            encryption_status = self.disk_util.get_encryption_status()\n\n            encryption_status_dict = json.loads(encryption_status)\n            self.log(\"[StatusReport ({0})] substatus : OS : {1}  Data : {2}\".format(latest_seq, encryption_status_dict['os'], encryption_status_dict['data']))\n\n            substat = [{\n                \"name\" : self._context._name,\n                \"operation\" : operation,\n                \"status\" : status,\n                \"code\" : status_code,\n                \"formattedMessage\" : {\n                    \"lang\" : \"en-US\",\n                    \"message\" : encryption_status\n                }\n            }]\n\n            stat[0][\"status\"][\"substatus\"] = substat\n\n            if \"VMRestartPending\" in encryption_status:\n                stat[0][\"status\"][\"formattedMessage\"][\"message\"] = \"OS disk successfully encrypted, please reboot the VM\"\n\n        stat_rept = json.dumps(stat)\n
        # WALA chooses the status file with the highest sequence number to\n        # report, so write the status to the file for the current sequence.\n        if self._context._status_file:\n            with open(self._context._status_file,'w+') as f:\n                f.write(stat_rept)\n\n    def backup_settings_status_file(self, _seq_no):\n        self.log(\"current seq no is \" + _seq_no)\n        for subdir, dirs, files in os.walk(self._context._config_dir):\n            for file in files:\n                try:\n                    if file.endswith('.settings') and file != (_seq_no + \".settings\"):\n                        new_file_name = file.replace(\".\",\"_\")\n                        os.rename(join(self._context._config_dir, file), join(self._context._config_dir, new_file_name))\n                except:\n                    self.log(\"failed to rename the settings file.\")\n\n    def do_exit(self, exit_code, operation, status, code, message):\n        try:\n            self.do_status_report(operation, status, code, message)\n        except Exception as e:\n            self.log(\"Can't update status: \" + str(e))\n        if message:\n            # Remove newline character so that msg is printed in one line\n            strip_msg = message.replace('\\n', ' ')\n            self.log(\"Exited with message {0}\".format(strip_msg))\n        sys.exit(exit_code)\n\n    def get_handler_settings(self):\n        return self._context._config['runtimeSettings'][0]['handlerSettings']\n\n    def get_protected_settings(self):\n        return self.get_handler_settings().get('protectedSettings')\n\n    def get_public_settings(self):\n        return self.get_handler_settings().get('publicSettings')\n\n    def archive_old_configs(self):\n        if not os.path.exists(self.config_archive_folder):\n            os.makedirs(self.config_archive_folder)\n\n        # only persist latest nonquery settings file to archived settings \n        # and prevent the accumulation of large numbers of obsolete files \n        src = self.get_last_nonquery_config_path()\n        if src:\n            dest = os.path.join(self.config_archive_folder, 'lnq.settings')\n            if src != dest: \n                shutil.copy2(src, dest)\n"
  },
  {
    "path": "VMEncryption/main/Utils/WAAgentUtil.py",
    "content": "# Wrapper module for waagent\n#\n# waagent is not written as a module. This wrapper module is created \n# to use the waagent code as a module.\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport imp\nimport os\nimport os.path\n\n#\n# The following code will search and load waagent code and expose\n# it as a submodule of current module\n#\ndef searchWAAgent():\n    agentPath = '/usr/sbin/waagent'\n    if os.path.isfile(agentPath):\n        return agentPath\n    user_paths = os.environ['PYTHONPATH'].split(os.pathsep)\n    for user_path in user_paths:\n        agentPath = os.path.join(user_path, 'waagent')\n        if os.path.isfile(agentPath):\n            return agentPath\n    return None\n\nagentPath = searchWAAgent()\nif agentPath:\n    waagent = imp.load_source('waagent', agentPath)\nelse:\n    raise Exception(\"Can't load waagent.\")\n\nif not hasattr(waagent, \"AddExtensionEvent\"):\n    \"\"\"\n    If AddExtensionEvent is not defined, provide a dummy impl.\n    \"\"\"\n    def _AddExtensionEvent(*args, **kwargs):\n        pass\n    waagent.AddExtensionEvent = _AddExtensionEvent\n\nif not hasattr(waagent, \"WALAEventOperation\"):\n    class _WALAEventOperation:\n        HeartBeat=\"HeartBeat\"\n        Provision = \"Provision\"\n        Install = \"Install\"\n        UnIsntall = \"UnInstall\"\n        Disable = \"Disable\"\n        Enable = \"Enable\"\n        Download = \"Download\"\n        Upgrade = \"Upgrade\"\n        Update = \"Update\"           \n    waagent.WALAEventOperation = _WALAEventOperation\n\n"
  },
  {
    "path": "VMEncryption/main/Utils/__init__.py",
    "content": "#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"
  },
  {
    "path": "VMEncryption/main/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "VMEncryption/main/check_util.py",
    "content": "#!/usr/bin/env python\n#\n# *********************************************************\n# Copyright (c) Microsoft. All rights reserved.\n#\n# Apache 2.0 License\n#\n# You may obtain a copy of the License at\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n# *********************************************************\n\n\"\"\"This module checks validity of the environment prior to disk encryption\"\"\"\n\nimport os\nimport os.path\nimport urlparse\nimport re\nimport json\nfrom Common import CommonVariables\nfrom CommandExecutor import CommandExecutor\nfrom distutils.version import LooseVersion\n\nclass CheckUtil(object):\n    \"\"\"Checks compatibility for disk encryption\"\"\"\n    def __init__(self, logger):\n        self.logger = logger\n\n    def is_app_compat_issue_detected(self):\n        \"\"\"check for the existence of applications that enable is not yet compatible with\"\"\"\n        detected = False\n        dirs = ['./usr/sap']\n        files = ['/etc/init.d/mongodb',\n                 '/etc/init.d/cassandra',\n                 '/etc/init.d/docker',\n                 '/opt/Symantec/symantec_antivirus']\n        for testdir in dirs:\n            if os.path.isdir(testdir):\n                self.logger.log('WARNING: likely app compat issue [' + testdir + ']')\n                detected = True\n        for testfile in files:\n            if os.path.isfile(testfile):\n                self.logger.log('WARNING: likely app compat issue [' + testfile + ']')\n                detected = True\n        return detected\n\n    def is_insufficient_memory(self):\n        \"\"\"check if memory total is greater than or equal to the recommended minimum size\"\"\"\n        minsize = 7000000\n        memtotal = int(os.popen(\"grep MemTotal /proc/meminfo | grep -o -E [0-9]+\").read())\n        if memtotal < minsize:\n            self.logger.log('WARNING: total memory [' + str(memtotal) + 'kb] is less than 7GB')\n            return True\n        return False\n\n    def is_unsupported_mount_scheme(self):\n        \"\"\" check for data disks mounted under /mnt and for recursively mounted\n            data disks such as /mnt/data1, /mnt/data2, or /data3 + /data3/data4 \"\"\"\n        detected = False\n        ignorelist = ['/', '/dev', '/proc', '/run', '/sys', '/sys/fs/cgroup']\n        mounts = []\n        with open('/proc/mounts') as infile:\n            for line in infile:\n                mountpoint = line.split()[1]\n                if mountpoint not in ignorelist:\n                    mounts.append(line.split()[1])\n        for mnt1 in mounts:\n            for mnt2 in mounts:\n                if (mnt1 != mnt2) and (mnt2.startswith(mnt1)):\n                    self.logger.log('WARNING: unsupported mount scheme [' + mnt1 + ' ' + mnt2 + ']')\n                    detected = True\n        return detected\n\n    def check_kv_url(self, test_url, message):\n        \"\"\"basic sanity check of the key vault url\"\"\"\n\n        if test_url is None:\n            raise Exception(message + '\\nNo URL supplied')\n\n        try:\n            parse_result = urlparse.urlparse(test_url)\n        except:\n            raise Exception(message + '\\nMalformed URL: ' + test_url)\n\n        
if parse_result.scheme.lower() != \"https\":\n            raise Exception('\\n' + message + '\\n URL should be https: ' + test_url + \"\\n\")\n\n        if not parse_result.netloc:\n            raise Exception(message + '\\nMalformed URL: ' + test_url)\n\n        # Skip the explicit dns suffix check; the host already performs it and returns better error messages.\n\n        # dns_suffix_list = [\"vault.azure.net\", \"vault.azure.cn\", \"vault.usgovcloudapi.net\", \"vault.microsoftazure.de\"]\n        # Add new suffixes here when a new national cloud is introduced.\n        # Relevant link: https://docs.microsoft.com/en-us/azure/key-vault/key-vault-access-behind-firewall#key-vault-operations\n\n        # dns_match = False\n        # for dns_suffix in dns_suffix_list:\n        #     escaped_dns_suffix = dns_suffix.replace(\".\",\"\\.\")\n        #     if re.match('[a-zA-Z0-9\\-]+\\.' + escaped_dns_suffix + '(:443)?$', parse_result.netloc):\n        #         # matched a valid dns, set matched to true\n        #         dns_match = True\n        # if not dns_match:\n        #     raise Exception('\\n' + message + '\\nProvided URL does not match known valid URL formats: ' + \\\n        #         \"\\n\\tProvided URL: \" + test_url + \\\n        #         \"\\n\\tKnown valid formats:\\n\\t\\t\" + \\\n        #         \"\\n\\t\\t\".join([\"https://<keyvault-name>.\" + dns_suffix + \"/\" for dns_suffix in dns_suffix_list]) )\n\n        return\n\n    def validate_key_vault_params(self, public_settings):\n\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation not in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]:\n            # No need to check the KV URLs if it's not an encryption operation\n            return\n\n        kek_url = public_settings.get(CommonVariables.KeyEncryptionKeyURLKey)\n        kv_url = public_settings.get(CommonVariables.KeyVaultURLKey)\n        kek_algorithm = public_settings.get(CommonVariables.KeyEncryptionAlgorithmKey)\n\n        self.check_kv_url(kv_url, \"Encountered an error while checking the Key Vault URL\")\n        if kek_url:\n            self.check_kv_url(kek_url, \"A KEK URL was specified, but was invalid\")\n            if kek_algorithm is None or kek_algorithm.lower() not in [algo.lower() for algo in CommonVariables.encryption_algorithms]:\n                if kek_algorithm:\n                    raise Exception(\n                        \"The KEK encryption algorithm requested was not recognized\")\n                else:\n                    self.logger.log(\n                        \"No KEK algorithm specified, will default to {0}\".format(\n                            CommonVariables.default_encryption_algorithm))\n\n    def validate_volume_type(self, public_settings):\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation in [CommonVariables.QueryEncryptionStatus]:\n            # No need to validate volume type for Query Encryption Status operation\n            self.logger.log(\n                \"Ignore validating volume type for {0}\".format(\n                CommonVariables.QueryEncryptionStatus))\n            return\n\n        volume_type = public_settings.get(CommonVariables.VolumeTypeKey)\n        supported_types = CommonVariables.SupportedVolumeTypes\n        if volume_type.lower() not in map(lambda x: 
x.lower(), supported_types) :\n            raise Exception(\"Unknown Volume Type: {0}, has to be one of {1}\".format(volume_type, supported_types))\n\n    def validate_lvm_os(self, public_settings):\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if not encryption_operation:\n            self.logger.log(\"LVM OS validation skipped (no encryption operation)\")\n            return\n        elif encryption_operation.lower() == CommonVariables.QueryEncryptionStatus.lower():\n            self.logger.log(\"LVM OS validation skipped (Encryption Operation: QueryEncryptionStatus)\")\n            return\n\n        volume_type = public_settings.get(CommonVariables.VolumeTypeKey)\n        if not volume_type:\n            self.logger.log(\"LVM OS validation skipped (no volume type)\")\n            return\n        elif volume_type.lower() == CommonVariables.VolumeTypeData.lower():\n            self.logger.log(\"LVM OS validation skipped (Volume Type: DATA)\")\n            return\n\n        #  run lvm check if volume type, encryption operation were specified and OS type is LVM\n        detected = False\n        # first, check if the root OS volume type is LVM\n        if ( encryption_operation and volume_type and \n             os.system(\"lsblk -o TYPE,MOUNTPOINT | grep lvm | grep -q '/$'\") == 0):\n            # next, check that all required logical volume names exist  ( swaplv is not required )\n            lvlist = ['rootvg-tmplv',\n                      'rootvg-usrlv',\n                      'rootvg-optlv',\n                      'rootvg-homelv',\n                      'rootvg-varlv',\n                      'rootvg-rootlv']\n            for lvname in lvlist:\n                if not os.system(\"lsblk -o NAME | grep -q '\" + lvname + \"'\") == 0:\n                    self.logger.log('LVM OS scheme is missing LV [' + lvname + ']')\n                    detected = True\n        if detected:\n            raise Exception(\"LVM OS disk layout does not satisfy prerequisites ( see https://aka.ms/adelvm )\")\n\n    def validate_vfat(self):\n        \"\"\" Check for vfat module using modprobe and raise exception if not found \"\"\"\n        try:\n            executor = CommandExecutor(self.logger)\n            executor.Execute(\"modprobe vfat\", True)\n        except:\n            raise RuntimeError('Incompatible system, prerequisite vfat module was not found.')\n\n    def validate_aad(self, public_settings):\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation not in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]:\n            # skip if not an encryption operation, valid aad client id is only needed for encryption operations\n            return\n\n        aad_client_id = public_settings.get(CommonVariables.AADClientIDKey)\n        uuid_pattern = r\"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}){1}$\"\n        if aad_client_id:                \n            if not re.match(uuid_pattern, aad_client_id, re.IGNORECASE):             \n                message = 'AADClientID value is missing or invalid.'\n                # provide an extra hint if Unicode curly quotes were pasted in\n                if (u'\\u201c' in aad_client_id) or (u'\\u201d' in aad_client_id): \n                    message += ' Please remove Unicode quotation marks.'\n                raise Exception(message + 
'\\nActual Value: [' + aad_client_id + ']\\nExpected Format: [nnnnnnnn-nnnn-nnnn-nnnn-nnnnnnnnnnnn]')\n        else: \n            raise Exception(CommonVariables.AADClientIDKey + ' property was not found in settings')\n            \n    def validate_memory_os_encryption(self, public_settings, encryption_status):\n        is_enable_operation = False\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]:\n            is_enable_operation = True\n        volume_type = public_settings.get(CommonVariables.VolumeTypeKey)\n        if is_enable_operation and volume_type.lower() != CommonVariables.VolumeTypeData.lower() and encryption_status[\"os\"] == \"NotEncrypted\":\n            if self.is_insufficient_memory():\n                raise Exception(\"Not enough memory for enabling encryption on OS volume. 8 GB memory is recommended.\")\n\n    def is_supported_os(self, public_settings, DistroPatcher, encryption_status):\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n        if encryption_operation in [CommonVariables.QueryEncryptionStatus]:\n            self.logger.log(\"Query encryption operation detected. Skipping OS encryption validation check.\")\n            return\n        volume_type = public_settings.get(CommonVariables.VolumeTypeKey)\n        # If volume type is data, allow the operation (at this point we are sure a patch file for the distro exists)\n        if volume_type.lower() == CommonVariables.VolumeTypeData.lower():\n            self.logger.log(\"Volume Type is DATA. Skipping OS encryption validation check.\")\n            return\n        # If OS volume is already encrypted, just return (should not break already encrypted VMs)\n        if encryption_status[\"os\"] != \"NotEncrypted\":\n            self.logger.log(\"OS volume already encrypted. Skipping OS encryption validation check.\")\n            return\n        distro_name = DistroPatcher.distro_info[0]\n        distro_version = DistroPatcher.distro_info[1]\n        supported_os_file = os.path.join(os.getcwd(), 'main/SupportedOS.json')\n        with open(supported_os_file) as json_file:\n            data = json.load(json_file)\n            if distro_name in data:\n                versions = data[distro_name]\n                for version in versions:\n                    if distro_version.startswith(version['Version']):\n                        if 'Kernel' in version and LooseVersion(DistroPatcher.kernel_version) < LooseVersion(version['Kernel']):\n                            raise Exception('Kernel version {0} is not supported. 
Upgrade to kernel version {1}'.format(DistroPatcher.kernel_version, version['Kernel']))\n                        else:\n                            return\n            raise Exception('Distro {0} {1} is not supported for OS encryption'.format(distro_name, distro_version))\n\n    def precheck_for_fatal_failures(self, public_settings, encryption_status, DistroPatcher):\n        \"\"\" run all fatal prechecks, they should throw an exception if anything is wrong \"\"\"\n        self.validate_key_vault_params(public_settings)\n        self.validate_volume_type(public_settings)\n        self.validate_lvm_os(public_settings)\n        self.validate_vfat()\n        self.validate_aad(public_settings)\n        self.validate_memory_os_encryption(public_settings, encryption_status)\n        self.is_supported_os(public_settings, DistroPatcher, encryption_status)\n\n    def is_non_fatal_precheck_failure(self):\n        \"\"\" run all prechecks \"\"\"\n        detected = False\n        if self.is_app_compat_issue_detected():\n            detected = True\n            self.logger.log(\"PRECHECK: Likely app compat issue detected\")\n        if self.is_insufficient_memory():\n            detected = True\n            self.logger.log(\"PRECHECK: Low memory condition detected\")\n        if self.is_unsupported_mount_scheme():\n            detected = True\n            self.logger.log(\"PRECHECK: Unsupported mount scheme detected\")\n        return detected\n"
  },
  {
    "path": "VMEncryption/main/handle.py",
    "content": "#!/usr/bin/env python\n#\n# Azure Disk Encryption For Linux Extension\n#\n# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport filecmp\nimport json\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport time\nimport tempfile\nimport traceback\nimport uuid\nimport shutil\n\nfrom Utils import HandlerUtil\nfrom Common import CommonVariables, CryptItem\nfrom ExtensionParameter import ExtensionParameter\nfrom DiskUtil import DiskUtil\nfrom ResourceDiskUtil import ResourceDiskUtil\nfrom BackupLogger import BackupLogger\nfrom KeyVaultUtil import KeyVaultUtil\nfrom EncryptionConfig import EncryptionConfig\nfrom patch import GetDistroPatcher\nfrom BekUtil import BekUtil\nfrom check_util import CheckUtil\nfrom DecryptionMarkConfig import DecryptionMarkConfig\nfrom EncryptionMarkConfig import EncryptionMarkConfig\nfrom EncryptionEnvironment import EncryptionEnvironment\nfrom OnGoingItemConfig import OnGoingItemConfig\nfrom ProcessLock import ProcessLock\nfrom CommandExecutor import CommandExecutor, ProcessCommunicator\nfrom __builtin__ import int\n\n\ndef install():\n    hutil.do_parse_context('Install')\n    hutil.do_exit(0, 'Install', CommonVariables.extension_success_status, str(CommonVariables.success), 'Install Succeeded')\n\n\ndef disable():\n    hutil.do_parse_context('Disable')\n    # Archive configs at disable to make them available to new extension version prior to update\n    # The extension update handshake is [old:disable][new:update][old:uninstall][new:install]\n    hutil.archive_old_configs()\n    hutil.do_exit(0, 'Disable', CommonVariables.extension_success_status, '0', 'Disable succeeded')\n\n\ndef uninstall():\n    hutil.do_parse_context('Uninstall')\n    hutil.do_exit(0, 'Uninstall', CommonVariables.extension_success_status, '0', 'Uninstall succeeded')\n\n\ndef disable_encryption():\n    hutil.do_parse_context('DisableEncryption')\n\n    logger.log('Disabling encryption')\n\n    decryption_marker = DecryptionMarkConfig(logger, encryption_environment)\n\n    if decryption_marker.config_file_exists():\n        logger.log(msg=\"decryption is marked, starting daemon.\", level=CommonVariables.InfoLevel)\n        start_daemon('DisableEncryption')\n\n        hutil.do_exit(exit_code=0,\n                      operation='DisableEncryption',\n                      status=CommonVariables.extension_success_status,\n                      code=str(CommonVariables.success),\n                      message='Decryption started')\n\n    exit_status = {\n        'operation': 'DisableEncryption',\n        'status': CommonVariables.extension_success_status,\n        'status_code': str(CommonVariables.success),\n        'message': 'Decryption completed'\n    }\n\n    hutil.exit_if_same_seq(exit_status)\n    hutil.save_seq()\n\n    try:\n        extension_parameter = ExtensionParameter(hutil, logger, DistroPatcher, encryption_environment, get_protected_settings(), get_public_settings())\n\n        disk_util = 
DiskUtil(hutil=hutil, patching=DistroPatcher, logger=logger, encryption_environment=encryption_environment)\n\n        encryption_status = json.loads(disk_util.get_encryption_status())\n\n        if encryption_status[\"os\"] != \"NotEncrypted\":\n            raise Exception(\"Disabling encryption is not supported when OS volume is encrypted\")\n\n        bek_util = BekUtil(disk_util, logger)\n        encryption_config = EncryptionConfig(encryption_environment, logger)\n        bek_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n        crypt_items = disk_util.get_crypt_items()\n\n        logger.log('Found {0} items to decrypt'.format(len(crypt_items)))\n\n        for crypt_item in crypt_items:\n            disk_util.create_cleartext_key(crypt_item.mapper_name)\n\n            add_result = disk_util.luks_add_cleartext_key(bek_passphrase_file,\n                                                          crypt_item.dev_path,\n                                                          crypt_item.mapper_name,\n                                                          crypt_item.luks_header_path)\n            if add_result != CommonVariables.process_success:\n                if disk_util.is_luks_device(crypt_item.dev_path, crypt_item.luks_header_path):\n                    raise Exception(\"luksAdd failed with return code {0}\".format(add_result))\n                else:\n                    logger.log(\"luksAdd failed with return code {0}\".format(add_result))\n                    logger.log(\"Ignoring for now, as device ({0}) does not seem to be a luks device\".format(crypt_item.dev_path))\n                    continue\n\n            if crypt_item.dev_path.startswith(\"/dev/sd\"):\n                logger.log('Updating crypt item entry to use mapper name')\n                logger.log('Device name before update: {0}'.format(crypt_item.dev_path))\n                crypt_item.dev_path = disk_util.get_persistent_path_by_sdx_path(crypt_item.dev_path)\n                logger.log('Device name after update: {0}'.format(crypt_item.dev_path))\n\n            crypt_item.uses_cleartext_key = True\n            disk_util.update_crypt_item(crypt_item, None)\n\n            logger.log('Added cleartext key for {0}'.format(crypt_item))\n\n        decryption_marker.command = extension_parameter.command\n        decryption_marker.volume_type = extension_parameter.VolumeType\n        decryption_marker.commit()\n\n        hutil.do_exit(exit_code=0,\n                      operation='DisableEncryption',\n                      status=CommonVariables.extension_success_status,\n                      code=str(CommonVariables.success),\n                      message='Decryption started')\n\n    except Exception as e:\n        message = \"Failed to disable the extension with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n\n        logger.log(msg=message, level=CommonVariables.ErrorLevel)\n        hutil.do_exit(exit_code=CommonVariables.unknown_error,\n                      operation='DisableEncryption',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.unknown_error),\n                      message=message)\n\n\ndef get_public_settings():\n    public_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')\n    if isinstance(public_settings_str, basestring):\n        return json.loads(public_settings_str)\n    else:\n        return public_settings_str\n\n\ndef 
get_protected_settings():\n    protected_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')\n    if isinstance(protected_settings_str, basestring):\n        return json.loads(protected_settings_str)\n    else:\n        return protected_settings_str\n\n\ndef update_encryption_settings():\n    hutil.do_parse_context('UpdateEncryptionSettings')\n    logger.log('Updating encryption settings')\n\n    # re-install extra packages like cryptsetup if no longer on system from earlier enable\n    try:\n        DistroPatcher.install_extras()\n    except Exception as e:\n        message = \"Failed to update encryption settings with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n        hutil.do_exit(exit_code=CommonVariables.missing_dependency,\n                      operation='UpdateEncryptionSettings',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.missing_dependency),\n                      message=message)\n\n    encryption_config = EncryptionConfig(encryption_environment, logger)\n    config_secret_seq = encryption_config.get_secret_seq_num()\n    current_secret_seq_num = int(config_secret_seq if config_secret_seq else -1)\n    update_call_seq_num = hutil.get_current_seq()\n\n    logger.log(\"Current secret was created in operation #{0}\".format(current_secret_seq_num))\n    logger.log(\"The update call is operation #{0}\".format(update_call_seq_num))\n\n    executor = CommandExecutor(logger)\n    executor.Execute(\"mount /boot\")\n\n    try:\n        disk_util = DiskUtil(hutil=hutil, patching=DistroPatcher, logger=logger, encryption_environment=encryption_environment)\n        bek_util = BekUtil(disk_util, logger)\n\n        extension_parameter = ExtensionParameter(hutil, logger, DistroPatcher, encryption_environment, get_protected_settings(), get_public_settings())\n        existing_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n\n        if current_secret_seq_num < update_call_seq_num:\n            if extension_parameter.passphrase is None or extension_parameter.passphrase == \"\":\n                extension_parameter.passphrase = bek_util.generate_passphrase(extension_parameter.KeyEncryptionAlgorithm)\n\n            logger.log('Recreating secret to store in the KeyVault')\n\n            keyVaultUtil = KeyVaultUtil(logger)\n\n            temp_keyfile = tempfile.NamedTemporaryFile(delete=False)\n            temp_keyfile.write(extension_parameter.passphrase)\n            temp_keyfile.close()\n\n            for crypt_item in disk_util.get_crypt_items():\n                if not crypt_item:\n                    continue\n\n                before_keyslots = disk_util.luks_dump_keyslots(crypt_item.dev_path, crypt_item.luks_header_path)\n\n                logger.log(\"Before key addition, keyslots for {0}: {1}\".format(crypt_item.dev_path, before_keyslots))\n\n                logger.log(\"Adding new key for {0}\".format(crypt_item.dev_path))\n\n                luks_add_result = disk_util.luks_add_key(passphrase_file=existing_passphrase_file,\n                                                         dev_path=crypt_item.dev_path,\n                                                         mapper_name=crypt_item.mapper_name,\n                                                         header_file=crypt_item.luks_header_path,\n                                                         new_key_path=temp_keyfile.name)\n\n                
logger.log(\"luks add result is {0}\".format(luks_add_result))\n\n                after_keyslots = disk_util.luks_dump_keyslots(crypt_item.dev_path, crypt_item.luks_header_path)\n\n                logger.log(\"After key addition, keyslots for {0}: {1}\".format(crypt_item.dev_path, after_keyslots))\n\n                new_keyslot = list(map(lambda x: x[0] != x[1], zip(before_keyslots, after_keyslots))).index(True)\n\n                logger.log(\"New key was added in keyslot {0}\".format(new_keyslot))\n\n                # crypt_item.current_luks_slot = new_keyslot\n\n                # disk_util.update_crypt_item(crypt_item)\n\n            logger.log(\"New key successfully added to all encrypted devices\")\n\n            if DistroPatcher.distro_info[0] == \"Ubuntu\":\n                logger.log(\"Updating initrd image with new osluksheader.\")\n                executor.Execute(\"update-initramfs -u -k all\", True)\n\n            if DistroPatcher.distro_info[0] == \"redhat\" or DistroPatcher.distro_info[0] == \"centos\":\n                distro_version = DistroPatcher.distro_info[1]\n\n                if distro_version.startswith('7.'):\n                    logger.log(\"Updating initrd image with new osluksheader.\")\n                    executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n\n            os.unlink(temp_keyfile.name)\n\n            # install Python ADAL support if using client certificate authentication\n            if extension_parameter.AADClientCertThumbprint:\n                DistroPatcher.install_adal()\n\n            kek_secret_id_created = keyVaultUtil.create_kek_secret(Passphrase=extension_parameter.passphrase,\n                                                                   KeyVaultURL=extension_parameter.KeyVaultURL,\n                                                                   KeyEncryptionKeyURL=extension_parameter.KeyEncryptionKeyURL,\n                                                                   AADClientID=extension_parameter.AADClientID,\n                                                                   AADClientCertThumbprint=extension_parameter.AADClientCertThumbprint,\n                                                                   KeyEncryptionAlgorithm=extension_parameter.KeyEncryptionAlgorithm,\n                                                                   AADClientSecret=extension_parameter.AADClientSecret,\n                                                                   DiskEncryptionKeyFileName=extension_parameter.DiskEncryptionKeyFileName)\n\n            if kek_secret_id_created is None:\n                hutil.do_exit(exit_code=CommonVariables.create_encryption_secret_failed,\n                              operation='UpdateEncryptionSettings',\n                              status=CommonVariables.extension_error_status,\n                              code=str(CommonVariables.create_encryption_secret_failed),\n                              message='UpdateEncryptionSettings failed.')\n            else:\n                encryption_config.passphrase_file_name = extension_parameter.DiskEncryptionKeyFileName\n                encryption_config.secret_id = kek_secret_id_created\n                encryption_config.secret_seq_num = hutil.get_current_seq()\n                encryption_config.commit()\n\n                shutil.copy(existing_passphrase_file, encryption_environment.bek_backup_path)\n                logger.log(\"Backed up BEK at 
{0}\".format(encryption_environment.bek_backup_path))\n\n                hutil.do_exit(exit_code=0,\n                              operation='UpdateEncryptionSettings',\n                              status=CommonVariables.extension_success_status,\n                              code=str(CommonVariables.success),\n                              message=str(kek_secret_id_created))\n        else:\n            logger.log('Secret has already been updated')\n            mount_encrypted_disks(disk_util, bek_util, existing_passphrase_file, encryption_config)\n            disk_util.log_lsblk_output()\n            hutil.exit_if_same_seq()\n\n            # remount bek volume\n            existing_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n\n            if extension_parameter.passphrase and extension_parameter.passphrase != file(existing_passphrase_file).read():\n                logger.log(\"The new passphrase has not been placed in BEK volume yet\")\n                logger.log(\"Skipping removal of old passphrase\")\n                exit_without_status_report()\n\n            logger.log('Removing old passphrase')\n\n            for crypt_item in disk_util.get_crypt_items():\n                if not crypt_item:\n                    continue\n\n                if filecmp.cmp(existing_passphrase_file, encryption_environment.bek_backup_path):\n                    logger.log('Current BEK and backup are the same, skipping removal')\n                    continue\n\n                logger.log('Removing old passphrase from {0}'.format(crypt_item.dev_path))\n\n                keyslots = disk_util.luks_dump_keyslots(crypt_item.dev_path, crypt_item.luks_header_path)\n                logger.log(\"Keyslots before removal: {0}\".format(keyslots))\n\n                luks_remove_result = disk_util.luks_remove_key(passphrase_file=encryption_environment.bek_backup_path,\n                                                               dev_path=crypt_item.dev_path,\n                                                               header_file=crypt_item.luks_header_path)\n                logger.log(\"luks remove result is {0}\".format(luks_remove_result))\n\n                keyslots = disk_util.luks_dump_keyslots(crypt_item.dev_path, crypt_item.luks_header_path)\n                logger.log(\"Keyslots after removal: {0}\".format(keyslots))\n\n            logger.log(\"Old key successfully removed from all encrypted devices\")\n\n            if DistroPatcher.distro_info[0] == \"Ubuntu\":\n                logger.log(\"Updating initrd image with new osluksheader.\")\n                executor.Execute(\"update-initramfs -u -k all\", True)\n\n            if DistroPatcher.distro_info[0] == \"redhat\" or DistroPatcher.distro_info[0] == \"centos\":\n                distro_version = DistroPatcher.distro_info[1]\n\n                if distro_version.startswith('7.'):\n                    logger.log(\"Updating initrd image with new osluksheader.\")\n                    executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n\n            hutil.save_seq()\n            extension_parameter.commit()\n            os.unlink(encryption_environment.bek_backup_path)\n\n        hutil.do_exit(exit_code=0,\n                      operation='UpdateEncryptionSettings',\n                      status=CommonVariables.extension_success_status,\n                      code=str(CommonVariables.success),\n                      message='Encryption settings updated')\n    
except Exception as e:\n        message = \"Failed to update encryption settings with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n        logger.log(msg=message, level=CommonVariables.ErrorLevel)\n        hutil.do_exit(exit_code=CommonVariables.unknown_error,\n                      operation='UpdateEncryptionSettings',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.unknown_error),\n                      message=message)\n\n\ndef update():\n    # The extension update handshake is [old:disable][new:update][old:uninstall][new:install]\n    # this method is called when updating an older version of the extension to a newer version\n    hutil.do_parse_context('Update')\n    logger.log(\"Installing pre-requisites\")\n    DistroPatcher.install_extras()\n    DistroPatcher.update_prereq()\n    hutil.do_exit(0, 'Update', CommonVariables.extension_success_status, '0', 'Update Succeeded')\n\n\ndef exit_without_status_report():\n    sys.exit(0)\n\n\ndef not_support_header_option_distro(patching):\n    if patching.distro_info[0].lower() == \"centos\" and patching.distro_info[1].startswith('6.'):\n        return True\n    if patching.distro_info[0].lower() == \"redhat\" and patching.distro_info[1].startswith('6.'):\n        return True\n    if patching.distro_info[0].lower() == \"suse\" and patching.distro_info[1].startswith('11'):\n        return True\n    return False\n\n\ndef none_or_empty(obj):\n    return obj is None or obj == \"\"\n\n\ndef toggle_se_linux_for_centos7(disable):\n    if DistroPatcher.distro_info[0].lower() == 'centos' and DistroPatcher.distro_info[1].startswith('7.0'):\n        if disable:\n            se_linux_status = encryption_environment.get_se_linux()\n            if se_linux_status.lower() == 'enforcing':\n                encryption_environment.disable_se_linux()\n                return True\n        else:\n            encryption_environment.enable_se_linux()\n    return False\n\n\ndef mount_encrypted_disks(disk_util, bek_util, passphrase_file, encryption_config):\n\n    # mount encrypted resource disk\n    volume_type = encryption_config.get_volume_type().lower()\n    if volume_type == CommonVariables.VolumeTypeData.lower() or volume_type == CommonVariables.VolumeTypeAll.lower():\n        resource_disk_util = ResourceDiskUtil(logger, disk_util, passphrase_file, get_public_settings(), DistroPatcher.distro_info)\n        resource_disk_util.automount()\n        logger.log(\"mounted encrypted resource disk\")\n\n    # add workaround for CentOS 7.0\n    se_linux_status = None\n    if DistroPatcher.distro_info[0].lower() == 'centos' and DistroPatcher.distro_info[1].startswith('7.0'):\n        se_linux_status = encryption_environment.get_se_linux()\n        if se_linux_status.lower() == 'enforcing':\n            encryption_environment.disable_se_linux()\n\n    # mount any data disks - make sure the azure disk config path exists.\n    for crypt_item in disk_util.get_crypt_items():\n        if not crypt_item:\n            continue\n\n        if not os.path.exists(os.path.join(CommonVariables.dev_mapper_root, crypt_item.mapper_name)):\n            luks_open_result = disk_util.luks_open(passphrase_file=passphrase_file,\n                                                   dev_path=crypt_item.dev_path,\n                                                   mapper_name=crypt_item.mapper_name,\n                                                   
header_file=crypt_item.luks_header_path,\n                                                   uses_cleartext_key=crypt_item.uses_cleartext_key)\n\n            logger.log(\"luks open result is {0}\".format(luks_open_result))\n\n        disk_util.mount_crypt_item(crypt_item, passphrase_file)\n\n    if DistroPatcher.distro_info[0].lower() == 'centos' and DistroPatcher.distro_info[1].startswith('7.0'):\n        if se_linux_status is not None and se_linux_status.lower() == 'enforcing':\n            encryption_environment.enable_se_linux()\n\n\ndef main():\n    global hutil, DistroPatcher, logger, encryption_environment\n    HandlerUtil.LoggerInit('/var/log/waagent.log', '/dev/stdout')\n    HandlerUtil.waagent.Log(\"{0} started to handle.\".format(CommonVariables.extension_name))\n\n    hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)\n    logger = BackupLogger(hutil)\n    DistroPatcher = GetDistroPatcher(logger)\n\n    # check for a supported distro before the patcher is used to build the environment\n    if DistroPatcher is None:\n        hutil.do_exit(exit_code=CommonVariables.os_not_supported,\n                      operation='Enable',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.os_not_supported),\n                      message='Enable failed: the os is not supported')\n\n    hutil.patching = DistroPatcher\n\n    encryption_environment = EncryptionEnvironment(patching=DistroPatcher, logger=logger)\n\n    disk_util = DiskUtil(hutil=hutil, patching=DistroPatcher, logger=logger, encryption_environment=encryption_environment)\n    hutil.disk_util = disk_util\n\n    for a in sys.argv[1:]:\n        if re.match(\"^([-/]*)(disable)\", a):\n            disable()\n        elif re.match(\"^([-/]*)(uninstall)\", a):\n            uninstall()\n        elif re.match(\"^([-/]*)(install)\", a):\n            install()\n        elif re.match(\"^([-/]*)(enable)\", a):\n            enable()\n        elif re.match(\"^([-/]*)(update)\", a):\n            update()\n        elif re.match(\"^([-/]*)(daemon)\", a):\n            daemon()\n\n\ndef mark_encryption(command, volume_type, disk_format_query):\n    encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n    encryption_marker.command = command\n    encryption_marker.volume_type = volume_type\n    encryption_marker.diskFormatQuery = disk_format_query\n    encryption_marker.commit()\n    return encryption_marker\n\n\ndef is_daemon_running():\n    handler_path = os.path.join(os.getcwd(), __file__)\n    daemon_arg = \"-daemon\"\n\n    psproc = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)\n    pslist, _ = psproc.communicate()\n\n    for line in pslist.split(\"\\n\"):\n        if handler_path in line and daemon_arg in line:\n            return True\n\n    return False\n\n\ndef enable():\n    while True:\n        hutil.do_parse_context('Enable')\n        logger.log('Enabling extension')\n\n        public_settings = get_public_settings()\n        logger.log('Public settings:\\n{0}'.format(json.dumps(public_settings, sort_keys=True, indent=4)))\n        cutil = CheckUtil(logger)\n        # Mount already encrypted disks before running fatal prechecks\n        disk_util = DiskUtil(hutil=hutil, patching=DistroPatcher, logger=logger, encryption_environment=encryption_environment)\n        bek_util = BekUtil(disk_util, logger)\n        existing_passphrase_file = None\n        encryption_config = EncryptionConfig(encryption_environment=encryption_environment, logger=logger)\n\n        
existing_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n        if existing_passphrase_file is not None:\n            mount_encrypted_disks(disk_util=disk_util,\n                                  bek_util=bek_util,\n                                  encryption_config=encryption_config,\n                                  passphrase_file=existing_passphrase_file)\n            # Migrate to early unlock if using crypt mount\n            if disk_util.should_use_azure_crypt_mount():\n                disk_util.migrate_crypt_items(existing_passphrase_file)\n\n        encryption_status = json.loads(disk_util.get_encryption_status())\n\n        # run fatal prechecks, report error if exceptions are caught\n        try:\n            cutil.precheck_for_fatal_failures(public_settings, encryption_status, DistroPatcher)\n        except Exception as e:\n            logger.log(\"PRECHECK: Fatal Exception thrown during precheck\")\n            logger.log(traceback.format_exc())\n            msg = str(e)\n            hutil.do_exit(exit_code=CommonVariables.configuration_error,\n                          operation='Enable',\n                          status=CommonVariables.extension_error_status,\n                          code=str(CommonVariables.configuration_error),\n                          message=msg)\n\n        hutil.disk_util.log_lsblk_output()\n\n        # run prechecks and log any failures detected\n        try:\n            if cutil.is_non_fatal_precheck_failure():\n                logger.log(\"PRECHECK: Precheck failure, incompatible environment suspected\")\n            else:\n                logger.log(\"PRECHECK: Prechecks successful\")\n        except Exception:\n            logger.log(\"PRECHECK: Exception thrown during precheck\")\n            logger.log(traceback.format_exc())\n\n        encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)\n\n        if encryption_operation in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]:\n            logger.log(\"handle.py found enable encryption operation\")\n\n            extension_parameter = ExtensionParameter(hutil, logger, DistroPatcher, encryption_environment, get_protected_settings(), public_settings)\n\n            if os.path.exists(encryption_environment.bek_backup_path) or (extension_parameter.config_file_exists() and extension_parameter.config_changed()):\n                logger.log(\"Config has changed, updating encryption settings\")\n                update_encryption_settings()\n                extension_parameter.commit()\n            else:\n                logger.log(\"Config did not change or first call, enabling encryption\")\n                enable_encryption()\n\n        elif encryption_operation == CommonVariables.DisableEncryption:\n            logger.log(\"handle.py found disable encryption operation\")\n\n            disable_encryption()\n\n        elif encryption_operation == CommonVariables.QueryEncryptionStatus:\n            logger.log(\"handle.py found query operation\")\n\n            encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n            if is_daemon_running() or (encryption_marker and not encryption_marker.config_file_exists()):\n                logger.log(\"A daemon is already running or no operation in progress, exiting without status report\")\n                hutil.redo_last_status()\n                exit_without_status_report()\n            else:\n        
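        # no daemon is running, but an operation is still marked in progress:\n                # report the status of the last non-query operation instead\n        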
        logger.log(\"No daemon found, trying to find the last non-query operation\")\n                hutil.find_last_nonquery_operation = True\n\n        else:\n            msg = \"Encryption operation {0} is not supported\".format(encryption_operation)\n            logger.log(msg)\n            hutil.do_exit(exit_code=CommonVariables.configuration_error,\n                          operation='Enable',\n                          status=CommonVariables.extension_error_status,\n                          code=(CommonVariables.configuration_error),\n                          message=msg)\n\n\ndef enable_encryption():\n    hutil.do_parse_context('EnableEncryption')\n    # we need to start another subprocess to do it, because the initial process\n    # would be killed by the wala in 5 minutes.\n    logger.log('Enabling encryption')\n\n    \"\"\"\n    trying to mount the crypted items.\n    \"\"\"\n    disk_util = DiskUtil(hutil=hutil, patching=DistroPatcher, logger=logger, encryption_environment=encryption_environment)\n    bek_util = BekUtil(disk_util, logger)\n\n    existing_passphrase_file = None\n    encryption_config = EncryptionConfig(encryption_environment=encryption_environment, logger=logger)\n    config_path_result = disk_util.make_sure_path_exists(encryption_environment.encryption_config_path)\n\n    if config_path_result != CommonVariables.process_success:\n        logger.log(msg=\"azure encryption path creation failed.\",\n                   level=CommonVariables.ErrorLevel)\n\n    if encryption_config.config_file_exists():\n        existing_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n        if existing_passphrase_file is not None:\n            mount_encrypted_disks(disk_util=disk_util,\n                                  bek_util=bek_util,\n                                  encryption_config=encryption_config,\n                                  passphrase_file=existing_passphrase_file)\n        else:\n            logger.log(msg=\"EncryptionConfig is present, but could not get the BEK file.\",\n                       level=CommonVariables.WarningLevel)\n            hutil.redo_last_status()\n            exit_without_status_report()\n\n    ps = subprocess.Popen([\"ps\", \"aux\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    ps_stdout, ps_stderr = ps.communicate()\n    if re.search(r\"dd.*of=/dev/mapper/osencrypt\", ps_stdout):\n        logger.log(msg=\"OS disk encryption already in progress, exiting\",\n                   level=CommonVariables.WarningLevel)\n        exit_without_status_report()\n\n    # handle the re-call scenario.  
the re-call resumes if\n    # an encryption mark exists for the next reboot.\n    encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n\n    try:\n        protected_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')\n        public_settings_str = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')\n\n        if isinstance(public_settings_str, basestring):\n            public_settings = json.loads(public_settings_str)\n        else:\n            public_settings = public_settings_str\n\n        if isinstance(protected_settings_str, basestring):\n            protected_settings = json.loads(protected_settings_str)\n        else:\n            protected_settings = protected_settings_str\n\n        extension_parameter = ExtensionParameter(hutil, logger, DistroPatcher, encryption_environment, protected_settings, public_settings)\n\n        kek_secret_id_created = None\n\n        encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n        if encryption_marker.config_file_exists():\n            # verify the encryption mark\n            logger.log(msg=\"encryption mark exists, starting daemon.\", level=CommonVariables.InfoLevel)\n            start_daemon('EnableEncryption')\n        else:\n            encryption_config = EncryptionConfig(encryption_environment, logger)\n\n            exit_status = None\n            if encryption_config.config_file_exists():\n                exit_status = {\n                    'operation': 'EnableEncryption',\n                    'status': CommonVariables.extension_success_status,\n                    'status_code': str(CommonVariables.success),\n                    'message': encryption_config.get_secret_id()\n                }\n\n            hutil.exit_if_same_seq(exit_status)\n            hutil.save_seq()\n\n            encryption_config.volume_type = extension_parameter.VolumeType\n            encryption_config.commit()\n\n            if encryption_config.config_file_exists() and existing_passphrase_file is not None:\n                logger.log(msg=\"config file exists and passphrase file exists.\", level=CommonVariables.WarningLevel)\n                encryption_marker = mark_encryption(command=extension_parameter.command,\n                                                    volume_type=extension_parameter.VolumeType,\n                                                    disk_format_query=extension_parameter.DiskFormatQuery)\n                start_daemon('EnableEncryption')\n            else:\n                \"\"\"\n                create the secret; it is transferred to a BEK volume after UpdateVM is called from PowerShell.\n                \"\"\"\n                # store the LUKS passphrase in the secret.\n                keyVaultUtil = KeyVaultUtil(logger)\n\n                \"\"\"\n                validate the parameters\n                \"\"\"\n                if (extension_parameter.VolumeType is None or\n                    not any([extension_parameter.VolumeType.lower() == vt.lower() for vt in CommonVariables.SupportedVolumeTypes])):\n                    if encryption_config.config_file_exists():\n                        existing_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n\n                        if existing_passphrase_file is None:\n                            logger.log(\"Unsupported volume type specified and BEK volume does not exist, clearing encryption config\")\n                     
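       # clearing the config lets a later enable call start over with corrected settings\n                     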
       encryption_config.clear_config()\n\n                    hutil.do_exit(exit_code=CommonVariables.configuration_error,\n                                  operation='EnableEncryption',\n                                  status=CommonVariables.extension_error_status,\n                                  code=str(CommonVariables.configuration_error),\n                                  message='VolumeType \"{0}\" is not supported'.format(extension_parameter.VolumeType))\n\n                if extension_parameter.command not in [CommonVariables.EnableEncryption, CommonVariables.EnableEncryptionFormat, CommonVariables.EnableEncryptionFormatAll]:\n                    hutil.do_exit(exit_code=CommonVariables.configuration_error,\n                                  operation='EnableEncryption',\n                                  status=CommonVariables.extension_error_status,\n                                  code=str(CommonVariables.configuration_error),\n                                  message='Command \"{0}\" is not supported'.format(extension_parameter.command))\n\n                \"\"\"\n                this is the fresh call case\n                \"\"\"\n                # handle passphrase creation or reuse\n                if existing_passphrase_file is None:\n                    if extension_parameter.passphrase is None or extension_parameter.passphrase == \"\":\n                        extension_parameter.passphrase = bek_util.generate_passphrase(extension_parameter.KeyEncryptionAlgorithm)\n                    else:\n                        logger.log(msg=\"the extension_parameter.passphrase is already defined\")\n\n                    # install Python ADAL support if using client certificate authentication\n                    if extension_parameter.AADClientCertThumbprint:\n                        DistroPatcher.install_adal()\n\n                    kek_secret_id_created = keyVaultUtil.create_kek_secret(Passphrase=extension_parameter.passphrase,\n                                                                           KeyVaultURL=extension_parameter.KeyVaultURL,\n                                                                           KeyEncryptionKeyURL=extension_parameter.KeyEncryptionKeyURL,\n                                                                           AADClientID=extension_parameter.AADClientID,\n                                                                           AADClientCertThumbprint=extension_parameter.AADClientCertThumbprint,\n                                                                           KeyEncryptionAlgorithm=extension_parameter.KeyEncryptionAlgorithm,\n                                                                           AADClientSecret=extension_parameter.AADClientSecret,\n                                                                           DiskEncryptionKeyFileName=extension_parameter.DiskEncryptionKeyFileName)\n\n                    if kek_secret_id_created is None:\n                        encryption_config.clear_config()\n                        hutil.do_exit(exit_code=CommonVariables.create_encryption_secret_failed,\n                                      operation='EnableEncryption',\n                                      status=CommonVariables.extension_error_status,\n                                      code=str(CommonVariables.create_encryption_secret_failed),\n                                      message='Enable failed.')\n                    else:\n                        encryption_config.passphrase_file_name = 
extension_parameter.DiskEncryptionKeyFileName\n                        encryption_config.volume_type = extension_parameter.VolumeType\n                        encryption_config.secret_id = kek_secret_id_created\n                        encryption_config.secret_seq_num = hutil.get_current_seq()\n                        encryption_config.commit()\n\n                        extension_parameter.commit()\n\n                encryption_marker = mark_encryption(command=extension_parameter.command,\n                                                    volume_type=extension_parameter.VolumeType,\n                                                    disk_format_query=extension_parameter.DiskFormatQuery)\n\n                if kek_secret_id_created:\n                    hutil.do_exit(exit_code=0,\n                                  operation='EnableEncryption',\n                                  status=CommonVariables.extension_success_status,\n                                  code=str(CommonVariables.success),\n                                  message=str(kek_secret_id_created))\n                else:\n                    \"\"\"\n                    enable was called again; the existing passphrase is re-used.\n                    \"\"\"\n                    hutil.do_exit(exit_code=0,\n                                  operation='EnableEncryption',\n                                  status=CommonVariables.extension_success_status,\n                                  code=str(CommonVariables.encrypttion_already_enabled),\n                                  message=str(kek_secret_id_created))\n    except Exception as e:\n        message = \"Failed to enable the extension with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n        logger.log(msg=message, level=CommonVariables.ErrorLevel)\n        hutil.do_exit(exit_code=CommonVariables.unknown_error,\n                      operation='EnableEncryption',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.unknown_error),\n                      message=message)\n\n\ndef enable_encryption_format(passphrase, disk_format_query, disk_util, force=False):\n    logger.log('enable_encryption_format')\n    logger.log('disk format query is {0}'.format(disk_format_query))\n\n    json_parsed = json.loads(disk_format_query)\n\n    if isinstance(json_parsed, dict):\n        encryption_format_items = [json_parsed]\n    elif isinstance(json_parsed, list):\n        encryption_format_items = json_parsed\n    else:\n        raise Exception(\"JSON parse error. Input: {0}\".format(disk_format_query))\n\n    for encryption_item in encryption_format_items:\n        dev_path_in_query = None\n\n        if \"scsi\" in encryption_item and encryption_item[\"scsi\"] != '':\n            dev_path_in_query = disk_util.query_dev_sdx_path_by_scsi_id(encryption_item[\"scsi\"])\n        if \"dev_path\" in encryption_item and encryption_item[\"dev_path\"] != '':\n            dev_path_in_query = encryption_item[\"dev_path\"]\n\n        if not dev_path_in_query:\n            raise Exception(\"Could not find a device path for Encryption Item: {0}\".format(json.dumps(encryption_item)))\n\n        devices = disk_util.get_device_items(dev_path_in_query)\n        if len(devices) != 1:\n            logger.log(msg=(\"the device with path {0} has more than one sub-device, 
so skip it.\".format(dev_path_in_query)), level=CommonVariables.WarningLevel)\n            continue\n        else:\n            device_item = devices[0]\n            if device_item.file_system is None or device_item.file_system == \"\" or force:\n                if device_item.mount_point:\n                    disk_util.swapoff()\n                    disk_util.umount(device_item.mount_point)\n                mapper_name = str(uuid.uuid4())\n                logger.log(\"encrypting \" + str(device_item))\n                encrypted_device_path = os.path.join(CommonVariables.dev_mapper_root, mapper_name)\n                try:\n                    se_linux_status = None\n                    if DistroPatcher.distro_info[0].lower() == 'centos' and DistroPatcher.distro_info[1].startswith('7.0'):\n                        se_linux_status = encryption_environment.get_se_linux()\n                        if se_linux_status.lower() == 'enforcing':\n                            encryption_environment.disable_se_linux()\n                    encrypt_result = disk_util.encrypt_disk(dev_path=dev_path_in_query, passphrase_file=passphrase, mapper_name=mapper_name, header_file=None)\n                finally:\n                    if DistroPatcher.distro_info[0].lower() == 'centos' and DistroPatcher.distro_info[1].startswith('7.0'):\n                        if se_linux_status is not None and se_linux_status.lower() == 'enforcing':\n                            encryption_environment.enable_se_linux()\n\n                if encrypt_result == CommonVariables.process_success:\n                    # TODO: let customer specify the default file system in the\n                    # parameter\n                    file_system = None\n                    if \"file_system\" in encryption_item and encryption_item[\"file_system\"] != \"\":\n                        file_system = encryption_item[\"file_system\"]\n                    else:\n                        file_system = CommonVariables.default_file_system\n                    format_disk_result = disk_util.format_disk(dev_path=encrypted_device_path, file_system=file_system)\n                    if format_disk_result != CommonVariables.process_success:\n                        logger.log(msg=(\"format of disk {0} failed with result: {1}\".format(encrypted_device_path, format_disk_result)), level=CommonVariables.ErrorLevel)\n                    crypt_item_to_update = CryptItem()\n                    crypt_item_to_update.mapper_name = mapper_name\n                    crypt_item_to_update.dev_path = dev_path_in_query\n                    crypt_item_to_update.luks_header_path = None\n                    crypt_item_to_update.file_system = file_system\n                    crypt_item_to_update.uses_cleartext_key = False\n                    crypt_item_to_update.current_luks_slot = 0\n\n                    if \"name\" in encryption_item and encryption_item[\"name\"] != \"\":\n                        crypt_item_to_update.mount_point = os.path.join(\"/mnt/\", str(encryption_item[\"name\"]))\n                    else:\n                        crypt_item_to_update.mount_point = os.path.join(\"/mnt/\", mapper_name)\n\n                    # allow override through the new full_mount_point field\n                    if \"full_mount_point\" in encryption_item and encryption_item[\"full_mount_point\"] != \"\":\n                        crypt_item_to_update.mount_point = os.path.join(str(encryption_item[\"full_mount_point\"]))\n\n                    logger.log(msg=\"modifying/removing the entry 
for unencrypted drive in fstab\", level=CommonVariables.InfoLevel)\n                    disk_util.modify_fstab_entry_encrypt(crypt_item_to_update.mount_point, os.path.join(CommonVariables.dev_mapper_root, mapper_name))\n\n                    disk_util.make_sure_path_exists(crypt_item_to_update.mount_point)\n                    update_crypt_item_result = disk_util.add_crypt_item(crypt_item_to_update, passphrase)\n                    if not update_crypt_item_result:\n                        logger.log(msg=\"update crypt item failed\", level=CommonVariables.ErrorLevel)\n\n                    mount_result = disk_util.mount_filesystem(dev_path=encrypted_device_path, mount_point=crypt_item_to_update.mount_point)\n                    logger.log(msg=(\"mount result is {0}\".format(mount_result)))\n                else:\n                    logger.log(msg=\"encryption failed with code {0}\".format(encrypt_result), level=CommonVariables.ErrorLevel)\n            else:\n                logger.log(msg=(\"the item fstype is not empty: {0}, skipping\".format(device_item.file_system)))\n\n\ndef encrypt_inplace_without_seperate_header_file(passphrase_file,\n                                                 device_item,\n                                                 disk_util,\n                                                 bek_util,\n                                                 status_prefix='',\n                                                 ongoing_item_config=None):\n    \"\"\"\n    if ongoing_item_config is not None, then this is a resume case.\n    this function will return the phase\n    \"\"\"\n    logger.log(\"encrypt_inplace_without_seperate_header_file\")\n    current_phase = CommonVariables.EncryptionPhaseBackupHeader\n    if ongoing_item_config is None:\n        ongoing_item_config = OnGoingItemConfig(encryption_environment=encryption_environment, logger=logger)\n        ongoing_item_config.current_block_size = CommonVariables.default_block_size\n        ongoing_item_config.current_slice_index = 0\n        ongoing_item_config.device_size = device_item.size\n        ongoing_item_config.file_system = device_item.file_system\n        ongoing_item_config.luks_header_file_path = None\n        ongoing_item_config.mapper_name = str(uuid.uuid4())\n        ongoing_item_config.mount_point = device_item.mount_point\n        if os.path.exists(os.path.join('/dev/', device_item.name)):\n            ongoing_item_config.original_dev_name_path = os.path.join('/dev/', device_item.name)\n            ongoing_item_config.original_dev_path = os.path.join('/dev/', device_item.name)\n        else:\n            ongoing_item_config.original_dev_name_path = os.path.join('/dev/mapper/', device_item.name)\n            ongoing_item_config.original_dev_path = os.path.join('/dev/mapper/', device_item.name)\n        ongoing_item_config.phase = CommonVariables.EncryptionPhaseBackupHeader\n        ongoing_item_config.commit()\n    else:\n        logger.log(msg=\"ongoing item config is not None, this is resuming, info: {0}\".format(ongoing_item_config),\n                   level=CommonVariables.WarningLevel)\n\n    logger.log(msg=(\"encrypting device item: {0}\".format(ongoing_item_config.get_original_dev_path())))\n    # we only support ext file systems.\n    current_phase = ongoing_item_config.get_phase()\n\n    original_dev_path = ongoing_item_config.get_original_dev_path()\n    mapper_name = ongoing_item_config.get_mapper_name()\n    device_size = ongoing_item_config.get_device_size()\n\n    luks_header_size = 
CommonVariables.luks_header_size\n    size_shrink_to = (device_size - luks_header_size) / CommonVariables.sector_size\n\n    while current_phase != CommonVariables.EncryptionPhaseDone:\n        if current_phase == CommonVariables.EncryptionPhaseBackupHeader:\n            logger.log(msg=\"the current phase is \" + str(CommonVariables.EncryptionPhaseBackupHeader),\n                       level=CommonVariables.InfoLevel)\n\n            # log an appropriate warning if the file system type is not supported\n            device_fs = ongoing_item_config.get_file_system().lower()\n            if device_fs not in CommonVariables.inplace_supported_file_systems:\n                if device_fs in CommonVariables.format_supported_file_systems:\n                    msg = \"Encrypting {0} file system is not supported for data-preserving encryption. Consider using the encrypt-format-all option.\".format(device_fs)\n                else:\n                    msg = \"AzureDiskEncryption does not support the {0} file system\".format(device_fs)\n                logger.log(msg=msg, level=CommonVariables.WarningLevel)\n\n                ongoing_item_config.clear_config()\n                return current_phase\n\n            chk_shrink_result = disk_util.check_shrink_fs(dev_path=original_dev_path, size_shrink_to=size_shrink_to)\n\n            if chk_shrink_result != CommonVariables.process_success:\n                logger.log(msg=\"check shrink fs failed with code {0} for {1}\".format(chk_shrink_result, original_dev_path),\n                           level=CommonVariables.ErrorLevel)\n                logger.log(msg=\"the file system may not have enough free space for in-place encryption.\")\n\n                # remove the ongoing item.\n                ongoing_item_config.clear_config()\n                return current_phase\n            else:\n                ongoing_item_config.current_slice_index = 0\n                ongoing_item_config.current_source_path = original_dev_path\n                ongoing_item_config.current_destination = encryption_environment.copy_header_slice_file_path\n                ongoing_item_config.current_total_copy_size = CommonVariables.default_block_size\n                ongoing_item_config.from_end = False\n                ongoing_item_config.header_slice_file_path = encryption_environment.copy_header_slice_file_path\n                ongoing_item_config.original_dev_path = original_dev_path\n                ongoing_item_config.commit()\n                if os.path.exists(encryption_environment.copy_header_slice_file_path):\n                    logger.log(msg=\"the header slice file already exists, removing it.\", level=CommonVariables.WarningLevel)\n                    os.remove(encryption_environment.copy_header_slice_file_path)\n\n                copy_result = disk_util.copy(ongoing_item_config=ongoing_item_config, status_prefix=status_prefix)\n\n                if copy_result != CommonVariables.process_success:\n                    logger.log(msg=\"copy the header block failed, return code is: {0}\".format(copy_result),\n                               level=CommonVariables.ErrorLevel)\n                    return current_phase\n                else:\n                    ongoing_item_config.current_slice_index = 0\n                    ongoing_item_config.phase = CommonVariables.EncryptionPhaseEncryptDevice\n                    ongoing_item_config.commit()\n                    current_phase = CommonVariables.EncryptionPhaseEncryptDevice\n\n        elif current_phase == 
CommonVariables.EncryptionPhaseEncryptDevice:\n            logger.log(msg=\"the current phase is {0}\".format(CommonVariables.EncryptionPhaseEncryptDevice),\n                       level=CommonVariables.InfoLevel)\n\n            encrypt_result = disk_util.encrypt_disk(dev_path=original_dev_path,\n                                                    passphrase_file=passphrase_file,\n                                                    mapper_name=mapper_name,\n                                                    header_file=None)\n\n            # after encrypt_disk without a separate header file, the uuid\n            # of the device changes.\n            if encrypt_result != CommonVariables.process_success:\n                logger.log(msg=\"encrypting the file system failed.\", level=CommonVariables.ErrorLevel)\n                return current_phase\n            else:\n                ongoing_item_config.current_slice_index = 0\n                ongoing_item_config.phase = CommonVariables.EncryptionPhaseCopyData\n                ongoing_item_config.commit()\n                current_phase = CommonVariables.EncryptionPhaseCopyData\n\n        elif current_phase == CommonVariables.EncryptionPhaseCopyData:\n            logger.log(msg=\"the current phase is {0}\".format(CommonVariables.EncryptionPhaseCopyData),\n                       level=CommonVariables.InfoLevel)\n            device_mapper_path = os.path.join(CommonVariables.dev_mapper_root, mapper_name)\n            ongoing_item_config.current_destination = device_mapper_path\n            ongoing_item_config.current_source_path = original_dev_path\n            ongoing_item_config.current_total_copy_size = (device_size - luks_header_size)\n            ongoing_item_config.from_end = True\n            ongoing_item_config.phase = CommonVariables.EncryptionPhaseCopyData\n            ongoing_item_config.commit()\n\n            copy_result = disk_util.copy(ongoing_item_config=ongoing_item_config, status_prefix=status_prefix)\n            if copy_result != CommonVariables.process_success:\n                logger.log(msg=\"copy the main content block failed, return code is: {0}\".format(copy_result),\n                           level=CommonVariables.ErrorLevel)\n                return current_phase\n            else:\n                ongoing_item_config.phase = CommonVariables.EncryptionPhaseRecoverHeader\n                ongoing_item_config.commit()\n                current_phase = CommonVariables.EncryptionPhaseRecoverHeader\n\n        elif current_phase == CommonVariables.EncryptionPhaseRecoverHeader:\n            logger.log(msg=\"the current phase is \" + str(CommonVariables.EncryptionPhaseRecoverHeader),\n                       level=CommonVariables.InfoLevel)\n            ongoing_item_config.from_end = False\n            backed_up_header_slice_file_path = ongoing_item_config.get_header_slice_file_path()\n            ongoing_item_config.current_slice_index = 0\n            ongoing_item_config.current_source_path = backed_up_header_slice_file_path\n            device_mapper_path = os.path.join(CommonVariables.dev_mapper_root, mapper_name)\n            ongoing_item_config.current_destination = device_mapper_path\n            ongoing_item_config.current_total_copy_size = CommonVariables.default_block_size\n            ongoing_item_config.commit()\n\n            copy_result = disk_util.copy(ongoing_item_config=ongoing_item_config, status_prefix=status_prefix)\n\n            if copy_result == CommonVariables.process_success:\n                crypt_item_to_update = 
CryptItem()\n                crypt_item_to_update.mapper_name = mapper_name\n                original_dev_name_path = ongoing_item_config.get_original_dev_name_path()\n                crypt_item_to_update.dev_path = disk_util.get_persistent_path_by_sdx_path(original_dev_name_path)\n                crypt_item_to_update.luks_header_path = \"None\"\n                crypt_item_to_update.file_system = ongoing_item_config.get_file_system()\n                crypt_item_to_update.uses_cleartext_key = False\n                crypt_item_to_update.current_luks_slot = 0\n                # if the original mountpoint is empty, then leave\n                # it as None\n                mount_point = ongoing_item_config.get_mount_point()\n                if mount_point == \"\" or mount_point is None:\n                    crypt_item_to_update.mount_point = \"None\"\n                else:\n                    crypt_item_to_update.mount_point = mount_point\n                update_crypt_item_result = disk_util.add_crypt_item(crypt_item_to_update, passphrase_file)\n                if not update_crypt_item_result:\n                    logger.log(msg=\"update crypt item failed\", level=CommonVariables.ErrorLevel)\n\n                if mount_point:\n                    logger.log(msg=\"removing entry for unencrypted drive from fstab\",\n                               level=CommonVariables.InfoLevel)\n                    disk_util.modify_fstab_entry_encrypt(mount_point, os.path.join(CommonVariables.dev_mapper_root, mapper_name))\n                else:\n                    logger.log(msg=original_dev_name_path + \" is not defined in fstab, no need to update\",\n                               level=CommonVariables.InfoLevel)\n\n                if os.path.exists(encryption_environment.copy_header_slice_file_path):\n                    os.remove(encryption_environment.copy_header_slice_file_path)\n\n                current_phase = CommonVariables.EncryptionPhaseDone\n                ongoing_item_config.phase = current_phase\n                ongoing_item_config.commit()\n                expand_fs_result = disk_util.expand_fs(dev_path=device_mapper_path)\n\n                if crypt_item_to_update.mount_point != \"None\":\n                    disk_util.mount_filesystem(device_mapper_path, ongoing_item_config.get_mount_point())\n                else:\n                    logger.log(\"the crypt_item_to_update.mount_point is None, so we do not mount it.\")\n\n                ongoing_item_config.clear_config()\n                if expand_fs_result != CommonVariables.process_success:\n                    logger.log(msg=\"expand fs result is: {0}\".format(expand_fs_result),\n                               level=CommonVariables.ErrorLevel)\n                return current_phase\n            else:\n                logger.log(msg=\"recover header failed result is: {0}\".format(copy_result),\n                           level=CommonVariables.ErrorLevel)\n                return current_phase\n\n\ndef encrypt_inplace_with_seperate_header_file(passphrase_file,\n                                              device_item,\n                                              disk_util,\n                                              bek_util,\n                                              status_prefix='',\n                                              ongoing_item_config=None):\n    \"\"\"\n    if ongoing_item_config is not None, then this is a resume case.\n    \"\"\"\n    logger.log(\"encrypt_inplace_with_seperate_header_file\")\n    current_phase 
= CommonVariables.EncryptionPhaseEncryptDevice\n    if ongoing_item_config is None:\n        ongoing_item_config = OnGoingItemConfig(encryption_environment=encryption_environment,\n                                                logger=logger)\n        mapper_name = str(uuid.uuid4())\n        ongoing_item_config.current_block_size = CommonVariables.default_block_size\n        ongoing_item_config.current_slice_index = 0\n        ongoing_item_config.device_size = device_item.size\n        ongoing_item_config.file_system = device_item.file_system\n        ongoing_item_config.mapper_name = mapper_name\n        ongoing_item_config.mount_point = device_item.mount_point\n        # TODO improve this.\n        if os.path.exists(os.path.join('/dev/', device_item.name)):\n            ongoing_item_config.original_dev_name_path = os.path.join('/dev/', device_item.name)\n        else:\n            ongoing_item_config.original_dev_name_path = os.path.join('/dev/mapper/', device_item.name)\n        ongoing_item_config.original_dev_path = os.path.join('/dev/disk/by-uuid', device_item.uuid)\n        luks_header_file_path = disk_util.create_luks_header(mapper_name=mapper_name)\n        if luks_header_file_path is None:\n            logger.log(msg=\"create header file failed\", level=CommonVariables.ErrorLevel)\n            return current_phase\n        else:\n            ongoing_item_config.luks_header_file_path = luks_header_file_path\n            ongoing_item_config.phase = CommonVariables.EncryptionPhaseEncryptDevice\n            ongoing_item_config.commit()\n    else:\n        logger.log(msg=\"ongoing item config is not None, this is resuming: {0}\".format(ongoing_item_config),\n                   level=CommonVariables.WarningLevel)\n        current_phase = ongoing_item_config.get_phase()\n\n    while current_phase != CommonVariables.EncryptionPhaseDone:\n        if current_phase == CommonVariables.EncryptionPhaseEncryptDevice:\n            try:\n                mapper_name = ongoing_item_config.get_mapper_name()\n                original_dev_path = ongoing_item_config.get_original_dev_path()\n                luks_header_file_path = ongoing_item_config.get_header_file_path()\n                toggle_se_linux_for_centos7(True)\n\n                encrypt_result = disk_util.encrypt_disk(dev_path=original_dev_path,\n                                                        passphrase_file=passphrase_file,\n                                                        mapper_name=mapper_name,\n                                                        header_file=luks_header_file_path)\n\n                if encrypt_result != CommonVariables.process_success:\n                    logger.log(msg=\"the encryption for {0} failed\".format(original_dev_path),\n                               level=CommonVariables.ErrorLevel)\n                    return current_phase\n                else:\n                    ongoing_item_config.phase = CommonVariables.EncryptionPhaseCopyData\n                    ongoing_item_config.commit()\n                    current_phase = CommonVariables.EncryptionPhaseCopyData\n            finally:\n                toggle_se_linux_for_centos7(False)\n\n        elif current_phase == CommonVariables.EncryptionPhaseCopyData:\n            try:\n                mapper_name = ongoing_item_config.get_mapper_name()\n                original_dev_path = ongoing_item_config.get_original_dev_path()\n                luks_header_file_path = ongoing_item_config.get_header_file_path()\n                
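# CentOS 7.0 workaround: temporarily disable SELinux enforcement while opening\n                # and copying the device; the finally block below re-enables it\n                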
toggle_se_linux_for_centos7(True)\n                device_mapper_path = os.path.join(\"/dev/mapper\", mapper_name)\n                if not os.path.exists(device_mapper_path):\n                    open_result = disk_util.luks_open(passphrase_file=passphrase_file,\n                                                      dev_path=original_dev_path,\n                                                      mapper_name=mapper_name,\n                                                      header_file=luks_header_file_path,\n                                                      uses_cleartext_key=False)\n\n                    if open_result != CommonVariables.process_success:\n                        logger.log(msg=\"the luks open for {0} failed.\".format(original_dev_path),\n                                   level=CommonVariables.ErrorLevel)\n                        return current_phase\n                else:\n                    logger.log(msg=\"the device mapper path already exists, so skip the luks open.\",\n                               level=CommonVariables.InfoLevel)\n\n                device_size = ongoing_item_config.get_device_size()\n\n                current_slice_index = ongoing_item_config.get_current_slice_index()\n                if current_slice_index is None:\n                    ongoing_item_config.current_slice_index = 0\n                ongoing_item_config.current_source_path = original_dev_path\n                ongoing_item_config.current_destination = device_mapper_path\n                ongoing_item_config.current_total_copy_size = device_size\n                ongoing_item_config.from_end = True\n                ongoing_item_config.commit()\n\n                copy_result = disk_util.copy(ongoing_item_config=ongoing_item_config, status_prefix=status_prefix)\n\n                if copy_result != CommonVariables.process_success:\n                    error_message = \"the copy result is {0}, so skip the mounting\".format(copy_result)\n                    logger.log(msg=(error_message), level=CommonVariables.ErrorLevel)\n                    return current_phase\n                else:\n                    crypt_item_to_update = CryptItem()\n                    crypt_item_to_update.mapper_name = mapper_name\n                    original_dev_name_path = ongoing_item_config.get_original_dev_name_path()\n                    crypt_item_to_update.dev_path = disk_util.get_persistent_path_by_sdx_path(original_dev_name_path)\n                    crypt_item_to_update.luks_header_path = luks_header_file_path\n                    crypt_item_to_update.file_system = ongoing_item_config.get_file_system()\n                    crypt_item_to_update.uses_cleartext_key = False\n                    crypt_item_to_update.current_luks_slot = 0\n\n                    # if the original mountpoint is empty, then leave\n                    # it as None\n                    mount_point = ongoing_item_config.get_mount_point()\n                    if mount_point is None or mount_point == \"\":\n                        crypt_item_to_update.mount_point = \"None\"\n                    else:\n                        crypt_item_to_update.mount_point = mount_point\n                    update_crypt_item_result = disk_util.add_crypt_item(crypt_item_to_update, passphrase_file)\n                    if not update_crypt_item_result:\n                        logger.log(msg=\"update crypt item failed\", level=CommonVariables.ErrorLevel)\n                    if crypt_item_to_update.mount_point != \"None\":\n                        
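# the literal string \"None\" is the sentinel stored when no mount point was recorded\n                        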
disk_util.mount_filesystem(device_mapper_path, mount_point)\n                    else:\n                        logger.log(\"the crypt_item_to_update.mount_point is None, so we do not mount it.\")\n\n                    if mount_point:\n                        logger.log(msg=\"removing entry for unencrypted drive from fstab\",\n                                   level=CommonVariables.InfoLevel)\n                        disk_util.modify_fstab_entry_encrypt(mount_point, os.path.join(CommonVariables.dev_mapper_root, mapper_name))\n                    else:\n                        logger.log(msg=original_dev_name_path + \" is not defined in fstab, no need to update\",\n                                   level=CommonVariables.InfoLevel)\n\n                    current_phase = CommonVariables.EncryptionPhaseDone\n                    ongoing_item_config.phase = current_phase\n                    ongoing_item_config.commit()\n                    ongoing_item_config.clear_config()\n                    return current_phase\n            finally:\n                toggle_se_linux_for_centos7(False)\n\n\ndef decrypt_inplace_copy_data(passphrase_file,\n                              crypt_item,\n                              raw_device_item,\n                              mapper_device_item,\n                              disk_util,\n                              status_prefix='',\n                              ongoing_item_config=None):\n    logger.log(msg=\"decrypt_inplace_copy_data\")\n\n    if ongoing_item_config:\n        logger.log(msg=\"ongoing item config is not None, resuming decryption, info: {0}\".format(ongoing_item_config),\n                   level=CommonVariables.WarningLevel)\n    else:\n        logger.log(msg=\"starting decryption of {0}\".format(crypt_item))\n        ongoing_item_config = OnGoingItemConfig(encryption_environment=encryption_environment, logger=logger)\n        ongoing_item_config.current_destination = crypt_item.dev_path\n        ongoing_item_config.current_source_path = os.path.join(CommonVariables.dev_mapper_root,\n                                                               crypt_item.mapper_name)\n        ongoing_item_config.current_total_copy_size = mapper_device_item.size\n        ongoing_item_config.from_end = True\n        ongoing_item_config.phase = CommonVariables.DecryptionPhaseCopyData\n        ongoing_item_config.current_slice_index = 0\n        ongoing_item_config.current_block_size = CommonVariables.default_block_size\n        ongoing_item_config.mount_point = crypt_item.mount_point\n        ongoing_item_config.commit()\n\n    current_phase = ongoing_item_config.get_phase()\n\n    while current_phase != CommonVariables.DecryptionPhaseDone:\n        logger.log(msg=(\"the current phase is {0}\".format(current_phase)),\n                   level=CommonVariables.InfoLevel)\n\n        if current_phase == CommonVariables.DecryptionPhaseCopyData:\n            copy_result = disk_util.copy(ongoing_item_config=ongoing_item_config, status_prefix=status_prefix)\n            if copy_result == CommonVariables.process_success:\n                mount_point = ongoing_item_config.get_mount_point()\n                if mount_point and mount_point != \"None\":\n                    logger.log(msg=\"restoring entry for unencrypted drive from fstab\", level=CommonVariables.InfoLevel)\n                    disk_util.restore_mount_info(ongoing_item_config.get_mount_point())\n                elif crypt_item.mapper_name:\n                    
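# no mount point was recorded for this crypt item; try restoring the mount\n                    # information saved under the mapper name\n                    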
disk_util.restore_mount_info(crypt_item.mapper_name)\n                else:\n                    logger.log(msg=crypt_item.dev_path + \" was not in fstab when encryption was enabled, no need to restore\",\n                               level=CommonVariables.InfoLevel)\n\n                ongoing_item_config.phase = CommonVariables.DecryptionPhaseDone\n                ongoing_item_config.commit()\n                current_phase = CommonVariables.DecryptionPhaseDone\n            else:\n                logger.log(msg=\"decryption: block copy failed, result: {0}\".format(copy_result),\n                           level=CommonVariables.ErrorLevel)\n                return current_phase\n\n    ongoing_item_config.clear_config()\n\n    return current_phase\n\n\ndef decrypt_inplace_without_separate_header_file(passphrase_file,\n                                                 crypt_item,\n                                                 raw_device_item,\n                                                 mapper_device_item,\n                                                 disk_util,\n                                                 status_prefix='',\n                                                 ongoing_item_config=None):\n    logger.log(msg=\"decrypt_inplace_without_separate_header_file\")\n\n    proc_comm = ProcessCommunicator()\n    executor = CommandExecutor(logger)\n    executor.Execute(DistroPatcher.cryptsetup_path + \" luksDump \" + crypt_item.dev_path, communicator=proc_comm)\n\n    luks_header_size = int(re.findall(r\"Payload.*?(\\d+)\", proc_comm.stdout)[0]) * CommonVariables.sector_size\n\n    if raw_device_item.size - mapper_device_item.size != luks_header_size:\n        logger.log(msg=\"mismatch between raw and mapper device found for crypt_item {0}\".format(crypt_item),\n                   level=CommonVariables.ErrorLevel)\n        logger.log(msg=\"raw_device_item: {0}\".format(raw_device_item),\n                   level=CommonVariables.ErrorLevel)\n        logger.log(msg=\"mapper_device_item {0}\".format(mapper_device_item),\n                   level=CommonVariables.ErrorLevel)\n\n        return None\n\n    return decrypt_inplace_copy_data(passphrase_file,\n                                     crypt_item,\n                                     raw_device_item,\n                                     mapper_device_item,\n                                     disk_util,\n                                     status_prefix,\n                                     ongoing_item_config)\n\n\ndef decrypt_inplace_with_separate_header_file(passphrase_file,\n                                              crypt_item,\n                                              raw_device_item,\n                                              mapper_device_item,\n                                              disk_util,\n                                              status_prefix='',\n                                              ongoing_item_config=None):\n    logger.log(msg=\"decrypt_inplace_with_separate_header_file\")\n\n    if raw_device_item.size != mapper_device_item.size:\n        logger.log(msg=\"mismatch between raw and mapper device found for crypt_item {0}\".format(crypt_item),\n                   level=CommonVariables.ErrorLevel)\n        logger.log(msg=\"raw_device_item: {0}\".format(raw_device_item),\n                   level=CommonVariables.ErrorLevel)\n        logger.log(msg=\"mapper_device_item {0}\".format(mapper_device_item),\n                   level=CommonVariables.ErrorLevel)\n\n        return\n\n 
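   # sizes match: with a detached LUKS header the ciphertext occupies the whole raw\n    # device, so the decrypted data can be copied back over it in place\n 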
   return decrypt_inplace_copy_data(passphrase_file,\n                                     crypt_item,\n                                     raw_device_item,\n                                     mapper_device_item,\n                                     disk_util,\n                                     status_prefix,\n                                     ongoing_item_config)\n\n\ndef enable_encryption_all_format(passphrase_file, encryption_marker, disk_util, bek_util):\n    \"\"\"\n    In case of success return None, otherwise return the device item which failed.\n    \"\"\"\n    logger.log(msg=\"executing the enable_encryption_all_format command\")\n\n    device_items = find_all_devices_to_encrypt(encryption_marker, disk_util, bek_util)\n    # Don't encrypt partitions that are not even mounted\n    device_items_to_encrypt = filter(lambda di: di.mount_point is not None and di.mount_point != \"\", device_items)\n\n    dev_path_reference_table = disk_util.get_block_device_to_azure_udev_table()\n    device_items_to_encrypt = filter(lambda di: os.path.join('/dev/', di.name) in dev_path_reference_table, device_items_to_encrypt)\n\n    msg = 'Encrypting and formatting {0} data volumes'.format(len(device_items_to_encrypt))\n    logger.log(msg)\n\n    hutil.do_status_report(operation='EnableEncryptionFormatAll',\n                           status=CommonVariables.extension_success_status,\n                           status_code=str(CommonVariables.success),\n                           message=msg)\n\n    return encrypt_format_device_items(passphrase_file, device_items_to_encrypt, disk_util, True)\n\n\ndef encrypt_format_device_items(passphrase, device_items, disk_util, force=False):\n    \"\"\"\n    Formats the block devices represented by the supplied device_items.\n\n    This is done by constructing a disk format query based on the supplied device items\n    and passing it on to the enable_encryption_format method.\n\n    Returns None if all items are successfully format-encrypted;\n    otherwise returns the device item which failed.\n    \"\"\"\n\n    # use the new udev names for formatting and later on for cryptmounting\n    dev_path_reference_table = disk_util.get_block_device_to_azure_udev_table()\n\n    def single_device_item_to_format_query_dict(device_item):\n        \"\"\"\n        Converts a single device_item into a dictionary that will later be \"json-stringified\"\n        \"\"\"\n        format_query_element = {}\n        dev_path = os.path.join('/dev/', device_item.name)\n        if dev_path in dev_path_reference_table:\n            format_query_element[\"dev_path\"] = dev_path_reference_table[dev_path]\n        else:\n            format_query_element[\"dev_path\"] = dev_path\n\n        # introduce a new \"full_mount_point\" field below to avoid the /mnt/ prefix that automatically gets appended\n        format_query_element[\"full_mount_point\"] = str(device_item.mount_point)\n        format_query_element[\"file_system\"] = str(device_item.file_system)\n        return format_query_element\n\n    disk_format_query = json.dumps(map(single_device_item_to_format_query_dict, device_items))\n\n    return enable_encryption_format(passphrase, disk_format_query, disk_util, force)\n\n\ndef find_all_devices_to_encrypt(encryption_marker, disk_util, bek_util):\n    device_items = disk_util.get_device_items(None)\n    device_items_to_encrypt = []\n    special_azure_devices_to_skip = disk_util.get_azure_devices()\n    for device_item in device_items:\n        logger.log(\"device_item == \" + 
str(device_item))\n\n        should_skip = disk_util.should_skip_for_inplace_encryption(device_item, special_azure_devices_to_skip, encryption_marker.get_volume_type())\n        if not should_skip and \\\n           not any(di.name == device_item.name for di in device_items_to_encrypt):\n            device_items_to_encrypt.append(device_item)\n    return device_items_to_encrypt\n\n\ndef enable_encryption_all_in_place(passphrase_file, encryption_marker, disk_util, bek_util):\n    \"\"\"\n    Return None for the success case, or return the device item which failed.\n    \"\"\"\n    logger.log(msg=\"executing the enable_encryption_all_in_place command.\")\n\n    device_items_to_encrypt = find_all_devices_to_encrypt(encryption_marker, disk_util, bek_util)\n    msg = 'Encrypting {0} data volumes'.format(len(device_items_to_encrypt))\n    logger.log(msg)\n\n    hutil.do_status_report(operation='EnableEncryption',\n                           status=CommonVariables.extension_success_status,\n                           status_code=str(CommonVariables.success),\n                           message=msg)\n\n    for device_num, device_item in enumerate(device_items_to_encrypt):\n        umount_status_code = CommonVariables.success\n        if device_item.mount_point is not None and device_item.mount_point != \"\":\n            umount_status_code = disk_util.umount(device_item.mount_point)\n        if umount_status_code != CommonVariables.success:\n            logger.log(\"error occurred during umount for: {0} with code: {1}\".format(device_item.mount_point, umount_status_code))\n        else:\n            logger.log(msg=(\"encrypting: {0}\".format(device_item)))\n            no_header_file_support = not_support_header_option_distro(DistroPatcher)\n            status_prefix = \"Encrypting data volume {0}/{1}\".format(device_num + 1,\n                                                                    len(device_items_to_encrypt))\n\n            # TODO check the file system before encrypting it.\n            if no_header_file_support:\n                logger.log(msg=\"this is the CentOS 6, RedHat 6, or SLES 11 series; the data drive needs to be resized\",\n                           level=CommonVariables.WarningLevel)\n\n                encryption_result_phase = encrypt_inplace_without_seperate_header_file(passphrase_file=passphrase_file,\n                                                                                       device_item=device_item,\n                                                                                       disk_util=disk_util,\n                                                                                       bek_util=bek_util,\n                                                                                       status_prefix=status_prefix)\n            else:\n                encryption_result_phase = encrypt_inplace_with_seperate_header_file(passphrase_file=passphrase_file,\n                                                                                    device_item=device_item,\n                                                                                    disk_util=disk_util,\n                                                                                    bek_util=bek_util,\n                                                                                    status_prefix=status_prefix)\n\n            if encryption_result_phase == CommonVariables.EncryptionPhaseDone:\n                continue\n            else:\n                # encryption failed; exit from this round and report the failed device item\n                
return device_item\n    return None\n\n\ndef disable_encryption_all_in_place(passphrase_file, decryption_marker, disk_util):\n    \"\"\"\n    On success, returns None. Otherwise returns the crypt item for which decryption failed.\n    \"\"\"\n\n    logger.log(msg=\"executing disable_encryption_all_in_place\")\n\n    device_items = disk_util.get_device_items(None)\n    crypt_items = disk_util.get_crypt_items()\n\n    msg = 'Decrypting {0} data volumes'.format(len(crypt_items))\n    logger.log(msg)\n\n    hutil.do_status_report(operation='DisableEncryption',\n                           status=CommonVariables.extension_success_status,\n                           status_code=str(CommonVariables.success),\n                           message=msg)\n\n    for crypt_item_num, crypt_item in enumerate(crypt_items):\n        logger.log(\"processing crypt_item: \" + str(crypt_item))\n\n        def raw_device_item_match(device_item):\n            sdx_device_name = os.path.join(\"/dev/\", device_item.name)\n            if crypt_item.dev_path.startswith(CommonVariables.disk_by_id_root):\n                return crypt_item.dev_path == disk_util.query_dev_id_path_by_sdx_path(sdx_device_name)\n            else:\n                return crypt_item.dev_path == sdx_device_name\n\n        def mapped_device_item_match(device_item):\n            return crypt_item.mapper_name == device_item.name\n\n        raw_device_item = next((d for d in device_items if raw_device_item_match(d)), None)\n        mapper_device_item = next((d for d in device_items if mapped_device_item_match(d)), None)\n\n        if not raw_device_item:\n            logger.log(\"raw device not found for crypt_item {0}\".format(crypt_item), level='Warn')\n            logger.log(\"Skipping device\", level='Warn')\n            continue\n\n        if not mapper_device_item:\n            logger.log(\"mapper device not found for crypt_item {0}\".format(crypt_item))\n            if disk_util.is_luks_device(crypt_item.dev_path, crypt_item.luks_header_path):\n                logger.log(\"Found a luks device for this device item, yet couldn't open mapper: {0}\".format(crypt_item))\n                logger.log(\"Failing for crypt_item {0}\".format(crypt_item))\n                return crypt_item\n            else:\n                continue\n\n        decryption_result_phase = None\n\n        status_prefix = \"Decrypting data volume {0}/{1}\".format(crypt_item_num + 1,\n                                                                len(crypt_items))\n\n        if crypt_item.luks_header_path:\n            decryption_result_phase = decrypt_inplace_with_separate_header_file(passphrase_file=passphrase_file,\n                                                                                crypt_item=crypt_item,\n                                                                                raw_device_item=raw_device_item,\n                                                                                mapper_device_item=mapper_device_item,\n                                                                                disk_util=disk_util,\n                                                                                status_prefix=status_prefix)\n        else:\n            decryption_result_phase = decrypt_inplace_without_separate_header_file(passphrase_file=passphrase_file,\n                                                                                   crypt_item=crypt_item,\n                                                                                   
raw_device_item=raw_device_item,\n                                                                                   mapper_device_item=mapper_device_item,\n                                                                                   disk_util=disk_util,\n                                                                                   status_prefix=status_prefix)\n\n        if decryption_result_phase == CommonVariables.DecryptionPhaseDone:\n            disk_util.luks_close(crypt_item.mapper_name)\n            disk_util.remove_crypt_item(crypt_item)\n            #disk_util.mount_all()\n\n            continue\n        else:\n            # decryption failed for a crypt_item, return the failed item to caller\n            return crypt_item\n\n    disk_util.mount_all()\n\n    return None\n\n\ndef daemon_encrypt():\n    # Ensure the same configuration is executed only once\n    # If the previous enable failed, we do not have retry logic here.\n    # TODO Remount all\n    encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n    if encryption_marker.config_file_exists():\n        logger.log(\"encryption is marked.\")\n\n    \"\"\"\n    Search for the BEK volume, then mount it.\n    \"\"\"\n    disk_util = DiskUtil(hutil, DistroPatcher, logger, encryption_environment)\n\n    encryption_config = EncryptionConfig(encryption_environment, logger)\n    bek_passphrase_file = None\n    \"\"\"\n    Try to find the attached BEK volume and use its passphrase file to mount the\n    encrypted volumes; if the passphrase file is found, it is reused later.\n    \"\"\"\n    bek_util = BekUtil(disk_util, logger)\n    if encryption_config.config_file_exists():\n        bek_passphrase_file = bek_util.get_bek_passphrase_file(encryption_config)\n\n    if bek_passphrase_file is None:\n        hutil.do_exit(exit_code=CommonVariables.passphrase_file_not_found,\n                      operation='EnableEncryption',\n                      status=CommonVariables.extension_error_status,\n                      code=CommonVariables.passphrase_file_not_found,\n                      message='Passphrase file not found.')\n\n    executor = CommandExecutor(logger)\n    is_not_in_stripped_os = bool(executor.Execute(\"mountpoint /oldroot\"))\n    volume_type = encryption_config.get_volume_type().lower()\n\n    if (volume_type == CommonVariables.VolumeTypeData.lower() or volume_type == CommonVariables.VolumeTypeAll.lower()) and \\\n       is_not_in_stripped_os:\n        try:\n            while not daemon_encrypt_data_volumes(encryption_marker=encryption_marker,\n                                                  encryption_config=encryption_config,\n                                                  disk_util=disk_util,\n                                                  bek_util=bek_util,\n                                                  bek_passphrase_file=bek_passphrase_file):\n                logger.log(\"Calling daemon_encrypt_data_volumes again\")\n        except Exception as e:\n            message = \"Failed to encrypt data volumes with error: {0}, stack trace: {1}\".format(e, traceback.format_exc())\n            logger.log(msg=message, level=CommonVariables.ErrorLevel)\n            hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                          operation='EnableEncryptionDataVolumes',\n                          status=CommonVariables.extension_error_status,\n                          code=CommonVariables.encryption_failed,\n                          message=message)\n        else:\n            hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                   status=CommonVariables.extension_success_status,\n                                   status_code=str(CommonVariables.success),\n                                   message='Encryption succeeded for data volumes')\n            disk_util.log_lsblk_output()\n            mount_encrypted_disks(disk_util, bek_util, bek_passphrase_file, encryption_config)\n\n    if volume_type == CommonVariables.VolumeTypeOS.lower() or \\\n       volume_type == CommonVariables.VolumeTypeAll.lower():\n        # import OSEncryption here instead of at the top because it relies\n        # on pre-req packages being installed (specifically, python-six on Ubuntu)\n        distro_name = DistroPatcher.distro_info[0]\n        distro_version = DistroPatcher.distro_info[1]\n\n        os_encryption = None\n        os_disk_lvm = disk_util.is_os_disk_lvm() or os.path.exists('/volumes.lvm')\n\n        if (distro_name == 'redhat' and\n                distro_version in ['7.3', '7.4', '7.5', '7.6', '7.7'] and\n                os_disk_lvm):\n            from oscrypto.rhel_72_lvm import RHEL72LVMEncryptionStateMachine\n            os_encryption = RHEL72LVMEncryptionStateMachine(hutil=hutil,\n                                                            distro_patcher=DistroPatcher,\n                                                            logger=logger,\n                                                            encryption_environment=encryption_environment)\n        elif (distro_name == 'centos' and\n              (distro_version == '7.3.1611' or\n               any(distro_version.startswith(v) for v in ['7.4', '7.5', '7.6', '7.7'])) and\n              os_disk_lvm):\n            from oscrypto.rhel_72_lvm import RHEL72LVMEncryptionStateMachine\n            os_encryption = RHEL72LVMEncryptionStateMachine(hutil=hutil,\n                                                            distro_patcher=DistroPatcher,\n                                                            logger=logger,\n                                                            encryption_environment=encryption_environment)\n        elif ((distro_name == 'redhat' and\n               distro_version in ['7.2', '7.3', '7.4', '7.5', '7.6', '7.7']) or\n              (distro_name == 'centos' and\n               (distro_version in ['7.2.1511', '7.3.1611'] or\n                any(distro_version.startswith(v) for v in ['7.4', '7.5', '7.6', '7.7'])))):\n            from oscrypto.rhel_72 import RHEL72EncryptionStateMachine\n            os_encryption = RHEL72EncryptionStateMachine(hutil=hutil,\n                                                         distro_patcher=DistroPatcher,\n                                                         logger=logger,\n                                                         encryption_environment=encryption_environment)\n        elif distro_name == 'redhat' and distro_version == '6.8':\n            from oscrypto.rhel_68 import RHEL68EncryptionStateMachine\n            os_encryption = RHEL68EncryptionStateMachine(hutil=hutil,\n                                                         distro_patcher=DistroPatcher,\n                                                         logger=logger,\n                                                         encryption_environment=encryption_environment)\n        elif distro_name == 'centos' and (distro_version == '6.8' or distro_version == '6.9'):\n            from oscrypto.centos_68 import CentOS68EncryptionStateMachine\n            os_encryption = CentOS68EncryptionStateMachine(hutil=hutil,\n                                                           distro_patcher=DistroPatcher,\n                                                           logger=logger,\n                                                           encryption_environment=encryption_environment)\n        elif distro_name == 'Ubuntu' and distro_version in ['16.04', '18.04']:\n            from oscrypto.ubuntu_1604 import Ubuntu1604EncryptionStateMachine\n            os_encryption = Ubuntu1604EncryptionStateMachine(hutil=hutil,\n                                                             distro_patcher=DistroPatcher,\n                                                             logger=logger,\n                                                             encryption_environment=encryption_environment)\n        elif distro_name == 'Ubuntu' and distro_version == '14.04':\n            from oscrypto.ubuntu_1404 import Ubuntu1404EncryptionStateMachine\n            os_encryption = Ubuntu1404EncryptionStateMachine(hutil=hutil,\n                                                             distro_patcher=DistroPatcher,\n                                                             logger=logger,\n                                                             encryption_environment=encryption_environment)\n        else:\n            message = \"OS volume encryption is not supported on {0} {1}\".format(distro_name,\n                                                                                distro_version)\n            logger.log(msg=message, level=CommonVariables.ErrorLevel)\n            hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                          operation='EnableEncryptionOSVolume',\n                          status=CommonVariables.extension_error_status,\n                          code=CommonVariables.encryption_failed,\n                          message=message)\n\n        try:\n            os_encryption.start_encryption()\n\n            if os_encryption.state != 'completed':\n                raise Exception(\"did not reach completed state\")\n            else:\n                encryption_marker.clear_config()\n\n        except Exception as e:\n            message = \"Failed to encrypt OS volume with error: {0}, stack trace: {1}, machine state: {2}\".format(e,\n
                                                                                                                 traceback.format_exc(),\n                                                                                                                 os_encryption.state)\n            logger.log(msg=message, level=CommonVariables.ErrorLevel)\n            hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                          operation='EnableEncryptionOSVolume',\n                          status=CommonVariables.extension_error_status,\n                          code=CommonVariables.encryption_failed,\n                          message=message)\n\n        message = ''\n        if volume_type == CommonVariables.VolumeTypeAll.lower():\n            message = 'Encryption succeeded for all volumes'\n        else:\n            message = 'Encryption succeeded for OS volume'\n\n        logger.log(msg=message)\n        hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                               status=CommonVariables.extension_success_status,\n                               status_code=str(CommonVariables.success),\n                               message=message)\n\n\ndef daemon_encrypt_data_volumes(encryption_marker, encryption_config, disk_util, bek_util, bek_passphrase_file):\n    try:\n        \"\"\"\n        Check whether there's a scheduled encryption task.\n        \"\"\"\n        mount_all_result = disk_util.mount_all()\n\n        if mount_all_result != CommonVariables.process_success:\n            logger.log(msg=\"mount all failed with code: {0}\".format(mount_all_result),\n                       level=CommonVariables.ErrorLevel)\n        \"\"\"\n        TODO: resume encryption after a sudden reboot.\n        Special handling is needed because a half-encrypted device can be left in an\n        error state (e.g., a missing file system header), which is how it can be\n        identified.\n        \"\"\"\n        ongoing_item_config = OnGoingItemConfig(encryption_environment=encryption_environment, logger=logger)\n\n        if ongoing_item_config.config_file_exists():\n            logger.log(\"OngoingItemConfig exists.\")\n            ongoing_item_config.load_value_from_file()\n            header_file_path = ongoing_item_config.get_header_file_path()\n            mount_point = ongoing_item_config.get_mount_point()\n            status_prefix = \"Resuming encryption after reboot\"\n            if not none_or_empty(mount_point):\n                logger.log(\"mount point is not empty {0}, trying to unmount it first.\".format(mount_point))\n                umount_status_code = disk_util.umount(mount_point)\n                logger.log(\"unmount return code is {0}\".format(umount_status_code))\n            if none_or_empty(header_file_path):\n                encryption_result_phase = encrypt_inplace_without_seperate_header_file(passphrase_file=bek_passphrase_file,\n                                                                                       device_item=None,\n                                                                                       disk_util=disk_util,\n                                                                                       bek_util=bek_util,\n                                                                                       status_prefix=status_prefix,\n                                                                                       ongoing_item_config=ongoing_item_config)\n                # TODO mount it back if the shrink failed\n            else:\n                encryption_result_phase = 
encrypt_inplace_with_seperate_header_file(passphrase_file=bek_passphrase_file,\n                                                                                    device_item=None,\n                                                                                    disk_util=disk_util,\n                                                                                    bek_util=bek_util,\n                                                                                    status_prefix=status_prefix,\n                                                                                    ongoing_item_config=ongoing_item_config)\n            \"\"\"\n            If resuming failed, the operation should fail.\n            \"\"\"\n            if encryption_result_phase != CommonVariables.EncryptionPhaseDone:\n                original_dev_path = ongoing_item_config.get_original_dev_path()\n                message = 'EnableEncryption: resuming encryption for {0} failed'.format(original_dev_path)\n                raise Exception(message)\n            else:\n                ongoing_item_config.clear_config()\n        else:\n            logger.log(\"OngoingItemConfig does not exist\")\n            failed_item = None\n\n            if not encryption_marker.config_file_exists():\n                logger.log(\"Data volumes are not marked for encryption\")\n                return True\n\n            if encryption_marker.get_current_command() == CommonVariables.EnableEncryption:\n                failed_item = enable_encryption_all_in_place(passphrase_file=bek_passphrase_file,\n                                                             encryption_marker=encryption_marker,\n                                                             disk_util=disk_util,\n                                                             bek_util=bek_util)\n            elif encryption_marker.get_current_command() == CommonVariables.EnableEncryptionFormat:\n                disk_format_query = encryption_marker.get_encryption_disk_format_query()\n                failed_item = enable_encryption_format(passphrase=bek_passphrase_file,\n                                                       disk_format_query=disk_format_query,\n                                                       disk_util=disk_util)\n            elif encryption_marker.get_current_command() == CommonVariables.EnableEncryptionFormatAll:\n                failed_item = enable_encryption_all_format(passphrase_file=bek_passphrase_file,\n                                                           encryption_marker=encryption_marker,\n                                                           disk_util=disk_util,\n                                                           bek_util=bek_util)\n            else:\n                message = \"Command {0} not supported.\".format(encryption_marker.get_current_command())\n                logger.log(msg=message, level=CommonVariables.ErrorLevel)\n                raise Exception(message)\n\n            if failed_item:\n                message = 'Encryption failed for {0}'.format(failed_item)\n                raise Exception(message)\n            else:\n                return True\n    except Exception:\n        raise\n\n\ndef daemon_decrypt():\n    decryption_marker = DecryptionMarkConfig(logger, encryption_environment)\n\n    if not decryption_marker.config_file_exists():\n        logger.log(\"decryption is not marked.\")\n        return\n\n    logger.log(\"decryption is marked.\")\n\n    # mount and then unmount all the encrypted items\n    # 
in order to set up all the mapper devices\n    # we don't need the BEK, since all the drives that need decryption were made cleartext-key unlockable by the first call to disable\n\n    disk_util = DiskUtil(hutil, DistroPatcher, logger, encryption_environment)\n    encryption_config = EncryptionConfig(encryption_environment, logger)\n    mount_encrypted_disks(disk_util=disk_util,\n                          bek_util=None,\n                          encryption_config=encryption_config,\n                          passphrase_file=None)\n    disk_util.umount_all_crypt_items()\n\n    # at this point all the /dev/mapper/* crypt devices should be open\n\n    ongoing_item_config = OnGoingItemConfig(encryption_environment=encryption_environment, logger=logger)\n\n    if ongoing_item_config.config_file_exists():\n        logger.log(\"ongoing item config exists.\")\n    else:\n        logger.log(\"ongoing item config does not exist.\")\n\n        failed_item = None\n\n        if decryption_marker.get_current_command() == CommonVariables.DisableEncryption:\n            failed_item = disable_encryption_all_in_place(passphrase_file=None,\n                                                          decryption_marker=decryption_marker,\n                                                          disk_util=disk_util)\n        else:\n            raise Exception(\"Command {0} not supported.\".format(decryption_marker.get_current_command()))\n\n        if failed_item is not None:\n            hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                          operation='Disable',\n                          status=CommonVariables.extension_error_status,\n                          code=CommonVariables.encryption_failed,\n                          message='Decryption failed for {0}'.format(failed_item))\n        else:\n            encryption_config.clear_config()\n            logger.log(\"clearing the decryption mark after successful decryption\")\n            decryption_marker.clear_config()\n\n            hutil.do_exit(exit_code=0,\n                          operation='Disable',\n                          status=CommonVariables.extension_success_status,\n                          code=str(CommonVariables.success),\n                          message='Decryption succeeded')\n\n\ndef daemon():\n    hutil.find_last_nonquery_operation = True\n    hutil.do_parse_context('Executing')\n    lock = ProcessLock(logger, encryption_environment.daemon_lock_file_path)\n    if not lock.try_lock():\n        logger.log(\"there's another daemon running, please wait for it to exit.\", level=CommonVariables.WarningLevel)\n        return\n\n    logger.log(\"daemon lock acquired successfully.\")\n\n    logger.log(\"waiting for 2 minutes before continuing the daemon\")\n    time.sleep(120)\n\n    logger.log(\"Installing pre-requisites\")\n    DistroPatcher.install_extras()\n\n    # try decrypt, if decryption marker exists\n    decryption_marker = DecryptionMarkConfig(logger, encryption_environment)\n    if decryption_marker.config_file_exists():\n        try:\n            daemon_decrypt()\n        except Exception as e:\n            error_msg = (\"Failed to disable the extension with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n\n            logger.log(msg=error_msg,\n                       level=CommonVariables.ErrorLevel)\n\n            hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                          operation='Disable',\n                          
status=CommonVariables.extension_error_status,\n                          code=str(CommonVariables.encryption_failed),\n                          message=error_msg)\n        finally:\n            lock.release_lock()\n            logger.log(\"returned to daemon\")\n            logger.log(\"exiting daemon\")\n\n            return\n\n    # try encrypt, in absence of decryption marker\n    try:\n        daemon_encrypt()\n    except Exception as e:\n        # mount the file systems back.\n        error_msg = (\"Failed to enable the extension with error: {0}, stack trace: {1}\".format(e, traceback.format_exc()))\n        logger.log(msg=error_msg,\n                   level=CommonVariables.ErrorLevel)\n        hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                      operation='Enable',\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.encryption_failed),\n                      message=error_msg)\n    else:\n        encryption_marker = EncryptionMarkConfig(logger, encryption_environment)\n        # TODO do not remove it; back it up instead.\n        logger.log(\"returned to daemon successfully after encryption\")\n        logger.log(\"clearing the encryption mark.\")\n        encryption_marker.clear_config()\n        hutil.redo_current_status()\n    finally:\n        lock.release_lock()\n        logger.log(\"exiting daemon\")\n\n\ndef start_daemon(operation):\n    # This process will start a new background process by calling\n    #     extension_shim.sh -c handle.py -daemon\n    # to run the script, and will exit itself immediately.\n    shim_path = os.path.join(os.getcwd(), CommonVariables.extension_shim_filename)\n    shim_opts = '-c ' + os.path.join(os.getcwd(), __file__) + ' -daemon'\n    args = [shim_path, shim_opts]\n    logger.log(\"start_daemon with args: {0}\".format(args))\n\n    # Redirect stdout and stderr to /dev/null.  Otherwise the daemon process will\n    # throw a broken pipe exception when the parent process exits.\n    devnull = open(os.devnull, 'w')\n    subprocess.Popen(args, stdout=devnull, stderr=devnull)\n\n    encryption_config = EncryptionConfig(encryption_environment, logger)\n    if encryption_config.config_file_exists():\n        hutil.do_exit(exit_code=0,\n                      operation=operation,\n                      status=CommonVariables.extension_success_status,\n                      code=str(CommonVariables.success),\n                      message=encryption_config.get_secret_id())\n    else:\n        hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                      operation=operation,\n                      status=CommonVariables.extension_error_status,\n                      code=str(CommonVariables.encryption_failed),\n                      message='Encryption config not found.')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
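  {
    "path": "VMEncryption/docs/examples/daemon_lock_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's note: hypothetical sketch, NOT part of the extension. It restates,\n# under simplified assumptions, the daemon pattern handle.py uses above: a\n# lock file keeps a second daemon from starting, and the parent spawns the\n# worker with stdout/stderr redirected to /dev/null so the child does not hit\n# a broken pipe when the parent exits. All names here (acquire_lock,\n# spawn_daemon, LOCK_PATH, WORK_CMD) are invented for illustration.\n#\n\nimport fcntl\nimport os\nimport subprocess\n\nLOCK_PATH = '/tmp/example_daemon.lck'   # hypothetical lock file path\nWORK_CMD = ['/bin/sleep', '5']          # hypothetical background workload\n\n\ndef acquire_lock(path):\n    # Return a locked file object, or None if another daemon holds the lock.\n    fd = open(path, 'w')\n    try:\n        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n        return fd\n    except IOError:\n        fd.close()\n        return None\n\n\ndef spawn_daemon(args):\n    # Start a detached background process, discarding its output.\n    devnull = open(os.devnull, 'w')\n    return subprocess.Popen(args, stdout=devnull, stderr=devnull)\n\n\nif __name__ == '__main__':\n    lock = acquire_lock(LOCK_PATH)\n    if lock is None:\n        print('another daemon is running; exiting')\n    else:\n        proc = spawn_daemon(WORK_CMD)\n        print('spawned worker pid {0}'.format(proc.pid))\n"
  },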
  {
    "path": "VMEncryption/main/oscrypto/91ade/50-udev-ade.rules",
    "content": "ACTION==\"add|change\", SUBSYSTEM==\"block\", ATTRS{device_id}==\"?00000000-0000-*\", ATTR{partition}==\"ENCRYPTED_DISK_PARTITION\" GOTO=\"process_disk\"\nGOTO=\"disk_end\"\n\nLABEL=\"process_disk\"\nATTR{partition}==\"ENCRYPTED_DISK_PARTITION\", ENV{ID_FS_UUID}=\"osencrypt-locked\"\nATTR{partition}==\"ENCRYPTED_DISK_PARTITION\", ENV{ID_FS_UUID_ENC}=\"osencrypt-locked\"\nATTR{partition}==\"ENCRYPTED_DISK_PARTITION\", ENV{ID_FS_USAGE}=\"crypto\"\nLABEL=\"disk_end\"\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/91ade/cryptroot-ask-ade.sh",
    "content": "#!/bin/sh\n# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-\n# ex: ts=8 sw=4 sts=4 et filetype=sh\n\nset -x\n\nPATH=/usr/sbin:/usr/bin:/sbin:/bin\nNEWROOT=${NEWROOT:-\"/sysroot\"}\n\n# do not ask, if we already have root\n[ -f $NEWROOT/proc ] && exit 0\n\n# check if destination already exists\n[ -b /dev/mapper/$2 ] && exit 0\n\n# we already asked for this device\n[ -f /tmp/cryptroot-ade-asked-$2 ] && exit 0\n\n# load dm_crypt if it is not already loaded\n[ -d /sys/module/dm_crypt ] || modprobe dm_crypt\n\n. /lib/dracut-crypt-lib.sh\n\n# default luksname - luks-UUID\nluksname=$2\n\n# fallback to passphrase\nask_passphrase=1\n\n# if device name is /dev/dm-X, convert to /dev/mapper/name\nif [ \"${1##/dev/dm-}\" != \"$1\" ]; then\n    device=\"/dev/mapper/$(dmsetup info -c --noheadings -o name \"$1\")\"\nelse\n    device=\"$1\"\nfi\n\nnumtries=${3:-10}\n\n#\n# Open LUKS device\n#\n\ninfo \"luksOpen $device $luksname\"\n\nls /mnt/azure_bek_disk/LinuxPassPhraseFileName* || (mkdir -p /mnt/azure_bek_disk/ && mount -L \"BEK VOLUME\" /mnt/azure_bek_disk/)\n\nfor luksfile in $(ls /mnt/azure_bek_disk/LinuxPassPhraseFileName*); do\n    break;\ndone\n\ncryptsetupopts=\"--header /osluksheader\"\n\nif [ -n \"$luksfile\" -a \"$luksfile\" != \"none\" -a -e \"$luksfile\" ]; then\n    if cryptsetup --key-file \"$luksfile\" $cryptsetupopts luksOpen \"$device\" \"$luksname\"; then\n        ask_passphrase=0\n    fi\nelse\n\tif [ $numtries -eq 0 ]; then\n\t\twarn \"No key found for $device.  Fallback to passphrase mode.\"\n\telse\n\t\tsleep 1\n\t\tinfo \"No key found for $device.  Will try $numtries time(s) more later.\"\n\t\tinitqueue --unique --onetime --settled \\\n\t\t\t--name cryptroot-ask-ade-$luksname \\\n\t\t\t$(command -v cryptroot-ask-ade) \"$device\" \"$luksname\" \"$(($numtries-1))\"\n\t\texit 0\n\tfi\nfi\n\nif [ $ask_passphrase -ne 0 ]; then\n    luks_open=\"$(command -v cryptsetup) $cryptsetupopts luksOpen\"\n    ask_for_password --ply-tries 5 \\\n        --ply-cmd \"$luks_open -T1 $device $luksname\" \\\n        --ply-prompt \"Password ($device)\" \\\n        --tty-tries 1 \\\n        --tty-cmd \"$luks_open -T5 $device $luksname\"\n    unset luks_open\nfi\n\numount /mnt/azure_bek_disk\n\nunset device luksname luksfile\n\n# mark device as asked\n>> /tmp/cryptroot-ade-asked-$2\n\nneed_shutdown\nudevsettle\n\nexit 0\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/91ade/module-setup.sh",
    "content": "#!/bin/bash\n# vim: set tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab colorcolumn=80:\n#\n\ndepends() {\n    echo crypt systemd\n    return 0\n}\n\ninstall() {\n    inst_script \"$moddir\"/cryptroot-ask-ade.sh /sbin/cryptroot-ask-ade\n\n    inst_hook cmdline 30 \"$moddir/parse-crypt-ade.sh\"\n\n    inst_rules \"$moddir/50-udev-ade.rules\"\n\n    inst_multiple /etc/services\n        \n    inst /boot/luks/osluksheader /osluksheader\n   \n    dracut_need_initqueue\n}\n\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/91ade/parse-crypt-ade.sh",
    "content": "#!/bin/sh\n# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-\n# ex: ts=8 sw=4 sts=4 et filetype=sh\nset -x\n\n{\n    echo 'SUBSYSTEM!=\"block\", GOTO=\"luks_ade_end\"'\n    echo 'ACTION!=\"add|change\", GOTO=\"luks_ade_end\"'\n} > /etc/udev/rules.d/70-luks-ade.rules.new\n\n{\n    printf -- 'ATTRS{device_id}==\"?00000000-0000-*\", ENV{ID_FS_UUID}==\"osencrypt-locked\",'\n    printf -- 'RUN+=\"%s --settled --unique --onetime ' $(command -v initqueue)\n    printf -- '--name cryptroot-ask-ade-%%k %s ' $(command -v cryptroot-ask-ade)\n    printf -- '$env{DEVNAME} osencrypt\"\\n'\n} >> /etc/udev/rules.d/70-luks-ade.rules.new\n\necho 'LABEL=\"luks_ade_end\"' >> /etc/udev/rules.d/70-luks-ade.rules.new\nmv /etc/udev/rules.d/70-luks-ade.rules.new /etc/udev/rules.d/70-luks-ade.rules\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/OSEncryptionState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os.path\nimport re\n\nfrom collections import namedtuple\nfrom uuid import UUID\n\nfrom Common import *\nfrom CommandExecutor import *\nfrom BekUtil import *\nfrom DiskUtil import *\nfrom EncryptionConfig import *\n\nclass OSEncryptionState(object):\n    def __init__(self, state_name, context):\n        super(OSEncryptionState, self).__init__()\n\n        self.state_name = state_name\n        self.context = context\n        self.state_executed = False\n        self.state_marker = os.path.join(self.context.encryption_environment.os_encryption_markers_path, self.state_name)\n\n        self.command_executor = CommandExecutor(self.context.logger)\n\n        self.disk_util = DiskUtil(hutil=self.context.hutil,\n                                  patching=self.context.distro_patcher,\n                                  logger=self.context.logger,\n                                  encryption_environment=self.context.encryption_environment)\n\n        self.bek_util = BekUtil(disk_util=self.disk_util,\n                                logger=self.context.logger)\n\n        self.encryption_config = EncryptionConfig(encryption_environment=self.context.encryption_environment,\n                                                  logger=self.context.logger)\n\n        rootfs_mountpoint = '/'\n\n        if self._is_in_memfs_root():\n            rootfs_mountpoint = '/oldroot'\n\n        self.rootfs_sdx_path = self._get_fs_partition(rootfs_mountpoint)[0]\n\n        if self.rootfs_sdx_path == \"none\":\n            self.context.logger.log(\"self.rootfs_sdx_path is none, parsing UUID from fstab\")\n            self.rootfs_sdx_path = self._parse_uuid_from_fstab('/')\n            self.context.logger.log(\"rootfs_uuid: {0}\".format(self.rootfs_sdx_path))\n\n        if self.rootfs_sdx_path and (self.rootfs_sdx_path.startswith(\"/dev/disk/by-uuid/\") or self._is_uuid(self.rootfs_sdx_path)):\n            self.rootfs_sdx_path = self.disk_util.query_dev_sdx_path_by_uuid(self.rootfs_sdx_path)\n\n        self.context.logger.log(\"self.rootfs_sdx_path: {0}\".format(self.rootfs_sdx_path))\n\n        self.rootfs_disk = None\n        self.rootfs_block_device = None\n        self.bootfs_block_device = None\n        \n        if self.disk_util.is_os_disk_lvm():\n            proc_comm = ProcessCommunicator()\n            self.command_executor.Execute('pvs', True, communicator=proc_comm)\n\n            for line in proc_comm.stdout.split(\"\\n\"):\n                if \"rootvg\" in line:\n                    self.rootfs_block_device = line.strip().split()[0]\n                    self.rootfs_disk = self.rootfs_block_device[:-1]\n                    self.bootfs_block_device = self.rootfs_disk + '2'\n        elif not self.rootfs_sdx_path:\n            self.rootfs_disk = '/dev/sda'\n            self.rootfs_block_device = '/dev/sda2'\n        
    self.bootfs_block_device = '/dev/sda1'\n        elif self.rootfs_sdx_path == '/dev/mapper/osencrypt' or self.rootfs_sdx_path.startswith('/dev/dm-'):\n            self.rootfs_block_device = '/dev/mapper/osencrypt'\n            bootfs_uuid = self._parse_uuid_from_fstab('/boot')\n            self.context.logger.log(\"bootfs_uuid: {0}\".format(bootfs_uuid))\n            self.bootfs_block_device = self.disk_util.query_dev_sdx_path_by_uuid(bootfs_uuid)\n        else:\n            self.rootfs_block_device = self.disk_util.query_dev_id_path_by_sdx_path(self.rootfs_sdx_path)\n            if not self.rootfs_block_device.startswith('/dev/disk/by-id/'):\n                self.context.logger.log(\"rootfs_block_device: {0}\".format(self.rootfs_block_device))\n                raise Exception(\"Could not find rootfs block device\")\n\n            self.rootfs_disk = self.rootfs_block_device[:self.rootfs_block_device.index(\"-part\")]\n            self.bootfs_block_device = self.rootfs_disk + \"-part2\"\n\n            if self._get_block_device_size(self.bootfs_block_device) > self._get_block_device_size(self.rootfs_block_device):\n                self.context.logger.log(\"Swapping partition identifiers for rootfs and bootfs\")\n                self.rootfs_block_device, self.bootfs_block_device = self.bootfs_block_device, self.rootfs_block_device\n\n        self.context.logger.log(\"rootfs_disk: {0}\".format(self.rootfs_disk))\n        self.context.logger.log(\"rootfs_block_device: {0}\".format(self.rootfs_block_device))\n        self.context.logger.log(\"bootfs_block_device: {0}\".format(self.bootfs_block_device))\n        \n    def should_enter(self):\n        self.context.logger.log(\"OSEncryptionState.should_enter() called for {0}\".format(self.state_name))\n\n        if self.state_executed:\n            self.context.logger.log(\"State {0} has already executed, not entering\".format(self.state_name))\n            return False\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"State marker {0} does not exist, state {1} can be entered\".format(self.state_marker,\n                                                                                                       self.state_name))\n\n            return True\n        else:\n            self.context.logger.log(\"State marker {0} exists, state {1} has already executed\".format(self.state_marker,\n                                                                                                     self.state_name))\n            return False\n\n    def should_exit(self):\n        self.context.logger.log(\"OSEncryptionState.should_exit() called for {0}\".format(self.state_name))\n\n        if not os.path.exists(self.state_marker):\n            self.disk_util.make_sure_path_exists(self.context.encryption_environment.os_encryption_markers_path)\n            self.context.logger.log(\"Creating state marker {0}\".format(self.state_marker))\n            self.disk_util.touch_file(self.state_marker)\n\n        self.state_executed = True\n\n        self.context.logger.log(\"state_executed for {0}: {1}\".format(self.state_name, self.state_executed))\n\n        return self.state_executed\n\n    def _get_fs_partition(self, fs):\n        result = None\n        dev = os.lstat(fs).st_dev\n\n        for line in file('/proc/mounts'):\n            line = [s.decode('string_escape') for s in line.split()[:3]]\n            if dev == os.lstat(line[1]).st_dev:\n                result = tuple(line)\n\n        return result\n\n    def 
_is_in_memfs_root(self):\n        mounts = file('/proc/mounts', 'r').read()\n        return bool(re.search(r'/\\s+tmpfs', mounts))\n\n    def _parse_uuid_from_fstab(self, mountpoint):\n        contents = file('/etc/fstab', 'r').read()\n        matches = re.findall(r'UUID=(.*?)\\s+{0}\\s+'.format(mountpoint), contents)\n        if matches:\n            return matches[0]\n\n    def _get_block_device_size(self, dev):\n        if not os.path.exists(dev):\n            return 0\n\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute('blockdev --getsize64 {0}'.format(dev),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return int(proc_comm.stdout.strip())\n\n    def _is_uuid(self, s):\n        try:\n            UUID(s)\n        except Exception:\n            return False\n        else:\n            return True\n\nOSEncryptionStateContext = namedtuple('OSEncryptionStateContext',\n                                      ['hutil',\n                                       'distro_patcher',\n                                       'logger',\n                                       'encryption_environment'])\n"
  },
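  {
    "path": "VMEncryption/docs/examples/state_marker_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's note: hypothetical sketch, NOT part of the extension. It distills\n# the marker-file idempotency pattern OSEncryptionState implements above: a\n# state may be entered only while its marker file is absent, and exiting the\n# state creates the marker so a rerun (for example, after a reboot) skips\n# work that already completed. MARKER_DIR and MarkerState are invented names.\n#\n\nimport os\n\nMARKER_DIR = '/tmp/example_markers'   # hypothetical marker directory\n\n\nclass MarkerState(object):\n    def __init__(self, name):\n        self.name = name\n        self.marker = os.path.join(MARKER_DIR, name)\n\n    def should_enter(self):\n        # Enter only when no marker exists, i.e. the state has not completed.\n        return not os.path.exists(self.marker)\n\n    def should_exit(self):\n        # Record completion so future runs skip this state.\n        if not os.path.isdir(MARKER_DIR):\n            os.makedirs(MARKER_DIR)\n        open(self.marker, 'a').close()   # same effect as touch_file()\n        return True\n\n\nif __name__ == '__main__':\n    state = MarkerState('encrypt_block_device')\n    if state.should_enter():\n        print('running state ' + state.name)\n        state.should_exit()\n    else:\n        print('state already completed; skipping')\n"
  },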
  {
    "path": "VMEncryption/main/oscrypto/OSEncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom OSEncryptionState import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\n\nimport logging\n\nclass NullHandler(logging.Handler):\n    def emit(self, record):\n        pass\n\nlogging.getLogger(__name__).addHandler(NullHandler())\nlogging.NullHandler = NullHandler\n\nfrom transitions import *\n\nclass OSEncryptionStateMachine(object):\n    states = [\n        State(name='uninitialized'),\n        State(name='completed')\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        }\n    ]\n\n    def on_enter_state(self):\n        self.state_objs[self.state].enter()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return self.state_objs[self.state].should_exit()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(OSEncryptionStateMachine, self).__init__()\n\n        self.hutil = hutil\n        self.distro_patcher = distro_patcher\n        self.logger = logger\n        self.encryption_environment = encryption_environment\n        self.command_executor = CommandExecutor(self.logger)\n\n        self.context = OSEncryptionStateContext(hutil=self.hutil,\n                                                distro_patcher=self.distro_patcher,\n                                                logger=self.logger,\n                                                encryption_environment=self.encryption_environment)\n        \n        self.state_machine = Machine(model=self,\n                                     states=OSEncryptionStateMachine.states,\n                                     transitions=OSEncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def log_machine_state(self):\n        self.logger.log(\"======= MACHINE STATE: {0} =======\".format(self.state))\n\n    def start_encryption(self):\n        self.skip_encryption()\n        self.log_machine_state()\n\n    def _reboot(self):\n        self.command_executor.Execute('reboot')\n"
  },
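  {
    "path": "VMEncryption/docs/examples/transitions_guard_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's note: hypothetical sketch, NOT part of the extension. It shows the\n# `transitions` library pattern the state machines above build on: a trigger\n# guarded by a 'conditions' callback, which must return True before the\n# transition fires (mirroring should_exit_previous_state). Requires the\n# third-party 'transitions' package; TinyMachine is an invented name.\n#\n\nfrom transitions import Machine, State\n\n\nclass TinyMachine(object):\n    states = [State(name='uninitialized'),\n              State(name='working'),\n              State(name='completed')]\n\n    transitions = [\n        {'trigger': 'start', 'source': 'uninitialized', 'dest': 'working'},\n        {'trigger': 'finish', 'source': 'working', 'dest': 'completed',\n         'conditions': 'work_is_done'},\n    ]\n\n    def __init__(self):\n        self.done = False\n        self.machine = Machine(model=self,\n                               states=TinyMachine.states,\n                               transitions=TinyMachine.transitions,\n                               initial='uninitialized')\n\n    def work_is_done(self):\n        # Guard: the 'finish' trigger is ignored until this returns True.\n        return self.done\n\n\nif __name__ == '__main__':\n    m = TinyMachine()\n    m.start()\n    m.finish()         # guard fails, state stays 'working'\n    print(m.state)     # -> working\n    m.done = True\n    m.finish()         # guard passes\n    print(m.state)     # -> completed\n"
  },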
  {
    "path": "VMEncryption/main/oscrypto/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom OSEncryptionState import *\nfrom OSEncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/CentOS68EncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass CentOS68EncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='selinux', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='split_root_partition', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_selinux',\n            'source': 'prereq',\n            'dest': 'selinux',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'selinux',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_split_root_partition',\n            'source': 'unmount_oldroot',\n            'dest': 'split_root_partition',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'split_root_partition',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 
'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'stop_machine',\n            'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(CentOS68EncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(CentOS68EncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(CentOS68EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'selinux': SelinuxState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'split_root_partition': SplitRootPartitionState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=CentOS68EncryptionStateMachine.states,\n                                     transitions=CentOS68EncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"mount\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        if '/dev/mapper/osencrypt' in proc_comm.stdout:\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_selinux()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                
self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n                \n                sleep(10)\n                if attempt > 10:\n                    raise Exception(message)\n            else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_split_root_partition()\n        self.log_machine_state()\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
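  {
    "path": "VMEncryption/docs/examples/bounded_retry_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Editor's note: hypothetical sketch, NOT part of the extension. It isolates\n# the bounded retry loop the CentOS state machine above uses for unmounting\n# /oldroot: attempt the operation, log and sleep on failure, and give up after\n# a fixed number of attempts. flaky_unmount() is an invented stand-in for a\n# step that can fail transiently.\n#\n\nimport random\nfrom time import sleep\n\n\ndef flaky_unmount():\n    # Stand-in for an operation that may fail a few times before succeeding.\n    if random.random() < 0.7:\n        raise Exception('device is busy')\n\n\ndef retry(operation, max_attempts=10, delay_seconds=1):\n    attempt = 1\n    while True:\n        try:\n            operation()\n            return attempt\n        except Exception as e:\n            print('attempt #{0} failed: {1}'.format(attempt, e))\n            if attempt >= max_attempts:\n                raise Exception('gave up after {0} attempts'.format(max_attempts))\n            sleep(delay_seconds)\n            attempt += 1\n\n\nif __name__ == '__main__':\n    print('succeeded on attempt #{0}'.format(retry(flaky_unmount)))\n"
  },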
  {
    "path": "VMEncryption/main/oscrypto/centos_68/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom CentOS68EncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptpatches/centos_68_dracut.patch",
    "content": "diff -Naur 90crypt.orig/cryptroot-ask.sh 90crypt/cryptroot-ask.sh\n--- 90crypt.orig/cryptroot-ask.sh\t2016-11-20 18:43:12.697422815 -0800\n+++ 90crypt/cryptroot-ask.sh\t2016-11-20 18:43:28.033101905 -0800\n@@ -64,6 +64,25 @@\n # Open LUKS device\n #\n \n+MountPoint=/tmp-keydisk-mount\n+KeyFileName=LinuxPassPhraseFileName\n+echo \"Trying to get the key from disks ...\" >&2\n+mkdir -p $MountPoint >&2\n+modprobe vfat >/dev/null >&2\n+modprobe fuse >/dev/null >&2\n+for SFS in /dev/sd*; do\n+\techo \"> Trying device:$SFS...\" >&2\n+\tmount ${SFS}1 $MountPoint -t vfat -r >&2\n+\tif [ -f $MountPoint/$KeyFileName ]; then\n+\t\techo \"> keyfile got...\" >&2\n+\t\tcp $MountPoint/$KeyFileName /tmp-keyfile\n+\t\tluksfile=/tmp-keyfile\n+\t\tumount $MountPoint\n+\t\tbreak\n+\tfi\n+done\n+\n info \"luksOpen $device $luksname $luksfile\"\n \n if [ -n \"$luksfile\" -a \"$luksfile\" != \"none\" -a -e \"$luksfile\" ]; then\ndiff -Naur 90crypt.orig/parse-crypt.sh 90crypt/parse-crypt.sh\n--- 90crypt.orig/parse-crypt.sh\t2016-11-20 18:43:12.698422813 -0800\n+++ 90crypt/parse-crypt.sh\t2016-11-20 18:43:28.033101905 -0800\n@@ -12,13 +12,13 @@\n \techo '. /lib/dracut-lib.sh' > /emergency/90-crypt.sh\n \tfor luksid in $LUKS; do \n             luksid=${luksid##luks-}\n-\t    printf 'ENV{ID_FS_TYPE}==\"crypto_LUKS\", ENV{ID_FS_UUID}==\"%s*\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%%k /sbin/cryptroot-ask $env{DEVNAME} luks-$env{ID_FS_UUID}\"\\n' $luksid \\\n+            printf 'KERNEL==\"sda1\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%%k /sbin/cryptroot-ask $env{DEVNAME} osencrypt\"\\n' $luksid \\\n \t        >> /etc/udev/rules.d/70-luks.rules\t\n-\t    printf '[ -e /dev/disk/by-uuid/*%s* ] || exit 1 \\n'  $luksid >> /initqueue-finished/crypt.sh\n-\t    printf '[ -e /dev/disk/by-uuid/*%s* ] || warn \"crypto LUKS UUID \"%s\" not found\" \\n' $luksid $luksid >> /emergency/90-crypt.sh\n+\t    printf '[ -e /dev/mapper/osencrypt ] || ( /sbin/cryptroot-ask /dev/sda1 osencrypt && [ -e /dev/mapper/osencrypt ] ) || exit 1 \\n'  $luksid >> /initqueue-finished/crypt.sh\n+\t    printf '[ -e /dev/mapper/osencrypt ] || warn \"crypto LUKS UUID \"%s\" not found\" \\n' $luksid $luksid >> /emergency/90-crypt.sh\n \tdone\n     else\n-\techo 'ENV{ID_FS_TYPE}==\"crypto_LUKS\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%k /sbin/cryptroot-ask $env{DEVNAME} luks-$env{ID_FS_UUID}\"' \\\n+        echo 'KERNEL=\"sda1\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%k /sbin/cryptroot-ask $env{DEVNAME} osencrypt\"' \\\n \t    >> /etc/udev/rules.d/70-luks.rules\t\n     fi\n     echo 'LABEL=\"luks_end\"' >> /etc/udev/rules.d/70-luks.rules\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport re\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n\n        self.context.logger.log(\"Resizing \" + self.rootfs_block_device)\n\n        current_rootfs_size = self._get_root_fs_size_in_sectors(sector_size=512)\n        desired_rootfs_size = current_rootfs_size - 8192\n        \n        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)\n        self.command_executor.Execute('resize2fs {0} {1}s'.format(self.rootfs_block_device, desired_rootfs_size), True)\n        \n        self.command_executor.Execute('mount /boot', False)\n        # self._find_bek_and_execute_action('_dump_passphrase')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        self._find_bek_and_execute_action('_luks_reencrypt')\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen {0} osencrypt -d {1}'.format(self.rootfs_block_device, bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _luks_reencrypt(self, bek_path):\n        self.command_executor.ExecuteInBash('cat {0} | cryptsetup-reencrypt -N --reduce-device-size 8192s {1} -v'.format(bek_path,\n                                                                                                                         
self.rootfs_block_device),\n                                            raise_exception_on_failure=True)\n\n    def _dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)    \n\n    def _get_root_fs_size_in_sectors(self, sector_size):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"dumpe2fs -h {0}\".format(self.rootfs_block_device),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        root_fs_block_count = re.findall(r'Block count:\\s*(\\d+)', proc_comm.stdout)\n        root_fs_block_size = re.findall(r'Block size:\\s*(\\d+)', proc_comm.stdout)\n\n        if not root_fs_block_count or not root_fs_block_size:\n            raise Exception(\"Error parsing dumpe2fs output, count={0}, size={1}\".format(root_fs_block_count,\n                                                                                        root_fs_block_size))\n\n        root_fs_block_count = int(root_fs_block_count[0])\n        root_fs_block_size = int(root_fs_block_size[0])\n\n        return (root_fs_block_count * root_fs_block_size) / sector_size\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport re\nimport os\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' 
+ CommonVariables.extension_name\n            self.command_executor.Execute('cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name),\n                                          True)\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully, restarting WALA\")\n\n            self.command_executor.Execute('service sshd restart')\n            self.command_executor.Execute('service atd restart')\n\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"service waagent restart\\n\")\n\n            with open(\"/delete-lock.sh\", \"w\") as f:\n                f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n            self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n            self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n\n            self.should_exit()\n\n            self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        patchesdir = os.path.join(scriptdir, '../encryptpatches')\n        patchpath = os.path.join(patchesdir, 'centos_68_dracut.patch')\n\n        if not os.path.exists(patchpath):\n            message = \"Patch not found at path: {0}\".format(patchpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Patch found at path: {0}\".format(patchpath))\n\n        self.disk_util.remove_mount_info('/')\n        self.disk_util.append_mount_info('/dev/mapper/osencrypt', '/')\n\n        self.command_executor.ExecuteInBash('patch -b -d /usr/share/dracut/modules.d/90crypt -p1 <{0}'.format(patchpath), True)\n\n        self._append_contents_to_file('\\nadd_drivers+=\" fuse vfat nls_cp437 nls_iso8859-1\"\\n',\n                                      '/etc/dracut.conf')\n        self._append_contents_to_file('\\nadd_dracutmodules+=\" crypt\"\\n',\n                                      '/etc/dracut.conf')\n\n        self.command_executor.Execute('/sbin/dracut -f -v', True)\n        self.command_executor.ExecuteInBash('mv -f /boot/initramfs* /boot/boot/', True)\n\n        with open(\"/boot/boot/grub/grub.conf\", \"r\") as f:\n            contents = f.read()\n\n        contents = re.sub(r\"rd_NO_LUKS \", r\"\", contents)\n        contents = re.sub(r\"root=(.*?)\\s\", r\"root=/dev/mapper/osencrypt rd_LUKS_UUID=osencrypt rdinitdebug \", contents)\n        contents = re.sub(r\"hd0,0\", r\"hd0,1\", contents)\n\n        with open(\"/boot/boot/grub/grub.conf\", \"w\") as f:\n            f.write(contents)\n\n        grub_input = \"root (hd0,1)\\nsetup (hd0)\\nquit\\n\"\n        self.command_executor.Execute('grub', input=grub_input, 
raise_exception_on_failure=True)\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\nfrom pprint import pprint\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if ((distro_info[0] == 'redhat' and distro_info[1] == '6.8') or\n            (distro_info[0] == 'centos' and (distro_info[1] == '6.8' or distro_info[1] == '6.9'))):\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"CentOS68EncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                            distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n        self._patch_waagent()\n\n        self.command_executor.Execute('telinit u', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _patch_waagent(self):\n        self.context.logger.log(\"Patching waagent\")\n\n        contents = None\n\n        with open('/etc/waagent.conf', 'r') as f:\n            contents = f.read()\n\n        contents = re.sub(r'ResourceDisk.EnableSwap=.', 'ResourceDisk.EnableSwap=n', contents)\n\n        with open('/etc/waagent.conf', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"waagent patched successfully\")\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if se_linux_status.lower() == 'enforcing':\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/SplitRootPartitionState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nimport parted\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass SplitRootPartitionState(OSEncryptionState):\n    def __init__(self, context):\n        super(SplitRootPartitionState, self).__init__('SplitRootPartitionState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter split_root_partition state\")\n\n        if not super(SplitRootPartitionState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for split_root_partition state\")\n\n        self.command_executor.Execute(\"e2fsck -yf {0}\".format(self.rootfs_block_device), True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering split_root_partition state\")\n\n        device = parted.getDevice(self.rootfs_disk)\n        disk = parted.Disk(device)\n\n        original_root_fs_size = self._get_root_fs_size_in(device.sectorSize)\n        self.context.logger.log(\"Original root filesystem size (sectors): {0}\".format(original_root_fs_size))\n\n        desired_boot_partition_size = self._size_to_sectors(256, 'MiB', device.sectorSize)\n        self.context.logger.log(\"Desired boot partition size (sectors): {0}\".format(desired_boot_partition_size))\n        \n        root_partition = disk.partitions[0]\n\n        original_root_partition_start = root_partition.geometry.start\n        original_root_partition_end = root_partition.geometry.end\n\n        self.context.logger.log(\"Original root partition start (sectors): {0}\".format(original_root_partition_start))\n        self.context.logger.log(\"Original root partition end (sectors): {0}\".format(original_root_partition_end))\n\n        desired_root_partition_start = original_root_partition_start\n        desired_root_partition_end = original_root_partition_end - desired_boot_partition_size\n        desired_root_partition_size = desired_root_partition_end - desired_root_partition_start\n\n        self.context.logger.log(\"Desired root partition start (sectors): {0}\".format(desired_root_partition_start))\n        self.context.logger.log(\"Desired root partition end (sectors): {0}\".format(desired_root_partition_end))\n        self.context.logger.log(\"Desired root partition size (sectors): {0}\".format(desired_root_partition_size))\n        \n        self.context.logger.log(\"Resizing root filesystem\")\n        desired_root_fs_size = desired_root_partition_size\n        self._resize_root_fs_to_sectors(desired_root_fs_size, device.sectorSize)\n\n        desired_root_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_root_partition_start,\n     
                                                     length=desired_root_partition_size)\n        root_partition_constraint = parted.Constraint(exactGeom=desired_root_partition_geometry)\n        disk.setPartitionGeometry(partition=root_partition,\n                                  constraint=root_partition_constraint,\n                                  start=desired_root_partition_start,\n                                  end=desired_root_partition_end)\n\n        desired_boot_partition_start = disk.getFreeSpaceRegions()[1].start\n        desired_boot_partition_end = disk.getFreeSpaceRegions()[1].end\n        desired_boot_partition_size = disk.getFreeSpaceRegions()[1].length\n\n        self.context.logger.log(\"Desired boot partition start (sectors): {0}\".format(desired_boot_partition_start))\n        self.context.logger.log(\"Desired boot partition end (sectors): {0}\".format(desired_boot_partition_end))\n\n        desired_boot_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_boot_partition_start,\n                                                          length=desired_boot_partition_size)\n        boot_partition_constraint = parted.Constraint(exactGeom=desired_boot_partition_geometry)\n        desired_boot_partition = parted.Partition(disk=disk,\n                                                  type=parted.PARTITION_NORMAL,\n                                                  geometry=desired_boot_partition_geometry)\n\n        disk.addPartition(partition=desired_boot_partition, constraint=boot_partition_constraint)\n\n        disk.commit()\n\n        probed_root_fs = parted.probeFileSystem(disk.partitions[0].geometry)\n        if not probed_root_fs == 'ext4':\n            raise Exception(\"Probed root fs is not ext4\")\n\n        disk.partitions[1].setFlag(parted.PARTITION_BOOT)\n\n        disk.commit()\n        \n        self.command_executor.Execute(\"partprobe\", False)\n\n        retry_counter = 0\n        while not os.path.exists(self.bootfs_block_device) and retry_counter < 10:\n            sleep(5)\n            self.command_executor.Execute(\"partprobe\", False)\n            retry_counter += 1\n\n        self.command_executor.Execute(\"mkfs.ext2 {0}\".format(self.bootfs_block_device), True)\n        \n        boot_partition_uuid = self._get_uuid(self.bootfs_block_device)\n\n        # Move stuff from /oldroot/boot to new partition, make new partition mountable at the same spot\n        self.command_executor.Execute(\"mount {0} /oldroot\".format(self.rootfs_block_device), True)\n        self.command_executor.Execute(\"mkdir /oldroot/memroot\", True)\n        self.command_executor.Execute(\"mount --make-rprivate /\", True)\n        self.command_executor.Execute(\"pivot_root /oldroot /oldroot/memroot\", True)\n        self.command_executor.ExecuteInBash(\"for i in dev proc sys; do mount --move /memroot/$i /$i; done\", True)\n        self.command_executor.Execute(\"mv /boot /boot.backup\", True)\n        self.command_executor.Execute(\"mkdir /boot\", True)\n        self.disk_util.remove_mount_info(\"/boot\")\n        self._append_boot_partition_uuid_to_fstab(boot_partition_uuid)\n        self.command_executor.Execute(\"cp /etc/fstab /memroot/etc/fstab\", True)\n        self.command_executor.Execute(\"mount /boot\", True)\n        self.command_executor.Execute(\"mkdir /boot/boot\", True)\n        self.command_executor.ExecuteInBash(\"shopt -s dotglob && mv /boot.backup/* /boot/boot/\", True)\n        
self.command_executor.Execute(\"rmdir /boot.backup\", True)\n        self.command_executor.Execute(\"mount --make-rprivate /\", True)\n        self.command_executor.Execute(\"pivot_root /memroot /memroot/oldroot\", True)\n        self.command_executor.Execute(\"rmdir /oldroot/memroot\", True)\n        self.command_executor.ExecuteInBash(\"for i in dev proc sys; do mount --move /oldroot/$i /$i; done\", True)\n        self.command_executor.Execute(\"umount /oldroot/boot\", True)\n\n        try:\n            self.command_executor.Execute(\"umount /oldroot\", True)\n        except:\n            self.context.logger.log(\"Could not unmount /oldroot, attempting to restart WALA and unmount again\")\n\n            self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n            self.command_executor.Execute('service waagent stop', True)\n\n            os.unlink('/var/lib/azure_disk_encryption_config/os_encryption_markers/UnmountOldrootState')\n            self.should_exit()\n\n            raise\n        \n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit split_root_partition state\")\n        \n        self.command_executor.ExecuteInBash(\"mount /boot || mountpoint /boot\", True)\n        self.command_executor.ExecuteInBash(\"[ -e /boot/boot/grub ]\", True)\n        self.command_executor.Execute(\"umount /boot\", True)\n\n        return super(SplitRootPartitionState, self).should_exit()\n\n    def _size_to_sectors(self, bytes_, unit, sector_size):\n        exponents = {\n            \"B\":    1,       # byte\n            \"kB\":   1000**1, # kilobyte\n            \"MB\":   1000**2, # megabyte\n            \"GB\":   1000**3, # gigabyte\n            \"TB\":   1000**4, # terabyte\n            \"PB\":   1000**5, # petabyte\n            \"EB\":   1000**6, # exabyte\n            \"ZB\":   1000**7, # zettabyte\n            \"YB\":   1000**8, # yottabyte\n\n            \"KiB\":  1024**1, # kibibyte\n            \"MiB\":  1024**2, # mebibyte\n            \"GiB\":  1024**3, # gibibyte\n            \"TiB\":  1024**4, # tebibyte\n            \"PiB\":  1024**5, # pebibyte\n            \"EiB\":  1024**6, # exbibyte\n            \"ZiB\":  1024**7, # zebibyte\n            \"YiB\":  1024**8  # yobibyte\n        }\n\n        if unit not in exponents.keys():\n            raise SyntaxError(\"{:} is not a valid SI or IEC byte unit\".format(unit))\n        else:\n            return bytes_ * exponents[unit] // sector_size\n\n    def _get_uuid(self, partition_name):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"blkid -s UUID -o value {0}\".format(partition_name),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return proc_comm.stdout.strip()\n\n    def _append_boot_partition_uuid_to_fstab(self, boot_partition_uuid):\n        self.context.logger.log(\"Updating fstab\")\n\n        contents = None\n\n        with open('/etc/fstab', 'r') as f:\n            contents = f.read()\n\n        contents += '\\n'\n        contents += 'UUID={0}\\t/boot\\text2\\tdefaults\\t0 0'.format(boot_partition_uuid)\n        contents += '\\n'\n\n        with open('/etc/fstab', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"fstab updated successfully\")\n\n    def _get_root_fs_size_in(self, sector_size):\n        proc_comm = ProcessCommunicator()\n        
self.command_executor.Execute(command_to_execute=\"dumpe2fs -h {0}\".format(self.rootfs_block_device),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        root_fs_block_count = re.findall(r'Block count:\\s*(\\d+)', proc_comm.stdout)\n        root_fs_block_size = re.findall(r'Block size:\\s*(\\d+)', proc_comm.stdout)\n\n        if not root_fs_block_count or not root_fs_block_size:\n            raise Exception(\"Error parsing dumpe2fs output, count={0}, size={1}\".format(root_fs_block_count,\n                                                                                        root_fs_block_size))\n\n        root_fs_block_count = int(root_fs_block_count[0])\n        root_fs_block_size = int(root_fs_block_size[0])\n        root_fs_size = self._size_to_sectors(root_fs_block_count * root_fs_block_size, 'B', sector_size)\n\n        return root_fs_size\n\n    def _resize_root_fs_to_sectors(self, desired_root_fs_size, sectorSize):\n        self.context.logger.log(\"Desired root filesystem size (sectors): {0}\".format(desired_root_fs_size))\n\n        self.command_executor.Execute(\"resize2fs {0} {1}s\".format(self.rootfs_block_device, desired_root_fs_size), True)\n\n        resized_root_fs_size = self._get_root_fs_size_in(sectorSize)\n\n        self.context.logger.log(\"Resized root filesystem size (sectors): {0}\".format(resized_root_fs_size))\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom OSEncryptionState import *\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /tmp/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /tmp/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /tmp/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin libexec lib lib64 share; do cp -ax /usr/$i /tmp/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /tmp/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /tmp/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /tmp/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/tmp/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('service waagent stop', True)\n        self.command_executor.Execute('pivot_root /tmp/tmproot /tmp/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            # the restarted process shall see the marker and advance the state machine\n            self.command_executor.Execute('service atd restart', True)\n\n            
os.chdir('/')\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"service waagent restart\\n\")\n            self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n\n            self.context.hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_error_status,\n                                       code=CommonVariables.encryption_failed,\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)\n\n        self.command_executor.Execute('service sshd restart')\n        self.command_executor.Execute('dhclient')\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"/sbin/service --status-all\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"waagent\" in line or \"ssh\" in line:\n                continue\n\n            splitted = line.split()\n            if len(splitted):\n                service = splitted[0]\n                self.command_executor.Execute('service {0} restart'.format(service))\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mount -t proc proc /proc')\n        self.command_executor.Execute('mount -t sysfs sysfs /sys')\n        self.command_executor.Execute('swapoff -a', True)\n\n        self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n\n        if os.path.exists(\"/oldroot/mnt/resource\"):\n            self.command_executor.Execute('umount /oldroot/mnt/resource')\n\n        if os.path.exists(\"/oldroot/mnt\"):\n            self.command_executor.Execute('umount /oldroot/mnt')\n\n        if os.path.exists(\"/oldroot/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')\n\n        if os.path.exists(\"/mnt\"):\n            self.command_executor.Execute('umount /mnt')\n\n        if os.path.exists(\"/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /mnt/azure_bek_disk')\n\n        self.command_executor.Execute('umount /oldroot/mnt/resource')\n       
 self.command_executor.Execute('umount /oldroot/boot')\n        self.command_executor.Execute('umount /oldroot/misc')\n        self.command_executor.Execute('umount /oldroot/net')\n\n        self.command_executor.Execute('telinit u', True)\n        self.command_executor.Execute('kill 1', True)\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm /oldroot\",\n                                      raise_exception_on_failure=False,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes using oldroot:\\n{0}\".format(proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA before committing suicide\")\n                self.context.logger.log(\"Current executable path: \" + sys.executable)\n                self.context.logger.log(\"Current executable arguments: \" + \" \".join(sys.argv))\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.Execute('service atd restart')\n\n                os.chdir('/')\n                with open(\"/delete-lock.sh\", \"w\") as f:\n                    f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n                self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n                self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        sleep(3)\n\n        self.command_executor.ExecuteInBash('for mp in `grep /oldroot /proc/mounts | cut -f2 -d\\' \\' | sort -r`; do umount $mp; done', True)\n\n        sleep(3)\n\n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Attempt #{0} for reloading udev rules\".format(attempt))\n            self.command_executor.ExecuteInBash('pkill -f .*udev.*')\n            self.command_executor.ExecuteInBash('udevd &')\n            self.command_executor.ExecuteInBash('udevadm control --reload-rules && sleep 3')\n\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                break\n\n            attempt += 1\n\n        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/centos_68/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom SelinuxState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom SplitRootPartitionState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/RHEL68EncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass RHEL68EncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='selinux', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_selinux',\n            'source': 'prereq',\n            'dest': 'selinux',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'selinux',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'unmount_oldroot',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'stop_machine',\n            
'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(RHEL68EncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(RHEL68EncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(RHEL68EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'selinux': SelinuxState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=RHEL68EncryptionStateMachine.states,\n                                     transitions=RHEL68EncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"mount\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        if '/dev/mapper/osencrypt' in proc_comm.stdout:\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_selinux()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n                \n                sleep(10)\n\n                if attempt > 10:\n                    raise 
Exception(message)\n            else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom RHEL68EncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptpatches/rhel_68_dracut.patch",
    "content": "diff -Naur 90crypt.orig/cryptroot-ask.sh 90crypt/cryptroot-ask.sh\n--- 90crypt.orig/cryptroot-ask.sh\t2016-11-20 18:43:12.697422815 -0800\n+++ 90crypt/cryptroot-ask.sh\t2016-11-20 18:43:28.033101905 -0800\n@@ -64,6 +64,25 @@\n # Open LUKS device\n #\n \n+MountPoint=/tmp-keydisk-mount\n+KeyFileName=LinuxPassPhraseFileName\n+echo \"Trying to get the key from disks ...\" >&2\n+mkdir -p $MountPoint >&2\n+modprobe vfat >/dev/null >&2\n+modprobe fuse >/dev/null >&2\n+for SFS in /dev/sd*; do\n+\techo \"> Trying device:$SFS...\" >&2\n+\tmount ${SFS}1 $MountPoint -t vfat -r >&2\n+\tif [ -f $MountPoint/$KeyFileName ]; then\n+\t\techo \"> keyfile got...\" >&2\n+\t\tcp $MountPoint/$KeyFileName /tmp-keyfile\n+\t\tluksfile=/tmp-keyfile\n+\t\tumount $MountPoint\n+\t\tbreak\n+\tfi\n+done\n+\n info \"luksOpen $device $luksname $luksfile\"\n \n if [ -n \"$luksfile\" -a \"$luksfile\" != \"none\" -a -e \"$luksfile\" ]; then\ndiff -Naur 90crypt.orig/parse-crypt.sh 90crypt/parse-crypt.sh\n--- 90crypt.orig/parse-crypt.sh\t2016-11-20 18:43:12.698422813 -0800\n+++ 90crypt/parse-crypt.sh\t2016-11-20 18:43:28.033101905 -0800\n@@ -12,13 +12,13 @@\n \techo '. /lib/dracut-lib.sh' > /emergency/90-crypt.sh\n \tfor luksid in $LUKS; do \n             luksid=${luksid##luks-}\n-\t    printf 'ENV{ID_FS_TYPE}==\"crypto_LUKS\", ENV{ID_FS_UUID}==\"%s*\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%%k /sbin/cryptroot-ask $env{DEVNAME} luks-$env{ID_FS_UUID}\"\\n' $luksid \\\n+            printf 'KERNEL==\"sda2\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%%k /sbin/cryptroot-ask $env{DEVNAME} osencrypt\"\\n' $luksid \\\n \t        >> /etc/udev/rules.d/70-luks.rules\t\n-\t    printf '[ -e /dev/disk/by-uuid/*%s* ] || exit 1 \\n'  $luksid >> /initqueue-finished/crypt.sh\n-\t    printf '[ -e /dev/disk/by-uuid/*%s* ] || warn \"crypto LUKS UUID \"%s\" not found\" \\n' $luksid $luksid >> /emergency/90-crypt.sh\n+\t    printf '[ -e /dev/mapper/osencrypt ] || ( /sbin/cryptroot-ask /dev/sda2 osencrypt && [ -e /dev/mapper/osencrypt ] ) || exit 1 \\n'  $luksid >> /initqueue-finished/crypt.sh\n+\t    printf '[ -e /dev/mapper/osencrypt ] || warn \"crypto LUKS UUID \"%s\" not found\" \\n' $luksid $luksid >> /emergency/90-crypt.sh\n \tdone\n     else\n-\techo 'ENV{ID_FS_TYPE}==\"crypto_LUKS\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%k /sbin/cryptroot-ask $env{DEVNAME} luks-$env{ID_FS_UUID}\"' \\\n+        echo 'KERNEL=\"sda2\", RUN+=\"/sbin/initqueue --unique --onetime --name cryptroot-ask-%k /sbin/cryptroot-ask $env{DEVNAME} osencrypt\"' \\\n \t    >> /etc/udev/rules.d/70-luks.rules\t\n     fi\n     echo 'LABEL=\"luks_end\"' >> /etc/udev/rules.d/70-luks.rules\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport re\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n\n        self.context.logger.log(\"Resizing \" + self.rootfs_block_device)\n\n        current_rootfs_size = self._get_root_fs_size_in_sectors(sector_size=512)\n        desired_rootfs_size = current_rootfs_size - 8192\n\n        self.command_executor.Execute('resize2fs {0} {1}s'.format(self.rootfs_block_device, desired_rootfs_size), True)\n        \n        self.command_executor.Execute('mount /boot', False)\n        # self._find_bek_and_execute_action('_dump_passphrase')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        self._find_bek_and_execute_action('_luks_reencrypt')\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen {0} osencrypt -d {1}'.format(self.rootfs_block_device, bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _luks_reencrypt(self, bek_path):\n        self.command_executor.ExecuteInBash('cat {0} | cryptsetup-reencrypt -N --reduce-device-size 8192s {1} -v'.format(bek_path,\n                                                                                                                         self.rootfs_block_device),\n                                            raise_exception_on_failure=True)\n\n    def 
_dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)    \n\n    def _get_root_fs_size_in_sectors(self, sector_size):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"dumpe2fs -h {0}\".format(self.rootfs_block_device),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        root_fs_block_count = re.findall(r'Block count:\\s*(\\d+)', proc_comm.stdout)\n        root_fs_block_size = re.findall(r'Block size:\\s*(\\d+)', proc_comm.stdout)\n\n        if not root_fs_block_count or not root_fs_block_size:\n            raise Exception(\"Error parsing dumpe2fs output, count={0}, size={1}\".format(root_fs_block_count,\n                                                                                        root_fs_block_size))\n\n        root_fs_block_count = int(root_fs_block_count[0])\n        root_fs_block_size = int(root_fs_block_size[0])\n\n        return (root_fs_block_count * root_fs_block_size) / sector_size\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport re\nimport os\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' 
+ CommonVariables.extension_name\n            self.command_executor.Execute('cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name),\n                                          True)\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully, restarting WALA\")\n\n            self.command_executor.Execute('service sshd restart')\n            self.command_executor.Execute('service atd restart')\n\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"service waagent restart\\n\")\n\n            with open(\"/delete-lock.sh\", \"w\") as f:\n                f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n            self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n            self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n\n            self.should_exit()\n\n            self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        patchesdir = os.path.join(scriptdir, '../encryptpatches')\n        patchpath = os.path.join(patchesdir, 'rhel_68_dracut.patch')\n\n        if not os.path.exists(patchpath):\n            message = \"Patch not found at path: {0}\".format(patchpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Patch found at path: {0}\".format(patchpath))\n\n        self.disk_util.remove_mount_info('/')\n        self.disk_util.append_mount_info('/dev/mapper/osencrypt', '/')\n\n        self.command_executor.ExecuteInBash('patch -b -d /usr/share/dracut/modules.d/90crypt -p1 <{0}'.format(patchpath), True)\n\n        self._append_contents_to_file('\\nadd_drivers+=\" fuse vfat nls_cp437 nls_iso8859-1\"\\n',\n                                      '/etc/dracut.conf')\n        self._append_contents_to_file('\\nadd_dracutmodules+=\" crypt\"\\n',\n                                      '/etc/dracut.conf')\n\n        self.command_executor.Execute('/sbin/dracut -f -v', True)\n\n        with open(\"/boot/grub/grub.conf\", \"r\") as f:\n            contents = f.read()\n\n        contents = re.sub(r\"rd_NO_LUKS \", r\"\", contents)\n        contents = re.sub(r\"root=(.*?)\\s\", r\"root=/dev/mapper/osencrypt rd_LUKS_UUID=osencrypt rdinitdebug \", contents)\n\n        with open(\"/boot/grub/grub.conf\", \"w\") as f:\n            f.write(contents)\n"
  },
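  {
    "path": "docs/examples/pivot_root_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: approximates the\n# pivot_root dance PatchBootSystemState performs to hop from the tmpfs\n# memroot into the freshly encrypted root and back out again. The paths\n# and the dev/proc/sys/boot move list mirror the state code; the run()\n# helper and function names are illustrative. Root privileges required.\n#\n\nimport subprocess\n\ndef run(cmd):\n    # shell=True so the shell for-loops work, in the spirit of ExecuteInBash\n    subprocess.check_call(cmd, shell=True)\n\ndef pivot_into_oldroot():\n    run('mount /dev/mapper/osencrypt /oldroot')\n    run('mount --make-rprivate /')\n    run('mkdir -p /oldroot/memroot')\n    run('pivot_root /oldroot /oldroot/memroot')\n    # carry the API filesystems over into the new root\n    run('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done')\n\ndef pivot_back_to_memroot():\n    run('mount --make-rprivate /')\n    run('pivot_root /memroot /memroot/oldroot')\n    run('rmdir /oldroot/memroot')\n    run('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n"
  },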
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\nfrom pprint import pprint\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if ((distro_info[0] == 'redhat' and distro_info[1] == '6.8') or\n            (distro_info[0] == 'centos' and distro_info[1] == '6.8')):\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"RHEL68EncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                            distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n        self._patch_waagent()\n\n        self.command_executor.Execute('telinit u', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _patch_waagent(self):\n        self.context.logger.log(\"Patching waagent\")\n\n        contents = None\n\n        with open('/etc/waagent.conf', 'r') as f:\n            contents = f.read()\n\n        contents = re.sub(r'ResourceDisk.EnableSwap=.', 'ResourceDisk.EnableSwap=n', contents)\n\n        with open('/etc/waagent.conf', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"waagent patched successfully\")\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if se_linux_status.lower() == 'enforcing':\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
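  {
    "path": "docs/examples/selinux_toggle_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: one plausible shape for\n# the get_se_linux()/disable_se_linux() helpers SelinuxState calls on\n# encryption_environment (their real definitions live elsewhere in this\n# repo). getenforce/setenforce are the standard SELinux tools; everything\n# else here is illustrative.\n#\n\nimport subprocess\n\ndef get_se_linux():\n    # getenforce prints Enforcing, Permissive or Disabled\n    return subprocess.check_output(['getenforce']).decode('utf-8').strip().lower()\n\ndef disable_se_linux():\n    # setenforce 0 drops to permissive mode until the next boot\n    subprocess.check_call(['setenforce', '0'])\n\nif __name__ == '__main__':\n    if get_se_linux() == 'enforcing':\n        disable_se_linux()\n"
  },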
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom OSEncryptionState import *\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /tmp/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /tmp/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /tmp/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin libexec lib lib64 share; do cp -ax /usr/$i /tmp/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /tmp/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /tmp/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /tmp/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/tmp/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('service waagent stop', True)\n        self.command_executor.Execute('pivot_root /tmp/tmproot /tmp/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            # the restarted process shall see the marker and advance the state machine\n            self.command_executor.Execute('service atd restart', True)\n\n            
os.chdir('/')\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"service waagent restart\\n\")\n            self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n\n            self.context.hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_error_status,\n                                       code=CommonVariables.encryption_failed,\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
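  {
    "path": "docs/examples/stripdown_marker_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: distils the two-pass\n# marker protocol in StripdownState.should_exit(). The first run drops a\n# marker and exits without advancing the state machine; after the daemon\n# is relaunched from the stripped-down tmpfs root, the second run sees\n# the marker and lets the machine advance. The marker path and the exit\n# stub are illustrative stand-ins for state_marker and hutil.do_exit().\n#\n\nimport os\nimport sys\n\nMARKER = '/var/lib/azure_disk_encryption_config/os_encryption_markers/StripdownState'\n\ndef should_exit():\n    if not os.path.exists(MARKER):\n        open(MARKER, 'w').close()  # create the marker, but do not advance\n        sys.exit(0)                # stand-in for hutil.do_exit(...)\n    return True                    # second call: continue the process\n"
  },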
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)\n\n        self.command_executor.Execute('service sshd restart')\n        self.command_executor.Execute('dhclient')\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"/sbin/service --status-all\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"waagent\" in line or \"ssh\" in line:\n                continue\n\n            splitted = line.split()\n            if len(splitted):\n                service = splitted[0]\n                self.command_executor.Execute('service {0} restart'.format(service))\n\n        self.command_executor.Execute('swapoff -a', True)\n\n        self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n\n        if os.path.exists(\"/oldroot/mnt/resource\"):\n            self.command_executor.Execute('umount /oldroot/mnt/resource')\n\n        if os.path.exists(\"/oldroot/mnt\"):\n            self.command_executor.Execute('umount /oldroot/mnt')\n\n        if os.path.exists(\"/oldroot/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')\n\n        if os.path.exists(\"/mnt\"):\n            self.command_executor.Execute('umount /mnt')\n\n        if os.path.exists(\"/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /mnt/azure_bek_disk')\n\n        self.command_executor.Execute('umount /oldroot/mnt/resource')\n        self.command_executor.Execute('umount /oldroot/boot')\n        self.command_executor.Execute('umount /oldroot/misc')\n        self.command_executor.Execute('umount /oldroot/net')\n\n    
    self.command_executor.Execute('telinit u', True)\n        self.command_executor.Execute('kill 1', True)\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm /oldroot\",\n                                      raise_exception_on_failure=False,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes using oldroot:\\n{0}\".format(proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA before committing suicide\")\n                self.context.logger.log(\"Current executable path: \" + sys.executable)\n                self.context.logger.log(\"Current executable arguments: \" + \" \".join(sys.argv))\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.Execute('service atd restart')\n\n                os.chdir('/')\n                with open(\"/delete-lock.sh\", \"w\") as f:\n                    f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n                self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n                self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        sleep(3)\n\n        self.command_executor.ExecuteInBash('for mp in `grep /oldroot /proc/mounts | cut -f2 -d\\' \\' | sort -r`; do umount $mp; done', True)\n\n        sleep(3)\n\n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Attempt #{0} for reloading udev rules\".format(attempt))\n            self.command_executor.ExecuteInBash('pkill -f .*udev.*')\n            self.command_executor.ExecuteInBash('udevd &')\n            self.command_executor.ExecuteInBash('udevadm control --reload-rules && sleep 3')\n\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                break\n\n            attempt += 1\n\n        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
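  {
    "path": "docs/examples/fuser_kill_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: shows how the PIDs\n# holding /oldroot open can be scraped out of `fuser -vm` output and\n# killed from the highest PID down, skipping init and the current\n# process, as UnmountOldrootState does. Python 2 style to match the repo.\n#\n\nimport os\nimport subprocess\n\ndef pids_using(mountpoint):\n    proc = subprocess.Popen(['fuser', '-vm', mountpoint],\n                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    out, err = proc.communicate()\n    # fuser prints PIDs among access-mode letters; keep the pure digits only\n    tokens = (out + err).split()\n    return sorted((int(t) for t in tokens if t.isdigit()), reverse=True)\n\ndef kill_holders(mountpoint):\n    for pid in pids_using(mountpoint):\n        if pid in (1, os.getpid()):\n            continue  # never kill init, and never kill ourselves here\n        subprocess.call(['kill', '-9', str(pid)])\n"
  },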
  {
    "path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom SelinuxState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/RHEL72EncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass RHEL72EncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='selinux', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_selinux',\n            'source': 'prereq',\n            'dest': 'selinux',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'selinux',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'unmount_oldroot',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'stop_machine',\n            
'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(RHEL72EncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(RHEL72EncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(RHEL72EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'selinux': SelinuxState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=RHEL72EncryptionStateMachine.states,\n                                     transitions=RHEL72EncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"mount\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        if '/dev/mapper/osencrypt' in proc_comm.stdout:\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_selinux()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n\n                sleep(10)\n                if attempt > 10:\n                    raise Exception(message)\n 
           else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
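  {
    "path": "docs/examples/transitions_machine_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: a minimal model of the\n# pattern the RHEL72 encryption state machine builds on the bundled\n# `transitions` library -- named State objects, a 'conditions' callback\n# gating each hop, and a self-loop trigger for retries. The Flow class\n# and its state names are illustrative only.\n#\n\nfrom transitions import Machine, State\n\nclass Flow(object):\n    states = [\n        State(name='uninitialized'),\n        State(name='working', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {'trigger': 'start', 'source': 'uninitialized', 'dest': 'working'},\n        {'trigger': 'retry', 'source': 'working', 'dest': 'working'},\n        {'trigger': 'finish', 'source': 'working', 'dest': 'completed',\n         'conditions': 'previous_state_done'},\n    ]\n\n    def __init__(self):\n        self.machine = Machine(model=self, states=Flow.states,\n                               transitions=Flow.transitions,\n                               initial='uninitialized')\n\n    def on_enter_state(self):\n        print('entering {0}'.format(self.state))\n\n    def previous_state_done(self):\n        # the real machines delegate to the source state's should_exit()\n        return True\n\nif __name__ == '__main__':\n    flow = Flow()\n    flow.start()   # -> working (on_enter_state fires)\n    flow.finish()  # -> completed, gated by previous_state_done()\n"
  },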
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom RHEL72EncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\nfrom distutils.version import LooseVersion\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n        \n        self.command_executor.Execute('mount /boot', False)\n        # self._find_bek_and_execute_action('_dump_passphrase')\n        self._find_bek_and_execute_action('_luks_format')\n        self._find_bek_and_execute_action('_luks_open')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        # Enable used space encryption on RHEL 7.3 and above\n        distro_info = self.context.distro_patcher.distro_info\n        if LooseVersion(distro_info[1]) >= LooseVersion('7.3'):\n            self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)\n        else:\n            self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt bs=52428800'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_format(self, bek_path):\n        self.command_executor.Execute('mkdir /boot/luks', True)\n        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)\n        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,\n                                                                                                   
                 self.rootfs_block_device),\n                                      raise_exception_on_failure=True)\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,\n                                                                                                                         bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)        \n"
  },
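  {
    "path": "docs/examples/luks_detached_header_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: builds the detached-\n# header cryptsetup invocations EncryptBlockDeviceState issues. Keeping\n# the LUKS header on /boot/luks (instead of on the device itself) is what\n# lets the old filesystem be dd-copied straight onto /dev/mapper/osencrypt\n# without the header clobbering data. The device and key paths below are\n# examples only.\n#\n\nHEADER = '/boot/luks/osluksheader'\n\ndef luks_format_cmd(bek_path, device):\n    # -q suppresses cryptsetup's interactive YES confirmation\n    return ('cryptsetup luksFormat --header {0} -d {1} {2} -q'\n            .format(HEADER, bek_path, device))\n\ndef luks_open_cmd(bek_path, device, name='osencrypt'):\n    return ('cryptsetup luksOpen --header {0} {1} {2} -d {3}'\n            .format(HEADER, device, name, bek_path))\n\nif __name__ == '__main__':\n    print(luks_format_cmd('/path/to/keyfile', '/dev/sda2'))\n    print(luks_open_cmd('/path/to/keyfile', '/dev/sda2'))\n"
  },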
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n        self.command_executor.ExecuteInBash('[ -e \"/boot/luks\" ]', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' 
+ CommonVariables.extension_name\n            self.command_executor.Execute('cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name))\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n            self.command_executor.Execute('systemctl restart waagent')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully\")\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        ademoduledir = os.path.join(scriptdir, '../../91ade')\n        dracutmodulesdir = '/lib/dracut/modules.d'\n        udevaderulepath = os.path.join(dracutmodulesdir, '91ade/50-udev-ade.rules')\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute('cp -r {0} /lib/dracut/modules.d/'.format(ademoduledir), True)\n\n        udevadm_cmd = \"udevadm info --attribute-walk --name={0}\".format(self.rootfs_block_device)\n        self.command_executor.Execute(command_to_execute=udevadm_cmd, raise_exception_on_failure=True, communicator=proc_comm)\n\n        matches = re.findall(r'ATTR{partition}==\"(.*)\"', proc_comm.stdout)\n        if not matches:\n            raise Exception(\"Could not parse ATTR{partition} from udevadm info\")\n        partition = matches[0]\n        sed_cmd = 'sed -i.bak s/ENCRYPTED_DISK_PARTITION/{0}/ \"{1}\"'.format(partition, udevaderulepath)\n        self.command_executor.Execute(command_to_execute=sed_cmd, raise_exception_on_failure=True)\n\n        self._append_contents_to_file('\\nGRUB_CMDLINE_LINUX+=\" rd.debug\"\\n', \n                                      '/etc/default/grub')\n\n        self._append_contents_to_file('\\nadd_drivers+=\" fuse vfat nls_cp437 nls_iso8859-1\"\\n',\n                                      '/etc/dracut.conf')\n        self._append_contents_to_file('\\nadd_dracutmodules+=\" crypt\"\\n',\n                                      '/etc/dracut.conf')\n\n        self.command_executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n        self.command_executor.Execute('grub2-install --recheck --force {0}'.format(self.rootfs_disk), True)\n        self.command_executor.Execute('grub2-mkconfig -o /boot/grub2/grub.cfg', True)\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\nfrom pprint import pprint\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if ((distro_info[0] == 'redhat' and distro_info[1] == '7.2') or\n            (distro_info[0] == 'redhat' and distro_info[1] == '7.3') or\n            (distro_info[0] == 'redhat' and distro_info[1] == '7.4') or\n            (distro_info[0] == 'redhat' and distro_info[1] == '7.5') or\n            (distro_info[0] == 'redhat' and distro_info[1] == '7.6') or\n            (distro_info[0] == 'redhat' and distro_info[1] == '7.7') or\n            (distro_info[0] == 'centos' and distro_info[1].startswith('7.7')) or\n            (distro_info[0] == 'centos' and distro_info[1].startswith('7.6')) or\n            (distro_info[0] == 'centos' and distro_info[1].startswith('7.5')) or\n            (distro_info[0] == 'centos' and distro_info[1].startswith('7.4')) or\n            (distro_info[0] == 'centos' and distro_info[1] == '7.3.1611') or\n            (distro_info[0] == 'centos' and distro_info[1] == '7.2.1511')):\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"RHEL72EncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                            distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n\n        self._patch_waagent()\n        self.command_executor.Execute('systemctl daemon-reload', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _patch_waagent(self):\n        self.context.logger.log(\"Patching waagent\")\n\n        contents = None\n\n        with open('/usr/lib/systemd/system/waagent.service', 'r') as f:\n            contents = f.read()\n\n        contents = re.sub(r'\\[Service\\]\\n', '[Service]\\nKillMode=process\\n', contents)\n\n        with 
open('/usr/lib/systemd/system/waagent.service', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"waagent patched successfully\")\n"
  },
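  {
    "path": "docs/examples/waagent_killmode_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: the KillMode patch that\n# the rhel_72 PrereqState applies to waagent.service, shown standalone.\n# KillMode=process makes systemd stop only the agent's main process, so\n# children such as this extension survive a waagent stop during the\n# stripdown phase. The patch_unit() name is illustrative.\n#\n\nimport re\n\nUNIT = '/usr/lib/systemd/system/waagent.service'\n\ndef patch_unit(path=UNIT):\n    with open(path, 'r') as f:\n        contents = f.read()\n    # insert KillMode=process directly under the [Service] header\n    contents = re.sub(r'\[Service\]\n', '[Service]\nKillMode=process\n', contents)\n    with open(path, 'w') as f:\n        f.write(contents)\n\nif __name__ == '__main__':\n    patch_unit()\n"
  },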
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if se_linux_status.lower() == 'enforcing':\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom OSEncryptionState import *\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /tmp/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /tmp/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /tmp/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin libexec lib lib64 share; do cp -ax /usr/$i /tmp/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /tmp/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /tmp/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /tmp/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/tmp/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('systemctl stop waagent', True)\n        self.command_executor.Execute('pivot_root /tmp/tmproot /tmp/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys run; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            # the restarted process shall see the marker and advance the state machine\n            self.command_executor.ExecuteInBash('sleep 30 && systemctl start waagent &', 
True)\n\n            self.context.hutil.do_exit(exit_code=0,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_success_status,\n                                       code=str(CommonVariables.success),\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n\n    def restart_systemd_services(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"systemctl list-units\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"waagent.service\" in line or \"sshd.service\" in line:\n                continue\n\n            match = re.search(r'\\s(\\S*?\\.service)', line)\n            if match:\n                service = match.groups()[0]\n                self.command_executor.Execute('systemctl restart {0}'.format(service))\n\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)\n        self.command_executor.ExecuteInBash('systemctl restart sshd.service')\n        self.command_executor.ExecuteInBash('dhclient')\n\n        self.restart_systemd_services()\n\n        self.command_executor.Execute('swapoff -a', True)\n        if os.path.exists(\"/oldroot/mnt/resource\"):\n            self.command_executor.Execute('umount /oldroot/mnt/resource')\n\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"fuser -vm /oldroot\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Processes using oldroot:\\n{0}\".format(proc_comm.stdout))\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA in 30 seconds before committing suicide\")\n                \n  
              # This is a workaround for the bug on CentOS/RHEL 7.2 where systemd-udevd\n                # needs to be restarted and the drive mounted/unmounted.\n                # Otherwise the dir becomes inaccessible, fuse says: Transport endpoint is not connected\n                self.command_executor.Execute('systemctl restart systemd-udevd', True)\n                self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n                self.command_executor.Execute('systemctl restart systemd-udevd', True)\n\n                self.bek_util.get_bek_passphrase_file(self.encryption_config)\n                self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n                self.command_executor.Execute('systemctl restart systemd-udevd', True)\n                self.command_executor.ExecuteInBash('sleep 30 && systemctl start waagent &', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        # Re-execute systemd, get pid 1 to use the new root\n        self.command_executor.Execute('telinit u', True)\n        sleep(3)\n        self.command_executor.Execute('umount /oldroot', True)\n        self.restart_systemd_services()\n\n        #\n        # With the recent release of 7.4 it was found that even after unmounting\n        # oldroot, there were some open handles to the root file system block device.\n        # The below logic tries to find the offending mount by grepping /proc/*/task/*/mountinfo\n        # and kill the respective processes so that encryption can proceed\n        #\n        proc_comm = ProcessCommunicator()\n\n        # Example: grep for /dev/sda2 in the files /proc/*task/*/mountinfo and remove results of the grep process itself.\n        # If grep -v grep is not applied, then the command throws an exception\n        self.command_executor.ExecuteInBash(\n                command_to_execute=\"grep {0} /proc/*/task/*/mountinfo | grep -v grep\".format(self.rootfs_sdx_path),\n                raise_exception_on_failure=False,\n                communicator=proc_comm)\n        procs_to_kill = filter(lambda path: path.startswith('/proc/'), proc_comm.stdout.split())\n        procs_to_kill = map(lambda path: int(path.split('/')[2]), procs_to_kill)\n        procs_to_kill = list(reversed(sorted(procs_to_kill)))\n        self.context.logger.log(\"Processes with tasks using {0}:\\n{1}\".format(self.rootfs_sdx_path, procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"This extension is holding on to {0}. 
\"\n                        \"This is not expected...\".format(self.rootfs_sdx_path))\n                continue\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        sleep(3)\n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Attempt #{0} for restarting systemd-udevd\".format(attempt))\n            self.command_executor.Execute('systemctl restart systemd-udevd')\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                break\n\n            attempt += 1\n\n        sleep(3)\n        self.command_executor.Execute('xfs_repair {0}'.format(self.rootfs_block_device), True)\n\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
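  {
    "path": "docs/examples/mountinfo_scrape_sketch.py",
    "content": "#!/usr/bin/env python\n#\n# Hypothetical sketch, not part of the extension: reproduces the\n# /proc/*/task/*/mountinfo scrape UnmountOldrootState added for RHEL 7.4,\n# which finds processes still holding the root block device after\n# /oldroot has been unmounted. Python 2 style to match the repo.\n#\n\nimport subprocess\n\ndef pids_holding_device(device):\n    # grep -v grep drops the grep process's own mountinfo hit, which\n    # would otherwise show up in every run of this pipeline\n    cmd = 'grep {0} /proc/*/task/*/mountinfo | grep -v grep'.format(device)\n    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n    out, _ = proc.communicate()\n    pids = set()\n    for token in out.split():\n        if token.startswith('/proc/'):\n            # token looks like /proc/<pid>/task/<tid>/mountinfo:<line>\n            pids.add(int(token.split('/')[2]))\n    return sorted(pids, reverse=True)\n\nif __name__ == '__main__':\n    print(pids_holding_device('/dev/sda2'))\n"
  },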
  {
    "path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom SelinuxState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/RHEL72LVMEncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass RHEL72LVMEncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='selinux', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_selinux',\n            'source': 'prereq',\n            'dest': 'selinux',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'selinux',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'unmount_oldroot',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'stop_machine',\n           
 'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(RHEL72LVMEncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(RHEL72LVMEncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(RHEL72LVMEncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'selinux': SelinuxState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=RHEL72LVMEncryptionStateMachine.states,\n                                     transitions=RHEL72LVMEncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"pvdisplay\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        patch_boot_system_state_marker = os.path.join(self.encryption_environment.os_encryption_markers_path, 'PatchBootSystemState')\n        if '/dev/mapper/osencrypt' in proc_comm.stdout and os.path.exists(patch_boot_system_state_marker):\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_selinux()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            
status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n\n                sleep(10)\n                if attempt > 10:\n                    raise Exception(message)\n            else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom RHEL72LVMEncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n        \n        self.command_executor.Execute('mount /boot', False)\n        # self._find_bek_and_execute_action('_dump_passphrase')\n        self._find_bek_and_execute_action('_luks_format')\n        self._find_bek_and_execute_action('_luks_open')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_format(self, bek_path):\n        self.command_executor.Execute('mkdir /boot/luks', True)\n        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)\n        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,\n                                                                                                                    self.rootfs_block_device),\n                                      raise_exception_on_failure=True)\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,\n                                                                                                                         bek_path),\n                                      raise_exception_on_failure=True)\n\n    def 
_dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)        \n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('systemctl restart lvm2-lvmetad', True)\n        self.command_executor.Execute('pvscan', True)\n        self.command_executor.Execute('vgcfgrestore -f /volumes.lvm rootvg', True)\n        self.command_executor.Execute('cryptsetup luksClose osencrypt', True)\n\n        self._find_bek_and_execute_action('_luks_open')\n\n        self.unmount_lvm_volumes()\n        \n        self.command_executor.Execute('mount /dev/rootvg/rootlv /oldroot', True)\n        self.command_executor.Execute('mount /dev/rootvg/varlv /oldroot/var', True)\n        self.command_executor.Execute('mount /dev/rootvg/usrlv /oldroot/usr', True)\n        self.command_executor.Execute('mount /dev/rootvg/tmplv /oldroot/tmp', True)\n        self.command_executor.Execute('mount /dev/rootvg/homelv /oldroot/home', True)\n        self.command_executor.Execute('mount /dev/rootvg/optlv /oldroot/opt', True)\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n        self.command_executor.ExecuteInBash('[ -e \"/boot/luks\" ]', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            
self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' + CommonVariables.extension_name\n            self.command_executor.Execute('/bin/cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name))\n            self.command_executor.ExecuteInBash('/bin/cp -ax' +\n                                                ' /var/lib/azure_disk_encryption_config/os_encryption_markers/*' +\n                                                ' /oldroot/var/lib/azure_disk_encryption_config/os_encryption_markers/',\n                                                True)\n            self.command_executor.Execute('touch /oldroot/var/lib/azure_disk_encryption_config/os_encryption_markers/PatchBootSystemState', True)\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n            self.command_executor.Execute('systemctl restart waagent')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully\")\n\n            self.unmount_lvm_volumes()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def unmount_lvm_volumes(self):\n        self.command_executor.Execute('swapoff -a', True)\n        self.command_executor.Execute('umount -a')\n\n        for mountpoint in ['/var', '/opt', '/tmp', '/home', '/usr']:\n            if self.command_executor.Execute('mountpoint /oldroot' + mountpoint) == 0:\n                self.unmount('/oldroot' + mountpoint)\n            if self.command_executor.Execute('mountpoint ' + mountpoint) == 0:\n                self.unmount(mountpoint)\n\n        self.unmount_var()\n\n    def unmount_var(self):\n        unmounted = False\n\n        while not unmounted:\n            self.command_executor.Execute('systemctl stop NetworkManager')\n            self.command_executor.Execute('systemctl stop rsyslog')\n            self.command_executor.Execute('systemctl stop systemd-udevd')\n            self.command_executor.Execute('systemctl stop systemd-journald')\n            self.command_executor.Execute('systemctl stop systemd-hostnamed')\n            self.command_executor.Execute('systemctl stop atd')\n            self.command_executor.Execute('systemctl stop postfix')\n            self.unmount('/var')\n\n            sleep(3)\n\n            if self.command_executor.Execute('mountpoint /var'):\n                unmounted = True\n\n    def unmount(self, mountpoint):\n        if mountpoint != '/var':\n            self.unmount_var()\n\n        if self.command_executor.Execute(\"mountpoint \" + mountpoint):\n            return\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm \" + mountpoint,\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes using {0}:\\n{1}\".format(mountpoint, proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), 
proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA before committing suicide\")\n                self.context.logger.log(\"Current executable path: \" + sys.executable)\n                self.context.logger.log(\"Current executable arguments: \" + \" \".join(sys.argv))\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.Execute('systemctl restart atd')\n\n                os.chdir('/')\n                with open(\"/delete-lock.sh\", \"w\") as f:\n                    f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n                self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n                self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        self.command_executor.Execute('telinit u', True)\n\n        sleep(3)\n\n        self.command_executor.Execute('umount ' + mountpoint, True)\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        ademoduledir = os.path.join(scriptdir, '../../91ade')\n        dracutmodulesdir = '/lib/dracut/modules.d'\n        udevaderulepath = os.path.join(dracutmodulesdir, '91ade/50-udev-ade.rules')\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute('cp -r {0} /lib/dracut/modules.d/'.format(ademoduledir), True)\n\n        udevadm_cmd = \"udevadm info --attribute-walk --name={0}\".format(self.rootfs_block_device)\n        self.command_executor.Execute(command_to_execute=udevadm_cmd, raise_exception_on_failure=True, communicator=proc_comm)\n\n        matches = re.findall(r'ATTR{partition}==\"(.*)\"', proc_comm.stdout)\n        if not matches:\n            raise Exception(\"Could not parse ATTR{partition} from udevadm info\")\n        partition = matches[0]\n        sed_cmd = 'sed -i.bak s/ENCRYPTED_DISK_PARTITION/{0}/ \"{1}\"'.format(partition, udevaderulepath)\n        self.command_executor.Execute(command_to_execute=sed_cmd, raise_exception_on_failure=True)\n\n        self._append_contents_to_file('\\nGRUB_CMDLINE_LINUX+=\" rd.debug\"\\n',\n                                      '/etc/default/grub')\n\n        self._append_contents_to_file('\\nadd_drivers+=\" fuse vfat nls_cp437 nls_iso8859-1\"\\n',\n                                      '/etc/dracut.conf')\n        self._append_contents_to_file('\\nadd_dracutmodules+=\" crypt\"\\n',\n                                      '/etc/dracut.conf')\n\n        self.command_executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n        self.command_executor.Execute('grub2-install --recheck --force {0}'.format(self.rootfs_disk), True)\n        
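# regenerate grub.cfg so the kernel command line appended above takes effect\n        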
self.command_executor.Execute('grub2-mkconfig -o /boot/grub2/grub.cfg', True)\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('mount /boot')\n        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,\n                                                                                                                         bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)    \n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\nfrom pprint import pprint\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if (((distro_info[0] == 'centos' and distro_info[1] == '7.3.1611') or\n             (distro_info[0] == 'centos' and distro_info[1].startswith('7.4')) or\n             (distro_info[0] == 'centos' and distro_info[1].startswith('7.5')) or\n             (distro_info[0] == 'centos' and distro_info[1].startswith('7.6')) or\n             (distro_info[0] == 'centos' and distro_info[1].startswith('7.7')) or\n             (distro_info[0] == 'redhat' and distro_info[1] == '7.3') or\n             (distro_info[0] == 'redhat' and distro_info[1] == '7.4') or\n             (distro_info[0] == 'redhat' and distro_info[1] == '7.5') or\n             (distro_info[0] == 'redhat' and distro_info[1] == '7.6') or\n             (distro_info[0] == 'redhat' and distro_info[1] == '7.7')) and\n            self.disk_util.is_os_disk_lvm()):\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"RHEL72LVMEncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                            distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n\n        self._patch_waagent()\n        self.command_executor.Execute('systemctl daemon-reload', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _patch_waagent(self):\n        self.context.logger.log(\"Patching waagent\")\n\n        contents = None\n\n        with open('/usr/lib/systemd/system/waagent.service', 'r') as f:\n            contents = f.read()\n\n        contents = re.sub(r'\\[Service\\]\\n', '[Service]\\nKillMode=process\\n', contents)\n\n        with open('/usr/lib/systemd/system/waagent.service', 'w') as f:\n            f.write(contents)\n\n        
self.context.logger.log(\"waagent patched successfully\")\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if se_linux_status.lower() == 'enforcing':\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom OSEncryptionState import *\nfrom time import sleep\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /usr/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n        \n        self.command_executor.Execute('swapoff -a')\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /usr/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /usr/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /usr/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /usr/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin libexec lib lib64 share; do cp -ax /usr/$i /usr/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /usr/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /usr/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /usr/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/usr/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('systemctl stop waagent', True)\n        self.command_executor.Execute('pivot_root /usr/tmproot /usr/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys run; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            self.command_executor.ExecuteInBash('rm -f /run/systemd/generator/*.mount', 
True)\n            self.command_executor.ExecuteInBash('rm -f /run/systemd/generator/local-fs.target.requires/*.mount', True)\n            self.command_executor.Execute(\"sed -i.bak '/rootvg/d' /etc/fstab\", True)\n\n            self.command_executor.Execute('telinit u', True)\n\n            sleep(10)\n\n            if self.command_executor.Execute('mountpoint /var') == 0:\n                self.command_executor.Execute('umount /var', True)\n\n            # restart atd and schedule a waagent restart; the restarted extension process will see the marker and advance the state machine\n            self.command_executor.Execute('systemctl restart atd', True)\n\n            os.chdir('/')\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"systemctl restart waagent\\n\")\n            self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n\n            self.context.hutil.do_exit(exit_code=0,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_success_status,\n                                       code=str(CommonVariables.success),\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n\n        self.unmount_var()\n\n        self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)\n        self.command_executor.ExecuteInBash('systemctl restart sshd.service')\n        self.command_executor.ExecuteInBash('dhclient')\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"systemctl list-units\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"waagent.service\" in line or \"sshd.service\" in line or \"journald.service\" in line:\n                continue\n\n            match = re.search(r'\\s(\\S*?\\.service)', line)\n            if match:\n                service = match.groups()[0]\n                self.command_executor.Execute('systemctl restart {0}'.format(service))\n\n        self.command_executor.Execute('swapoff -a', True)\n\n        if os.path.exists(\"/oldroot/mnt/resource\"):\n            self.command_executor.Execute('umount /oldroot/mnt/resource')\n\n        sleep(3)\n        \n        self.unmount('/oldroot/opt')\n        self.unmount('/oldroot/var')\n        self.unmount('/oldroot/usr')\n        self.unmount('/oldroot')\n\n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Attempt #{0} for restarting systemd-udevd\".format(attempt))\n            self.command_executor.Execute('systemctl restart systemd-udevd')\n\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                
break\n\n            attempt += 1\n\n        self.unmount_var()\n\n        sleep(3)\n\n        self.command_executor.Execute('vgcfgbackup -f /volumes.lvm rootvg', True)\n        self.command_executor.Execute('sed -i.bak \\'s/sda2/mapper\\/osencrypt/g\\' /volumes.lvm', True)\n        self.command_executor.Execute('lvremove -f rootvg', True)\n        self.command_executor.Execute('vgremove rootvg', True)\n\n    def unmount_var(self):\n        unmounted = False\n\n        while not unmounted:\n            self.command_executor.Execute('systemctl stop NetworkManager')\n            self.command_executor.Execute('systemctl stop rsyslog')\n            self.command_executor.Execute('systemctl stop systemd-udevd')\n            self.command_executor.Execute('systemctl stop systemd-journald')\n            self.command_executor.Execute('systemctl stop systemd-hostnamed')\n            self.command_executor.Execute('systemctl stop atd')\n            self.command_executor.Execute('systemctl stop postfix')\n            self.unmount('/var')\n\n            sleep(3)\n\n            if self.command_executor.Execute('mountpoint /var'):\n                unmounted = True\n\n    def unmount(self, mountpoint, call_unmount_var=True):\n        if mountpoint != '/var':\n            self.unmount_var()\n\n        if self.command_executor.Execute(\"mountpoint \" + mountpoint):\n            return\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm \" + mountpoint,\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes using {0}:\\n{1}\".format(mountpoint, proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA before committing suicide\")\n                self.context.logger.log(\"Current executable path: \" + sys.executable)\n                self.context.logger.log(\"Current executable arguments: \" + \" \".join(sys.argv))\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.Execute('systemctl restart atd')\n\n                os.chdir('/')\n                with open(\"/delete-lock.sh\", \"w\") as f:\n                    f.write(\"rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\\n\")\n\n                self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)\n                self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)\n                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        self.command_executor.Execute('telinit u', True)\n\n        sleep(10)\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"systemctl list-units\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in 
proc_comm.stdout.split('\\n'):\n            match = re.search(r'\\s(\\S*?\\.mount)', line)\n            if match:\n                mount = match.groups()[0]\n                self.command_executor.Execute('systemctl stop {0}'.format(mount))\n\n        sleep(10)\n\n        if self.command_executor.Execute('mountpoint /var') == 0:\n            self.command_executor.Execute('umount /var', True)\n\n        sleep(3)\n\n        if self.command_executor.Execute('mountpoint ' + mountpoint) == 0:\n            self.command_executor.Execute('umount ' + mountpoint, True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom SelinuxState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/Ubuntu1404EncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass Ubuntu1404EncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='split_root_partition', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'prereq',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_split_root_partition',\n            'source': 'unmount_oldroot',\n            'dest': 'split_root_partition',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'split_root_partition',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        
{\n            'trigger': 'stop_machine',\n            'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(Ubuntu1404EncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(Ubuntu1404EncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(Ubuntu1404EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'split_root_partition': SplitRootPartitionState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=Ubuntu1404EncryptionStateMachine.states,\n                                     transitions=Ubuntu1404EncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"mount\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        if '/dev/mapper/osencrypt' in proc_comm.stdout:\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n\n                sleep(10)\n                if attempt > 10:\n              
      raise Exception(message)\n            else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_split_root_partition()\n        self.log_machine_state()\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom Ubuntu1404EncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptpatches/ubuntu_1404_initramfs.patch",
    "content": "diff -Naur initramfs-tools.orig/hooks/cryptroot initramfs-tools/hooks/cryptroot\n--- initramfs-tools.orig/hooks/cryptroot\t2016-10-27 20:26:44.920064500 +0000\n+++ initramfs-tools/hooks/cryptroot\t2016-10-27 20:27:15.922161900 +0000\n@@ -511,10 +511,7 @@\n \n # Find the root and resume device(s)\n if [ -r /etc/crypttab ]; then\n-\trootdev=$(get_root_device)\n-\tif [ -z \"$rootdev\" ]; then\n-\t\techo \"cryptsetup: WARNING: could not determine root device from /etc/fstab\" >&2\n-\tfi\n+\trootdev=\"osencrypt\"\n \tresumedevs=$(get_resume_devices)\n fi\n \ndiff -Naur initramfs-tools.orig/scripts/local-top/cryptroot initramfs-tools/scripts/local-top/cryptroot\n--- initramfs-tools.orig/scripts/local-top/cryptroot\t2016-10-27 20:26:44.916064500 +0000\n+++ initramfs-tools/scripts/local-top/cryptroot\t2016-10-27 20:28:01.621309300 +0000\n@@ -229,11 +229,7 @@\n \tif [ \"$cryptdiscard\" = \"yes\" ]; then\n \t\tcryptcreate=\"$cryptcreate --allow-discards\"\n \tfi\n-\tif /sbin/cryptsetup isLuks $cryptsource >/dev/null 2>&1; then\n-\t\tcryptcreate=\"$cryptcreate luksOpen $cryptsource $crypttarget\"\n-\telse\n-\t\tcryptcreate=\"$cryptcreate -c $cryptcipher -s $cryptsize -h $crypthash create $crypttarget $cryptsource\"\n-\tfi\n+\tcryptcreate=\"$cryptcreate luksOpen $cryptsource $crypttarget --header=/boot/luks/osluksheader\"\n \tcryptremove=\"/sbin/cryptsetup remove $crypttarget\"\n \tNEWROOT=\"/dev/mapper/$crypttarget\"\n \n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptscripts/azure_crypt_key.sh",
    "content": "#!/bin/sh\nMountPoint=/tmp-keydisk-mount\nKeyFileName=LinuxPassPhraseFileName\necho \"Trying to get the key from disks ...\" >&2\nmkdir -p $MountPoint\nmodprobe nls_utf8 >/dev/null 2>&1\nmodprobe nls_cp437 >/dev/null 2>&1\nmodprobe vfat >/dev/null 2>&1\nsleep 2\nOPENED=0\ncd /sys/block\nfor DEV in sd*; do\n\techo \"> Trying device: $DEV ...\" >&2\n\tmount -t vfat -r /dev/${DEV}1 $MountPoint >&2 2>&1\n\tif [ -f $MountPoint/$KeyFileName ]; then\n\t\tcat $MountPoint/$KeyFileName && echo \"Success loading keyfile!\" >&2\n\t\tumount $MountPoint 2>/dev/null\n\t\tOPENED=1\n\t\tbreak\n\tfi\n\tumount $MountPoint 2>/dev/null\ndone\nif [ $OPENED -eq 0 ]; then\n\techo \"FAILED to find suitable passphrase file ...\" >&2\n\techo -n \"Try to enter your password: \" >&2\n\tread -r A </dev/console\n\techo -n \"$A\"\nfi\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptscripts/inject_luks_header.sh",
    "content": "#!/bin/sh -e\n\nPREREQS=\"\"\n\nprereqs() { echo \"$PREREQS\"; }\n\ncase \"$1\" in\n    prereqs)\n    prereqs\n    exit 0\n    ;;\nesac\n\n. /usr/share/initramfs-tools/hook-functions\n\nmkdir -p ${DESTDIR}/boot/luks\ncopy_exec /boot/luks/osluksheader /boot/luks\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n        \n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('service udev restart', False)\n\n        # self._find_bek_and_execute_action('_dump_passphrase')\n        self._find_bek_and_execute_action('_luks_format')\n        self._find_bek_and_execute_action('_luks_open')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_format(self, bek_path):\n        self.command_executor.Execute('rm -rf /boot/luks', True)\n        self.command_executor.Execute('mkdir /boot/luks', True)\n        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)\n        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,\n                                                                                                                    self.rootfs_block_device),\n                                      raise_exception_on_failure=True)\n\n    def _luks_open(self, bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen --header 
/boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,\n                                                                                                                         bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)        \n"
  },
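The state above implements in-place OS-disk encryption with a detached LUKS header kept on the (unencrypted) /boot partition: format the header against the BEK keyfile, open the dm-crypt mapping, then stream the plaintext filesystem into the mapping. A minimal sketch of that flow, using plain subprocess instead of the extension's CommandExecutor and with hypothetical paths:

import subprocess

def encrypt_in_place(block_device, header_path, keyfile, mapper='osencrypt'):
    # Reserve 32 MiB for the detached header (matches bs=33554432 count=1 above).
    subprocess.check_call(['dd', 'if=/dev/zero', 'of=' + header_path,
                           'bs=33554432', 'count=1'])
    # luksFormat writes the key slots into the detached header file,
    # leaving every sector of the data device available for payload.
    subprocess.check_call(['cryptsetup', 'luksFormat', '--header', header_path,
                           '-d', keyfile, block_device, '-q'])
    # luksOpen creates /dev/mapper/<mapper> backed by block_device.
    subprocess.check_call(['cryptsetup', 'luksOpen', '--header', header_path,
                           block_device, mapper, '-d', keyfile])
    # Reading the raw device while writing through the mapping encrypts the
    # disk in place; conv=sparse skips all-zero blocks to save time.
    subprocess.check_call(['dd', 'if=' + block_device,
                           'of=/dev/mapper/' + mapper,
                           'conv=sparse', 'bs=64K'])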
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\n\nfrom time import sleep\nfrom CommandExecutor import *\nfrom OSEncryptionState import *\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n        self.command_executor.ExecuteInBash('[ -e \"/boot/luks\" ]', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' 
+ CommonVariables.extension_name\n            self.command_executor.Execute('cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name),\n                                          True)\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully\")\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        encryptscriptsdir = os.path.join(scriptdir, '../encryptscripts')\n        injectscriptpath = os.path.join(encryptscriptsdir, 'inject_luks_header.sh')\n\n        if not os.path.exists(injectscriptpath):\n            message = \"Inject-script not found at path: {0}\".format(injectscriptpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Inject-script found at path: {0}\".format(injectscriptpath))\n\n        self.command_executor.Execute('cp {0} /usr/share/initramfs-tools/hooks/luksheader'.format(injectscriptpath), True)\n        self.command_executor.Execute('chmod +x /usr/share/initramfs-tools/hooks/luksheader', True)\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        patchesdir = os.path.join(scriptdir, '../encryptpatches')\n        patchpath = os.path.join(patchesdir, 'ubuntu_1404_initramfs.patch')\n\n        if not os.path.exists(patchpath):\n            message = \"Patch not found at path: {0}\".format(patchpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Patch found at path: {0}\".format(patchpath))\n        \n        self.command_executor.ExecuteInBash('patch -b -d /usr/share/initramfs-tools -p1 <{0}'.format(patchpath), True)\n        \n        entry = 'osencrypt /dev/sda1 none luks,discard,header=/boot/luks/osluksheader,keyscript=/usr/sbin/azure_crypt_key.sh'\n        self._append_contents_to_file(entry, '/etc/crypttab')\n\n        self.command_executor.Execute('update-initramfs -u -k all', True)\n\n        proc_comm = ProcessCommunicator()\n        self.command_executor.ExecuteInBash(command_to_execute=\"lsinitramfs /boot/initrd*\",\n                                            raise_exception_on_failure=True,\n                                            communicator=proc_comm)\n\n        if not \"azure_crypt_key.sh\" in proc_comm.stdout or not \"osluksheader\" in proc_comm.stdout:\n            raise Exception(\"initramfs update failed\")\n\n        self.command_executor.Execute('update-grub', True)\n        self.command_executor.Execute('grub-install --recheck --force {0}'.format(self.rootfs_disk), True)\n\n    def _get_uuid(self, partition_name):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"blkid -s UUID -o value 
{0}\".format(partition_name),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return proc_comm.stdout.strip()\n"
  },
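PatchBootSystemState relies on a symmetric pivot_root dance: pivot into the freshly encrypted root, patch it, and unwind the same way whether the patch succeeded or raised. A compact sketch of that pattern (hypothetical run() helper; the mount --move loops for /dev, /proc, /sys and /boot are omitted for brevity):

import subprocess

def run(cmd):
    subprocess.check_call(cmd, shell=True)

def with_pivoted_root(modify):
    run('mount /dev/mapper/osencrypt /oldroot')
    run('mount --make-rprivate /')
    run('mkdir -p /oldroot/memroot')
    run('pivot_root /oldroot /oldroot/memroot')
    try:
        modify()  # e.g. patch initramfs hooks, crypttab and grub
    finally:
        # The unwind is identical on success and on failure, mirroring
        # the duplicated except/else blocks in the state above.
        run('mount --make-rprivate /')
        run('pivot_root /memroot /memroot/oldroot')
        run('rmdir /oldroot/memroot')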
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if distro_info[0] == 'Ubuntu' and distro_info[1] == '14.04':\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"Ubuntu1404EncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                                distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n\n        self.command_executor.Execute('telinit u', True)\n\n        self._copy_key_script()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _copy_key_script(self):\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        encryptscriptsdir = os.path.join(scriptdir, '../encryptscripts')\n        keyscriptpath = os.path.join(encryptscriptsdir, 'azure_crypt_key.sh')\n\n        if not os.path.exists(keyscriptpath):\n            message = \"Key script not found at path: {0}\".format(keyscriptpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Key script found at path: {0}\".format(keyscriptpath))\n\n        self.command_executor.Execute('cp {0} /usr/sbin/azure_crypt_key.sh'.format(keyscriptpath), True)\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if(se_linux_status.lower() == 'enforcing'):\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/SplitRootPartitionState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nimport parted\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass SplitRootPartitionState(OSEncryptionState):\n    def __init__(self, context):\n        super(SplitRootPartitionState, self).__init__('SplitRootPartitionState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter split_root_partition state\")\n\n        if not super(SplitRootPartitionState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for split_root_partition state\")\n\n        self.command_executor.Execute(\"e2fsck -yf {0}\".format(self.rootfs_block_device), True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering split_root_partition state\")\n\n        device = parted.getDevice(self.rootfs_disk)\n        disk = parted.Disk(device)\n\n        original_root_fs_size = self._get_root_fs_size_in(device.sectorSize)\n        self.context.logger.log(\"Original root filesystem size (sectors): {0}\".format(original_root_fs_size))\n\n        desired_boot_partition_size = parted.sizeToSectors(256, 'MiB', device.sectorSize)\n        self.context.logger.log(\"Desired boot partition size (sectors): {0}\".format(desired_boot_partition_size))\n        \n        root_partition = disk.partitions[0]\n\n        original_root_partition_start = root_partition.geometry.start\n        original_root_partition_end = root_partition.geometry.end\n\n        self.context.logger.log(\"Original root partition start (sectors): {0}\".format(original_root_partition_start))\n        self.context.logger.log(\"Original root partition end (sectors): {0}\".format(original_root_partition_end))\n\n        desired_root_partition_start = original_root_partition_start\n        desired_root_partition_end = original_root_partition_end - desired_boot_partition_size\n        desired_root_partition_size = desired_root_partition_end - desired_root_partition_start\n\n        self.context.logger.log(\"Desired root partition start (sectors): {0}\".format(desired_root_partition_start))\n        self.context.logger.log(\"Desired root partition end (sectors): {0}\".format(desired_root_partition_end))\n        self.context.logger.log(\"Desired root partition size (sectors): {0}\".format(desired_root_partition_size))\n        \n        self.context.logger.log(\"Resizing root filesystem\")\n        desired_root_fs_size = desired_root_partition_size\n        self._resize_root_fs_to_sectors(desired_root_fs_size, device.sectorSize)\n\n        desired_root_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_root_partition_start,\n      
                                                    length=desired_root_partition_size)\n        root_partition_constraint = parted.Constraint(exactGeom=desired_root_partition_geometry)\n        disk.setPartitionGeometry(partition=root_partition,\n                                  constraint=root_partition_constraint,\n                                  start=desired_root_partition_start,\n                                  end=desired_root_partition_end)\n\n        desired_boot_partition_start = disk.getFreeSpaceRegions()[1].start\n        desired_boot_partition_end = disk.getFreeSpaceRegions()[1].end\n        desired_boot_partition_size = disk.getFreeSpaceRegions()[1].length\n\n        self.context.logger.log(\"Desired boot partition start (sectors): {0}\".format(desired_boot_partition_start))\n        self.context.logger.log(\"Desired boot partition end (sectors): {0}\".format(desired_boot_partition_end))\n\n        desired_boot_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_boot_partition_start,\n                                                          length=desired_boot_partition_size)\n        boot_partition_constraint = parted.Constraint(exactGeom=desired_boot_partition_geometry)\n        desired_boot_partition = parted.Partition(disk=disk,\n                                                  type=parted.PARTITION_NORMAL,\n                                                  geometry=desired_boot_partition_geometry)\n\n        disk.addPartition(partition=desired_boot_partition, constraint=boot_partition_constraint)\n\n        disk.commit()\n\n        probed_root_fs = parted.probeFileSystem(disk.partitions[0].geometry)\n        if not probed_root_fs == 'ext4':\n            raise Exception(\"Probed root fs is not ext4\")\n\n        disk.partitions[1].setFlag(parted.PARTITION_BOOT)\n\n        disk.commit()\n        \n        self.command_executor.Execute(\"partprobe\", False)\n        self.command_executor.Execute(\"mkfs.ext2 {0}\".format(self.bootfs_block_device), True)\n        \n        boot_partition_uuid = self._get_uuid(self.bootfs_block_device)\n\n        # Move stuff from /oldroot/boot to new partition, make new partition mountable at the same spot\n        self.command_executor.Execute(\"mount {0} /oldroot\".format(self.rootfs_block_device), True)\n        self.command_executor.Execute(\"mkdir -p /boot\", True)\n        self.command_executor.Execute(\"cp /oldroot/etc/fstab /etc/fstab\", True)\n        self._append_boot_partition_uuid_to_fstab(boot_partition_uuid)\n        self.command_executor.Execute(\"cp /etc/fstab /oldroot/etc/fstab\", True)\n        self.command_executor.Execute(\"mount /boot\", True)\n        self.command_executor.ExecuteInBash(\"mv /oldroot/boot/* /boot/\", True)\n        self.command_executor.Execute(\"umount /boot\", True)\n        self.command_executor.Execute(\"umount /oldroot\", True)\n        \n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit split_root_partition state\")\n        \n        self.command_executor.ExecuteInBash(\"mount /boot || mountpoint /boot\", True)\n        self.command_executor.ExecuteInBash(\"[ -e /boot/grub ]\", True)\n        self.command_executor.Execute(\"umount /boot\", True)\n\n        return super(SplitRootPartitionState, self).should_exit()\n\n    def _get_uuid(self, partition_name):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"blkid -s UUID -o 
value {0}\".format(partition_name),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return proc_comm.stdout.strip()\n\n    def _append_boot_partition_uuid_to_fstab(self, boot_partition_uuid):\n        self.context.logger.log(\"Updating fstab\")\n\n        contents = None\n\n        with open('/etc/fstab', 'r') as f:\n            contents = f.read()\n\n        contents += '\\n'\n        contents += 'UUID={0}\\t/boot\\text2\\tdefaults\\t0 0'.format(boot_partition_uuid)\n        contents += '\\n'\n\n        with open('/etc/fstab', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"fstab updated successfully\")\n\n    def _get_root_fs_size_in(self, sector_size):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"dumpe2fs -h {0}\".format(self.rootfs_block_device),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        root_fs_block_count = re.findall(r'Block count:\\s*(\\d+)', proc_comm.stdout)\n        root_fs_block_size = re.findall(r'Block size:\\s*(\\d+)', proc_comm.stdout)\n\n        if not root_fs_block_count or not root_fs_block_size:\n            raise Exception(\"Error parsing dumpe2fs output, count={0}, size={1}\".format(root_fs_block_count,\n                                                                                        root_fs_block_size))\n\n        root_fs_block_count = int(root_fs_block_count[0])\n        root_fs_block_size = int(root_fs_block_size[0])\n        root_fs_size = parted.sizeToSectors(root_fs_block_count * root_fs_block_size, 'B', sector_size)\n\n        return root_fs_size\n\n    def _resize_root_fs_to_sectors(self, desired_root_fs_size, sectorSize):\n        self.context.logger.log(\"Desired root filesystem size (sectors): {0}\".format(desired_root_fs_size))\n\n        self.command_executor.Execute(\"resize2fs {0} {1}s\".format(self.rootfs_block_device, desired_root_fs_size), True)\n\n        resized_root_fs_size = self._get_root_fs_size_in(sectorSize)\n\n        self.context.logger.log(\"Resized root filesystem size (sectors): {0}\".format(resized_root_fs_size))\n"
  },
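SplitRootPartitionState carves a 256 MiB /boot partition out of the tail of the root partition, working entirely in sectors via pyparted. A sketch of the same sector arithmetic on a hypothetical disk (the real state first shrinks the filesystem with resize2fs before moving the partition boundary):

import parted

def plan_boot_partition(disk_path='/dev/sda', boot_mib=256):
    device = parted.getDevice(disk_path)
    disk = parted.Disk(device)
    root = disk.partitions[0]
    # Convert the desired /boot size into device sectors.
    boot_sectors = parted.sizeToSectors(boot_mib, 'MiB', device.sectorSize)
    # Shrink root from the end; the freed tail becomes the boot partition.
    new_root_end = root.geometry.end - boot_sectors
    return {'root_start': root.geometry.start,
            'root_end': new_root_end,
            'boot_start': new_root_end + 1,
            'boot_end': root.geometry.end}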
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom CommandExecutor import *\nfrom OSEncryptionState import *\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /tmp/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /tmp/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /tmp/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin lib share; do cp -ax /usr/$i /tmp/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /tmp/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /tmp/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /tmp/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/tmp/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('pivot_root /tmp/tmproot /tmp/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys run; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            # the restarted process shall see the marker and advance the state machine\n            self.command_executor.Execute('service atd restart', True)\n            
os.chdir('/')\n            with open(\"/restart-wala.sh\", \"w\") as f:\n                f.write(\"service walinuxagent restart\\n\")\n            self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n\n            self.context.hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_error_status,\n                                       code=CommonVariables.encryption_failed,\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
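StripdownState's should_exit() is deliberately two-phase: the first invocation only drops the state marker and schedules an agent restart, and the restarted process (now running out of the tmpfs root) finds the marker and lets the machine advance. A self-contained sketch of that handshake, with a hypothetical marker path:

import os
import subprocess

def should_exit(marker='/tmp/stripdown.marker'):
    if not os.path.exists(marker):
        # First call: record that we ran, then schedule a restart that
        # survives this process; 'at' detaches it from our process tree.
        open(marker, 'w').close()
        with open('/restart-wala.sh', 'w') as f:
            f.write('service walinuxagent restart\n')
        subprocess.check_call(['at', '-f', '/restart-wala.sh',
                               'now', '+', '1', 'minutes'])
        return False
    # Second call (after the restart): the marker exists, so advance.
    return True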
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n        \n        self.command_executor.Execute('service ssh restart', True)\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"initctl list\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"walinuxagent\" in line or \"ssh\" in line or \"cryptdisks\" in line:\n                continue\n\n            splitted = line.split()\n            if len(splitted):\n                service = splitted[0]\n                self.command_executor.Execute('service {0} restart'.format(service))\n\n        self.command_executor.Execute('swapoff -a', True)\n\n        self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n\n        if os.path.exists(\"/oldroot/mnt\"):\n            self.command_executor.Execute('umount /oldroot/mnt')\n\n        if os.path.exists(\"/oldroot/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')\n\n        if os.path.exists(\"/mnt\"):\n            self.command_executor.Execute('umount /mnt')\n\n        if os.path.exists(\"/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /mnt/azure_bek_disk')\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm /oldroot\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes using oldroot:\\n{0}\".format(proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = 
reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            proc_name = \"\"\n\n            try:\n                with open(\"/proc/{0}/cmdline\".format(victim)) as f:\n                    proc_name = f.read()\n            except IOError as e:\n                self.context.logger.log(\"Proc {0} is already dead\".format(victim))\n\n            self.context.logger.log(\"Killing process: {0} ({1})\".format(proc_name, victim))\n\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA before committing suicide\")\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.Execute('at -f /restart-wala.sh now + 1 minutes', True)\n                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        self.command_executor.Execute('telinit u', True)\n\n        sleep(3)\n\n        self.command_executor.Execute('umount -a', False)\n\n        sleep(3)\n\n        for mount_item in self.disk_util.get_mount_items():\n            if \"/oldroot/\" in mount_item[\"dest\"]:\n                self.command_executor.Execute('umount ' + mount_item[\"dest\"], True)\n\n\n        if self.command_executor.Execute('mountpoint /oldroot', False):\n            self.should_exit()\n            return\n        \n        self.command_executor.Execute('umount /oldroot', True)\n\n        sleep(3)\n        \n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Restarting udev\")\n            self.command_executor.Execute('service udev restart')\n\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                break\n\n            attempt += 1\n\n        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
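Before /oldroot can be unmounted, every process still holding it has to die; the state parses the PIDs out of fuser -vm output and kills the highest PIDs first, skipping init and handling its own PID specially. A hedged sketch of that sweep:

import os
import signal
import subprocess

def kill_holders(mountpoint='/oldroot'):
    proc = subprocess.Popen(['fuser', '-vm', mountpoint],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out = proc.communicate()[0].decode('utf-8', 'replace')
    # fuser prints process names and access flags too; keep numeric tokens.
    pids = sorted((int(tok) for tok in out.split() if tok.isdigit()),
                  reverse=True)
    for pid in pids:
        if pid in (1, os.getpid()):
            continue  # never kill init; our own PID needs special handling
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError:
            pass  # already gone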
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1404/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom SplitRootPartitionState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/Ubuntu1604EncryptionStateMachine.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nmaindir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(maindir)\ntransitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))\nsys.path.append(transitionsdir)\n\nfrom oscrypto import *\nfrom encryptstates import *\nfrom Common import *\nfrom CommandExecutor import *\nfrom DiskUtil import *\nfrom transitions import *\n\nclass Ubuntu1604EncryptionStateMachine(OSEncryptionStateMachine):\n    states = [\n        State(name='uninitialized'),\n        State(name='prereq', on_enter='on_enter_state'),\n        State(name='stripdown', on_enter='on_enter_state'),\n        State(name='unmount_oldroot', on_enter='on_enter_state'),\n        State(name='split_root_partition', on_enter='on_enter_state'),\n        State(name='encrypt_block_device', on_enter='on_enter_state'),\n        State(name='patch_boot_system', on_enter='on_enter_state'),\n        State(name='completed'),\n    ]\n\n    transitions = [\n        {\n            'trigger': 'skip_encryption',\n            'source': 'uninitialized',\n            'dest': 'completed'\n        },\n        {\n            'trigger': 'enter_prereq',\n            'source': 'uninitialized',\n            'dest': 'prereq'\n        },\n        {\n            'trigger': 'enter_stripdown',\n            'source': 'prereq',\n            'dest': 'stripdown',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_unmount_oldroot',\n            'source': 'stripdown',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'retry_unmount_oldroot',\n            'source': 'unmount_oldroot',\n            'dest': 'unmount_oldroot',\n            'before': 'on_enter_state'\n        },\n        {\n            'trigger': 'enter_split_root_partition',\n            'source': 'unmount_oldroot',\n            'dest': 'split_root_partition',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_encrypt_block_device',\n            'source': 'split_root_partition',\n            'dest': 'encrypt_block_device',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        {\n            'trigger': 'enter_patch_boot_system',\n            'source': 'encrypt_block_device',\n            'dest': 'patch_boot_system',\n            'before': 'on_enter_state',\n            'conditions': 'should_exit_previous_state'\n        },\n        
{\n            'trigger': 'stop_machine',\n            'source': 'patch_boot_system',\n            'dest': 'completed',\n            'conditions': 'should_exit_previous_state'\n        },\n    ]\n\n    def on_enter_state(self):\n        super(Ubuntu1604EncryptionStateMachine, self).on_enter_state()\n\n    def should_exit_previous_state(self):\n        # when this is called, self.state is still the \"source\" state in the transition\n        return super(Ubuntu1604EncryptionStateMachine, self).should_exit_previous_state()\n\n    def __init__(self, hutil, distro_patcher, logger, encryption_environment):\n        super(Ubuntu1604EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)\n\n        self.state_objs = {\n            'prereq': PrereqState(self.context),\n            'stripdown': StripdownState(self.context),\n            'unmount_oldroot': UnmountOldrootState(self.context),\n            'split_root_partition': SplitRootPartitionState(self.context),\n            'encrypt_block_device': EncryptBlockDeviceState(self.context),\n            'patch_boot_system': PatchBootSystemState(self.context),\n        }\n\n        self.state_machine = Machine(model=self,\n                                     states=Ubuntu1604EncryptionStateMachine.states,\n                                     transitions=Ubuntu1604EncryptionStateMachine.transitions,\n                                     initial='uninitialized')\n\n    def start_encryption(self):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"mount\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        if '/dev/mapper/osencrypt' in proc_comm.stdout:\n            self.logger.log(\"OS volume is already encrypted\")\n\n            self.skip_encryption()\n            self.log_machine_state()\n\n            return\n\n        self.log_machine_state()\n\n        self.enter_prereq()\n        self.log_machine_state()\n\n        self.enter_stripdown()\n        self.log_machine_state()\n        \n        oldroot_unmounted_successfully = False\n        attempt = 1\n\n        while not oldroot_unmounted_successfully:\n            self.logger.log(\"Attempt #{0} to unmount /oldroot\".format(attempt))\n\n            try:\n                if attempt == 1:\n                    self.enter_unmount_oldroot()\n                elif attempt > 10:\n                    raise Exception(\"Could not unmount /oldroot in 10 attempts\")\n                else:\n                    self.retry_unmount_oldroot()\n\n                self.log_machine_state()\n            except Exception as e:\n                message = \"Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}\".format(attempt,\n                                                                                                             e,\n                                                                                                             traceback.format_exc())\n                self.logger.log(msg=message)\n                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',\n                                            status=CommonVariables.extension_error_status,\n                                            status_code=str(CommonVariables.unmount_oldroot_error),\n                                            message=message)\n\n                sleep(10)\n                if attempt > 10:\n              
      raise Exception(message)\n            else:\n                oldroot_unmounted_successfully = True\n            finally:\n                attempt += 1\n        \n        self.enter_split_root_partition()\n        self.log_machine_state()\n        \n        self.enter_encrypt_block_device()\n        self.log_machine_state()\n\n        self.enter_patch_boot_system()\n        self.log_machine_state()\n        \n        self.stop_machine()\n        self.log_machine_state()\n\n        self._reboot()\n"
  },
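The state machine is driven by the transitions library: State objects with on_enter callbacks, dict-style transition tables, and a conditions guard that runs while self.state is still the source state. A minimal, runnable sketch of that wiring with hypothetical names:

from transitions import Machine, State

class DemoMachine(object):
    states = [State(name='uninitialized'),
              State(name='prereq', on_enter='on_enter_state'),
              State(name='completed')]

    transitions = [
        {'trigger': 'enter_prereq', 'source': 'uninitialized',
         'dest': 'prereq', 'conditions': 'should_exit_previous_state'},
        {'trigger': 'stop_machine', 'source': 'prereq', 'dest': 'completed'},
    ]

    def __init__(self):
        # Machine injects the trigger methods (enter_prereq, stop_machine)
        # onto this model object.
        self.machine = Machine(model=self, states=DemoMachine.states,
                               transitions=DemoMachine.transitions,
                               initial='uninitialized')

    def on_enter_state(self):
        print('entered state: {0}'.format(self.state))

    def should_exit_previous_state(self):
        # Guards run before the transition, so self.state is the source.
        return True

machine = DemoMachine()
machine.enter_prereq()   # prints 'entered state: prereq'
machine.stop_machine()   # machine.state is now 'completed'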
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom Ubuntu1604EncryptionStateMachine import *\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptpatches/ubuntu_1604_initramfs.patch",
    "content": "diff -Naur hooks.orig/cryptroot hooks/cryptroot\n--- hooks.orig/cryptroot\t2016-07-24 04:00:35.707468106 +0000\n+++ hooks/cryptroot\t2016-07-24 04:00:58.251574341 +0000\n@@ -521,14 +521,14 @@\n \t\t\t\tmkdir -p \"$DESTDIR/conf/conf.d/cryptheader\"\n \t\t\tfi\n \n-\t\t\t#if [ -e \"$CONFDIR/conf.d/cryptheader/$CRYPTHEADER\" ]; then\n-\t\t\t#\tcopy_exec \"$CONFDIR/conf.d/cryptheader/$CRYPTHEADER\" /conf/conf.d/cryptheader >&2\n-\t\t\t#elif [ -e \"$CRYPTHEADER\" ]; then\n-\t\t\t#\tcopy_exec \"$CRYPTHEADER\" /conf/conf.d/cryptheader >&2\n-\t\t\t#else\n-\t\t\t#\techo \"cryptsetup: WARNING: failed to find LUKS header $CRYPTHEADER\" >&2\n-\t\t\t#\tcontinue\n-\t\t\t#fi\n+\t\t\tif [ -e \"$CONFDIR/conf.d/cryptheader/$CRYPTHEADER\" ]; then\n+\t\t\t\tcopy_exec \"$CONFDIR/conf.d/cryptheader/$CRYPTHEADER\" /conf/conf.d/cryptheader >&2\n+\t\t\telif [ -e \"$CRYPTHEADER\" ]; then\n+\t\t\t\tcopy_exec \"$CRYPTHEADER\" /conf/conf.d/cryptheader >&2\n+\t\t\telse\n+\t\t\t\techo \"cryptsetup: WARNING: failed to find LUKS header $CRYPTHEADER\" >&2\n+\t\t\t\tcontinue\n+\t\t\tfi\n \t\tfi\n \t\t\n \n@@ -627,6 +627,9 @@\n \tif [ -z \"$rootdevs\" ]; then\n \t\techo \"cryptsetup: WARNING: could not determine root device from /etc/fstab\" >&2\n \tfi\n+\tif ! echo \"$rootdevs\" | grep -q \"osencrypt\"; then\n+\t\trootdevs=\"$rootdevs osencrypt\"\n+\tfi\n \tusrdevs=$(get_fs_devices /usr)\n \tresumedevs=$(get_resume_devices)\n \tinitramfsdevs=$(get_initramfs_devices)\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptscripts/azure_crypt_key.sh",
    "content": "#!/bin/sh\nMountPoint=/tmp-keydisk-mount\nKeyFileName=LinuxPassPhraseFileName\necho \"Trying to get the key from disks ...\" >&2\nmkdir -p $MountPoint\nmodprobe nls_utf8 >/dev/null 2>&1\nmodprobe nls_cp437 >/dev/null 2>&1\nmodprobe vfat >/dev/null 2>&1\nsleep 2\nOPENED=0\ncd /sys/block\nfor DEV in sd*; do\n\techo \"> Trying device: $DEV ...\" >&2\n\tmount -t vfat -r /dev/${DEV}1 $MountPoint >&2 2>&1\n\tif [ -f $MountPoint/$KeyFileName ]; then\n\t\tcat $MountPoint/$KeyFileName && echo \"Success loading keyfile!\" >&2\n\t\tumount $MountPoint 2>/dev/null\n\t\tOPENED=1\n\t\tbreak\n\tfi\n\tumount $MountPoint 2>/dev/null\ndone\nif [ $OPENED -eq 0 ]; then\n\techo \"FAILED to find suitable passphrase file ...\" >&2\n\techo -n \"Try to enter your password: \" >&2\n\tread -r A </dev/console\n\techo -n \"$A\"\nfi\n"
  },
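The keyscript must stay POSIX shell because cryptsetup runs it inside the initramfs, but the search logic is simple: probe the first partition of each sd* disk as read-only vfat and look for the well-known passphrase file. For illustration only, the same scan in Python (hypothetical mount point):

import os
import subprocess

def find_passphrase(mount_point='/tmp-keydisk-mount',
                    key_name='LinuxPassPhraseFileName'):
    if not os.path.isdir(mount_point):
        os.makedirs(mount_point)
    for dev in sorted(os.listdir('/sys/block')):
        if not dev.startswith('sd'):
            continue
        part = '/dev/{0}1'.format(dev)  # BEK volume carries one vfat partition
        if subprocess.call(['mount', '-t', 'vfat', '-r', part, mount_point]):
            continue  # not mountable as vfat, try the next disk
        try:
            key_path = os.path.join(mount_point, key_name)
            if os.path.isfile(key_path):
                with open(key_path, 'rb') as f:
                    return f.read()
        finally:
            subprocess.call(['umount', mount_point])
    return None  # caller falls back to the console prompt, as the script does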
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptscripts/inject_luks_header.sh",
    "content": "#!/bin/sh -e\n\nPREREQS=\"\"\n\nprereqs() { echo \"$PREREQS\"; }\n\ncase \"$1\" in\n    prereqs)\n    prereqs\n    exit 0\n    ;;\nesac\n\n. /usr/share/initramfs-tools/hook-functions\n\nmkdir -p ${DESTDIR}/boot/luks\ncopy_exec /boot/luks/osluksheader /boot/luks\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/EncryptBlockDeviceState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport sys\n\nfrom inspect import ismethod\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass EncryptBlockDeviceState(OSEncryptionState):\n    def __init__(self, context):\n        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter encrypt_block_device state\")\n\n        if not super(EncryptBlockDeviceState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for encrypt_block_device state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering encrypt_block_device state\")\n        \n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('systemctl restart systemd-udevd', False)\n        self.command_executor.Execute('systemctl restart systemd-timesyncd', False)\n\n        # self._find_bek_and_execute_action('_dump_passphrase')\n        self._find_bek_and_execute_action('_luks_format')\n        self._find_bek_and_execute_action('_luks_open')\n\n        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',\n                                            status=CommonVariables.extension_success_status,\n                                            status_code=str(CommonVariables.success),\n                                            message='OS disk encryption started')\n\n        self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit encrypt_block_device state\")\n\n        if not os.path.exists('/dev/mapper/osencrypt'):\n            self._find_bek_and_execute_action('_luks_open')\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n\n        return super(EncryptBlockDeviceState, self).should_exit()\n\n    def _luks_format(self, bek_path):\n        self.command_executor.Execute('rm -rf /boot/luks', True)\n        self.command_executor.Execute('mkdir /boot/luks', True)\n        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)\n        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,\n                                                                                                                    self.rootfs_block_device),\n                                      raise_exception_on_failure=True)\n\n    def _luks_open(self, 
bek_path):\n        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,\n                                                                                                                         bek_path),\n                                      raise_exception_on_failure=True)\n\n    def _dump_passphrase(self, bek_path):\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"od -c {0}\".format(bek_path),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        self.context.logger.log(\"Passphrase:\")\n        self.context.logger.log(proc_comm.stdout)\n\n    def _find_bek_and_execute_action(self, callback_method_name):\n        callback_method = getattr(self, callback_method_name)\n        if not ismethod(callback_method):\n            raise Exception(\"{0} is not a method\".format(callback_method_name))\n\n        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)\n        callback_method(bek_path)        \n"
  },
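should_exit() above proves the copy is usable rather than trusting dd's exit code: re-open the mapping if it vanished, then mount and immediately unmount the encrypted filesystem. A small sketch of that probe (hypothetical helper, plain subprocess):

import os
import subprocess

def mapping_is_sane(mapper='/dev/mapper/osencrypt', probe_dir='/oldroot'):
    if not os.path.exists(mapper):
        return False  # caller re-runs luksOpen first, as the state does
    if subprocess.call(['mount', mapper, probe_dir]):
        return False  # filesystem did not survive the copy
    subprocess.check_call(['umount', probe_dir])
    return True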
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/PatchBootSystemState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\n\nclass PatchBootSystemState(OSEncryptionState):\n    def __init__(self, context):\n        super(PatchBootSystemState, self).__init__('PatchBootSystemState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter patch_boot_system state\")\n\n        if not super(PatchBootSystemState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for patch_boot_system state\")\n\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('umount /oldroot', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering patch_boot_system state\")\n\n        self.command_executor.Execute('mount /boot', False)\n        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.Execute('mkdir /oldroot/memroot', True)\n        self.command_executor.Execute('pivot_root /oldroot /oldroot/memroot', True)\n\n        self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /memroot/$i /$i; done', True)\n        self.command_executor.ExecuteInBash('[ -e \"/boot/luks\" ]', True)\n\n        try:\n            self._modify_pivoted_oldroot()\n        except Exception as e:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            raise\n        else:\n            self.command_executor.Execute('mount --make-rprivate /')\n            self.command_executor.Execute('pivot_root /memroot /memroot/oldroot')\n            self.command_executor.Execute('rmdir /oldroot/memroot')\n            self.command_executor.ExecuteInBash('for i in dev proc sys boot; do mount --move /oldroot/$i /$i; done')\n\n            extension_full_name = 'Microsoft.Azure.Security.' 
+ CommonVariables.extension_name\n            self.command_executor.Execute('cp -ax' +\n                                          ' /var/log/azure/{0}'.format(extension_full_name) +\n                                          ' /oldroot/var/log/azure/{0}.Stripdown'.format(extension_full_name))\n            self.command_executor.Execute('umount /boot')\n            self.command_executor.Execute('umount /oldroot')\n            self.command_executor.Execute('systemctl restart walinuxagent')\n\n            self.context.logger.log(\"Pivoted back into memroot successfully\")\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit patch_boot_system state\")\n\n        return super(PatchBootSystemState, self).should_exit()\n\n    def _append_contents_to_file(self, contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    def _modify_pivoted_oldroot(self):\n        self.context.logger.log(\"Pivoted into oldroot successfully\")\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        encryptscriptsdir = os.path.join(scriptdir, '../encryptscripts')\n        injectscriptpath = os.path.join(encryptscriptsdir, 'inject_luks_header.sh')\n\n        if not os.path.exists(injectscriptpath):\n            message = \"Inject-script not found at path: {0}\".format(injectscriptpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Inject-script found at path: {0}\".format(injectscriptpath))\n\n        self.command_executor.Execute('cp {0} /usr/share/initramfs-tools/hooks/luksheader'.format(injectscriptpath), True)\n        self.command_executor.Execute('chmod +x /usr/share/initramfs-tools/hooks/luksheader', True)\n\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        patchesdir = os.path.join(scriptdir, '../encryptpatches')\n        patchpath = os.path.join(patchesdir, 'ubuntu_1604_initramfs.patch')\n\n        if not os.path.exists(patchpath):\n            message = \"Patch not found at path: {0}\".format(patchpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Patch found at path: {0}\".format(patchpath))\n        \n        self.command_executor.ExecuteInBash('patch -b -d /usr/share/initramfs-tools/hooks -p1 <{0}'.format(patchpath), True)\n        \n        entry = 'osencrypt /dev/sda1 none luks,discard,header=/boot/luks/osluksheader,keyscript=/usr/sbin/azure_crypt_key.sh'\n        self._append_contents_to_file(entry, '/etc/crypttab')\n\n        self.command_executor.Execute('update-initramfs -u -k all', True)\n        self.command_executor.Execute('update-grub', True)\n        self.command_executor.Execute('grub-install --recheck --force {0}'.format(self.rootfs_disk), True)\n\n    def _get_uuid(self, partition_name):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"blkid -s UUID -o value {0}\".format(partition_name),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return proc_comm.stdout.strip()\n"
  },
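Unlike the 14.04 variant earlier in this document, this version does not verify the rebuilt initramfs before rebooting. A hedged sketch of the crypttab append plus the lsinitramfs check the 14.04 state performs, which would apply here unchanged:

import subprocess

CRYPTTAB_ENTRY = ('osencrypt /dev/sda1 none '
                  'luks,discard,header=/boot/luks/osluksheader,'
                  'keyscript=/usr/sbin/azure_crypt_key.sh\n')

def patch_and_verify():
    with open('/etc/crypttab', 'a') as f:
        f.write(CRYPTTAB_ENTRY)
    subprocess.check_call(['update-initramfs', '-u', '-k', 'all'])
    # Both the keyscript and the detached LUKS header must be baked into
    # the image, or the machine cannot unlock its root device at boot.
    listing = subprocess.check_output('lsinitramfs /boot/initrd*', shell=True)
    if b'azure_crypt_key.sh' not in listing or b'osluksheader' not in listing:
        raise Exception('initramfs update failed')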
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/PrereqState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass PrereqState(OSEncryptionState):\n    def __init__(self, context):\n        super(PrereqState, self).__init__('PrereqState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter prereq state\")\n\n        if not super(PrereqState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for prereq state\")\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering prereq state\")\n\n        distro_info = self.context.distro_patcher.distro_info\n        self.context.logger.log(\"Distro info: {0}\".format(distro_info))\n\n        if distro_info[0] == 'Ubuntu' and distro_info[1] in ['16.04', '18.04']:\n            self.context.logger.log(\"Enabling OS volume encryption on {0} {1}\".format(distro_info[0],\n                                                                                      distro_info[1]))\n        else:\n            raise Exception(\"Ubuntu1604EncryptionStateMachine called for distro {0} {1}\".format(distro_info[0],\n                                                                                                distro_info[1]))\n\n        self.context.distro_patcher.install_extras()\n\n        self._patch_walinuxagent()\n        self.command_executor.Execute('systemctl daemon-reload', True)\n\n        self._copy_key_script()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit prereq state\")\n\n        return super(PrereqState, self).should_exit()\n\n    def _patch_walinuxagent(self):\n        self.context.logger.log(\"Patching walinuxagent\")\n\n        contents = None\n\n        with open('/lib/systemd/system/walinuxagent.service', 'r') as f:\n            contents = f.read()\n\n        contents = re.sub(r'\\[Service\\]\\n', '[Service]\\nKillMode=process\\n', contents)\n\n        with open('/lib/systemd/system/walinuxagent.service', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"walinuxagent patched successfully\")\n\n    def _copy_key_script(self):\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        encryptscriptsdir = os.path.join(scriptdir, '../encryptscripts')\n        keyscriptpath = os.path.join(encryptscriptsdir, 'azure_crypt_key.sh')\n\n        if not os.path.exists(keyscriptpath):\n            message = \"Key script not found at path: {0}\".format(keyscriptpath)\n            self.context.logger.log(message)\n            raise Exception(message)\n        else:\n            self.context.logger.log(\"Key script found at 
path: {0}\".format(keyscriptpath))\n\n        self.command_executor.Execute('cp {0} /usr/sbin/azure_crypt_key.sh'.format(keyscriptpath), True)\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/SelinuxState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nfrom OSEncryptionState import *\n\nclass SelinuxState(OSEncryptionState):\n    def __init__(self, context):\n        super(SelinuxState, self).__init__('SelinuxState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter selinux state\")\n\n        if not super(SelinuxState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for selinux state\")\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering selinux state\")\n\n        se_linux_status = self.context.encryption_environment.get_se_linux()\n        if(se_linux_status.lower() == 'enforcing'):\n            self.context.logger.log(\"SELinux is in enforcing mode, disabling\")\n            self.context.encryption_environment.disable_se_linux()\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit selinux state\")\n\n        return super(SelinuxState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/SplitRootPartitionState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nimport parted\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass SplitRootPartitionState(OSEncryptionState):\n    def __init__(self, context):\n        super(SplitRootPartitionState, self).__init__('SplitRootPartitionState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter split_root_partition state\")\n\n        if not super(SplitRootPartitionState, self).should_enter():\n            return False\n\n        self.context.logger.log(\"Performing enter checks for split_root_partition state\")\n\n\n        attempt = 1\n        while attempt < 10:\n            fsck_result = self.command_executor.Execute(\"e2fsck -yf {0}\".format(self.rootfs_block_device))\n\n            if fsck_result == 0:\n                break\n\n            self.command_executor.Execute('systemctl restart systemd-udevd')\n            self.command_executor.Execute('systemctl restart systemd-timesyncd')\n            self.command_executor.Execute('systemctl restart systemd-networkd')\n            self.command_executor.Execute('udevadm trigger')\n\n            sleep(10)\n\n            attempt += 1\n\n        if not attempt < 10:\n            return False\n\n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering split_root_partition state\")\n\n        device = parted.getDevice(self.rootfs_disk)\n        disk = parted.newDisk(device)\n\n        original_root_fs_size = self._get_root_fs_size_in(device.sectorSize)\n        self.context.logger.log(\"Original root filesystem size (sectors): {0}\".format(original_root_fs_size))\n\n        desired_boot_partition_size = parted.sizeToSectors(256, 'MiB', device.sectorSize)\n        self.context.logger.log(\"Desired boot partition size (sectors): {0}\".format(desired_boot_partition_size))\n\n        desired_root_fs_size = int(original_root_fs_size - desired_boot_partition_size)\n        self.context.logger.log(\"Desired root filesystem size (sectors): {0}\".format(desired_root_fs_size))\n\n        attempt = 1\n        while attempt < 10:\n            resize_result = self.command_executor.Execute(\"resize2fs {0} {1}s\".format(self.rootfs_block_device, desired_root_fs_size))\n\n            if resize_result == 0:\n                break\n            else:\n                self.command_executor.Execute('systemctl restart systemd-udevd')\n                self.command_executor.Execute('systemctl restart systemd-timesyncd')\n                self.command_executor.Execute('udevadm trigger')\n\n                sleep(10)\n\n                attempt += 1\n\n        resized_root_fs_size = self._get_root_fs_size_in(device.sectorSize)\n\n        self.context.logger.log(\"Resized root filesystem size (sectors): 
{0}\".format(resized_root_fs_size))\n\n        if not desired_root_fs_size == resized_root_fs_size:\n            raise Exception(\"resize2fs failed, desired: {0}, resized: {1}\".format(desired_root_fs_size,\n                                                                                  resized_root_fs_size))\n\n        self.context.logger.log(\"Root filesystem resized successfully\")\n\n        root_partition = disk.getPartitionByPath(os.path.realpath(self.rootfs_block_device))\n\n        original_root_partition_start = root_partition.geometry.start\n        original_root_partition_end = root_partition.geometry.end\n\n        self.context.logger.log(\"Original root partition start (sectors): {0}\".format(original_root_partition_start))\n        self.context.logger.log(\"Original root partition end (sectors): {0}\".format(original_root_partition_end))\n\n        desired_root_partition_start = original_root_partition_start\n        desired_root_partition_end = original_root_partition_end - desired_boot_partition_size\n        desired_root_partition_size = desired_root_partition_end - desired_root_partition_start\n\n        self.context.logger.log(\"Desired root partition start (sectors): {0}\".format(desired_root_partition_start))\n        self.context.logger.log(\"Desired root partition end (sectors): {0}\".format(desired_root_partition_end))\n        self.context.logger.log(\"Desired root partition size (sectors): {0}\".format(desired_root_partition_size))\n\n        desired_root_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_root_partition_start,\n                                                          length=desired_root_partition_size)\n        root_partition_constraint = parted.Constraint(exactGeom=desired_root_partition_geometry)\n        disk.setPartitionGeometry(partition=root_partition,\n                                  constraint=root_partition_constraint,\n                                  start=desired_root_partition_start,\n                                  end=desired_root_partition_end)\n\n        desired_boot_partition_start = disk.getFreeSpaceRegions()[1].start\n        desired_boot_partition_end = disk.getFreeSpaceRegions()[1].end\n        desired_boot_partition_size = disk.getFreeSpaceRegions()[1].length\n\n        self.context.logger.log(\"Desired boot partition start (sectors): {0}\".format(desired_boot_partition_start))\n        self.context.logger.log(\"Desired boot partition end (sectors): {0}\".format(desired_boot_partition_end))\n\n        desired_boot_partition_geometry = parted.Geometry(device=device,\n                                                          start=desired_boot_partition_start,\n                                                          length=desired_boot_partition_size)\n        boot_partition_constraint = parted.Constraint(exactGeom=desired_boot_partition_geometry)\n        desired_boot_partition = parted.Partition(disk=disk,\n                                                  type=parted.PARTITION_NORMAL,\n                                                  geometry=desired_boot_partition_geometry)\n\n        if (root_partition.getFlag(parted.PARTITION_BOOT)):\n            desired_boot_partition.setFlag(parted.PARTITION_BOOT)\n\n        disk.addPartition(partition=desired_boot_partition, constraint=boot_partition_constraint)\n\n        disk.commit()\n\n        probed_root_fs = parted.probeFileSystem(desired_root_partition_geometry)\n        if not probed_root_fs 
== 'ext4':\n            raise Exception(\"Probed root fs is not ext4\")\n\n        self.command_executor.Execute(\"partprobe {0}\".format(self.rootfs_disk), True)\n        self.command_executor.Execute(\"mkfs.ext2 {0}\".format(self.bootfs_block_device), True)\n\n        boot_partition_uuid = self._get_uuid(self.bootfs_block_device)\n\n        # Move stuff from /oldroot/boot to new partition, make new partition mountable at the same spot\n        self.command_executor.Execute(\"mount {0} /oldroot\".format(self.rootfs_block_device), True)\n        self.command_executor.Execute(\"mkdir -p /boot\", True)\n        self.command_executor.Execute(\"cp /oldroot/etc/fstab /etc/fstab\", True)\n        self._append_boot_partition_uuid_to_fstab(boot_partition_uuid)\n        self.command_executor.Execute(\"cp /etc/fstab /oldroot/etc/fstab\", True)\n        self.command_executor.Execute(\"mount /boot\", True)\n        self.command_executor.ExecuteInBash(\"mv /oldroot/boot/* /boot/\", True)\n        self.command_executor.Execute(\"umount /boot\", True)\n        self.command_executor.Execute(\"umount /oldroot\", True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit split_root_partition state\")\n\n        self.command_executor.ExecuteInBash(\"mount /boot || mountpoint /boot\", True)\n        self.command_executor.ExecuteInBash(\"[ -e /boot/grub ]\", True)\n        self.command_executor.Execute(\"umount /boot\", True)\n\n        return super(SplitRootPartitionState, self).should_exit()\n\n    def _get_uuid(self, partition_name):\n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"blkid -s UUID -o value {0}\".format(partition_name),\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n        return proc_comm.stdout.strip()\n\n    def _append_boot_partition_uuid_to_fstab(self, boot_partition_uuid):\n        self.context.logger.log(\"Updating fstab\")\n\n        contents = None\n\n        with open('/etc/fstab', 'r') as f:\n            contents = f.read()\n\n        contents += '\\n'\n        contents += 'UUID={0}\\t/boot\\text2\\tdefaults\\t0 0'.format(boot_partition_uuid)\n        contents += '\\n'\n\n        with open('/etc/fstab', 'w') as f:\n            f.write(contents)\n\n        self.context.logger.log(\"fstab updated successfully\")\n\n    def _get_root_fs_size_in(self, sector_size):\n        proc_comm = ProcessCommunicator()\n\n        attempt = 1\n        while attempt < 10:\n            dump_result = self.command_executor.Execute(command_to_execute=\"dumpe2fs -h {0}\".format(self.rootfs_block_device),\n                                                        raise_exception_on_failure=False,\n                                                        communicator=proc_comm)\n\n            if dump_result == 0:\n                break\n\n            self.command_executor.Execute('systemctl restart systemd-udevd')\n            self.command_executor.Execute('systemctl restart systemd-timesyncd')\n            self.command_executor.Execute('udevadm trigger')\n\n            sleep(10)\n\n            attempt += 1\n\n        if attempt >= 10:\n            # propagate the failure to the caller instead of handing back an Exception object\n            raise Exception(\"Could not dumpe2fs, stderr: \\n{0}\".format(proc_comm.stderr))\n\n        root_fs_block_count = re.findall(r'Block count:\\s*(\\d+)', proc_comm.stdout)\n        root_fs_block_size = re.findall(r'Block size:\\s*(\\d+)', proc_comm.stdout)\n\n        if not root_fs_block_count or not root_fs_block_size:\n            raise Exception(\"Error parsing dumpe2fs output, count={0}, size={1}\".format(root_fs_block_count,\n                                                                                        root_fs_block_size))\n\n        root_fs_block_count = int(root_fs_block_count[0])\n        root_fs_block_size = int(root_fs_block_size[0])\n        root_fs_size = parted.sizeToSectors(root_fs_block_count * root_fs_block_size, 'B', sector_size)\n\n        return root_fs_size\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/StripdownState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass StripdownState(OSEncryptionState):\n    def __init__(self, context):\n        super(StripdownState, self).__init__('StripdownState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter stripdown state\")\n\n        if not super(StripdownState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for stripdown state\")\n\n        self.command_executor.Execute('rm -rf /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('! [ -e \"/oldroot\" ]', True)\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering stripdown state\")\n\n        self.command_executor.Execute('umount -a')\n        self.command_executor.Execute('mkdir /tmp/tmproot', True)\n        self.command_executor.Execute('mount -t tmpfs none /tmp/tmproot', True)\n        self.command_executor.ExecuteInBash('for i in proc sys dev run usr var tmp root oldroot boot; do mkdir /tmp/tmproot/$i; done', True)\n        self.command_executor.ExecuteInBash('for i in bin etc mnt sbin lib lib64 root; do cp -ax /$i /tmp/tmproot/; done', True)\n        self.command_executor.ExecuteInBash('for i in bin sbin lib share; do cp -ax /usr/$i /tmp/tmproot/usr/; done', True)\n        self.command_executor.ExecuteInBash('for i in lib local lock opt run spool tmp; do cp -ax /var/$i /tmp/tmproot/var/; done', True)\n        self.command_executor.ExecuteInBash('mkdir /tmp/tmproot/var/log', True)\n        self.command_executor.ExecuteInBash('cp -ax /var/log/azure /tmp/tmproot/var/log/', True)\n        self.command_executor.Execute('mount --make-rprivate /', True)\n        self.command_executor.ExecuteInBash('[ -e \"/tmp/tmproot/var/lib/azure_disk_encryption_config/azure_crypt_request_queue.ini\" ]', True)\n        self.command_executor.Execute('systemctl stop walinuxagent', True)\n        self.command_executor.Execute('pivot_root /tmp/tmproot /tmp/tmproot/oldroot', True)\n        self.command_executor.ExecuteInBash('for i in dev proc sys run; do mount --move /oldroot/$i /$i; done', True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit stripdown state\")\n\n        if not os.path.exists(self.state_marker):\n            self.context.logger.log(\"First call to stripdown state (pid={0}), restarting process\".format(os.getpid()))\n\n            # create the marker, but do not advance the state machine\n            super(StripdownState, self).should_exit()\n\n            # the restarted process shall see the marker and advance the state machine\n            
self.command_executor.ExecuteInBash('sleep 30 && systemctl start walinuxagent &', True)\n\n            self.context.hutil.do_exit(exit_code=CommonVariables.encryption_failed,\n                                       operation='EnableEncryptionOSVolume',\n                                       status=CommonVariables.extension_error_status,\n                                       code=CommonVariables.encryption_failed,\n                                       message=\"Restarted extension from stripped down OS\")\n        else:\n            self.context.logger.log(\"Second call to stripdown state (pid={0}), continuing process\".format(os.getpid()))\n            return True\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/UnmountOldrootState.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport os\nimport re\nimport sys\n\nfrom time import sleep\nfrom OSEncryptionState import *\n\nclass UnmountOldrootState(OSEncryptionState):\n    def __init__(self, context):\n        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)\n\n    def should_enter(self):\n        self.context.logger.log(\"Verifying if machine should enter unmount_oldroot state\")\n\n        if not super(UnmountOldrootState, self).should_enter():\n            return False\n        \n        self.context.logger.log(\"Performing enter checks for unmount_oldroot state\")\n\n        self.command_executor.ExecuteInBash('[ -e \"/oldroot\" ]', True)\n        \n        if self.command_executor.Execute('mountpoint /oldroot') != 0:\n            return False\n                \n        return True\n\n    def enter(self):\n        if not self.should_enter():\n            return\n\n        self.context.logger.log(\"Entering unmount_oldroot state\")\n        \n        self.command_executor.Execute('systemctl rescue', True)\n        self.command_executor.Execute('systemctl start sshd.service', True)\n        self.command_executor.Execute('systemctl start walinuxagent.service', True)\n        \n        proc_comm = ProcessCommunicator()\n        self.command_executor.Execute(command_to_execute=\"systemctl list-units\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        for line in proc_comm.stdout.split('\\n'):\n            if not \"running\" in line:\n                continue\n\n            if \"walinuxagent.service\" in line or \"sshd.service\" in line:\n                continue\n\n            match = re.search(r'\\s(\\S*?\\.service)', line)\n            if match:\n                service = match.groups()[0]\n                self.command_executor.Execute('systemctl restart {0}'.format(service))\n\n        self.command_executor.Execute('swapoff -a', True)\n\n        self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)\n\n        if os.path.exists(\"/oldroot/mnt\"):\n            self.command_executor.Execute('umount /oldroot/mnt')\n\n        if os.path.exists(\"/oldroot/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')\n\n        if os.path.exists(\"/mnt\"):\n            self.command_executor.Execute('umount -R /mnt')\n\n        if os.path.exists(\"/mnt/azure_bek_disk\"):\n            self.command_executor.Execute('umount /mnt/azure_bek_disk')\n\n        proc_comm = ProcessCommunicator()\n\n        self.command_executor.Execute(command_to_execute=\"fuser -vm /oldroot\",\n                                      raise_exception_on_failure=True,\n                                      communicator=proc_comm)\n\n        self.context.logger.log(\"Processes 
using oldroot:\\n{0}\".format(proc_comm.stdout))\n\n        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())\n        procs_to_kill = reversed(sorted(procs_to_kill))\n\n        for victim in procs_to_kill:\n            proc_name = \"\"\n\n            try:\n                with open(\"/proc/{0}/cmdline\".format(victim)) as f:\n                    proc_name = f.read()\n            except IOError as e:\n                self.context.logger.log(\"Proc {0} is already dead\".format(victim))\n\n            self.context.logger.log(\"Killing process: {0} ({1})\".format(proc_name, victim))\n\n            if int(victim) == os.getpid():\n                self.context.logger.log(\"Restarting WALA in 30 seconds before committing suicide\")\n\n                # Kill any other daemons that are blocked and would be executed after this process commits\n                # suicide\n                self.command_executor.ExecuteInBash('sleep 30 && pkill -f .*ForLinux.*handle.py.*daemon.* && systemctl start walinuxagent &', True)\n\n            if int(victim) == 1:\n                self.context.logger.log(\"Skipping init\")\n                continue\n\n            self.command_executor.Execute('kill -9 {0}'.format(victim))\n\n        self.command_executor.Execute('telinit u', True)\n\n        sleep(3)\n\n        self.command_executor.Execute('umount /oldroot', True)\n\n        sleep(3)\n        \n        attempt = 1\n\n        while True:\n            if attempt > 10:\n                raise Exception(\"Block device {0} did not appear in 10 restart attempts\".format(self.rootfs_block_device))\n\n            self.context.logger.log(\"Restarting systemd-udevd\")\n            self.command_executor.Execute('systemctl restart systemd-udevd')\n            self.context.logger.log(\"Restarting systemd-timesyncd\")\n            self.command_executor.Execute('systemctl restart systemd-timesyncd')\n            self.context.logger.log(\"Restarting systemd-networkd\")\n            self.command_executor.Execute('systemctl restart systemd-networkd')\n\n            sleep(10)\n\n            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:\n                break\n\n            attempt += 1\n\n        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)\n\n    def should_exit(self):\n        self.context.logger.log(\"Verifying if machine should exit unmount_oldroot state\")\n\n        if os.path.exists('/oldroot/bin'):\n            self.context.logger.log(\"/oldroot was not unmounted\")\n            return False\n\n        return super(UnmountOldrootState, self).should_exit()\n"
  },
  {
    "path": "VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/__init__.py",
    "content": "#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.7+\n#\n\nimport inspect\nimport os\nimport sys\nimport traceback\nfrom time import sleep\n\nscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\noscryptodir = os.path.abspath(os.path.join(scriptdir, '../../'))\nsys.path.append(oscryptodir)\n\nfrom OSEncryptionState import *\nfrom PrereqState import *\nfrom StripdownState import *\nfrom UnmountOldrootState import *\nfrom SplitRootPartitionState import *\nfrom EncryptBlockDeviceState import *\nfrom PatchBootSystemState import *\n"
  },
  {
    "path": "VMEncryption/main/patch/AbstractPatching.py",
    "content": "#!/usr/bin/python\n#\n# AbstractPatching is the base patching class of all the linux distros\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nclass AbstractPatching(object):\n    \"\"\"\n    AbstractPatching defines a skeleton neccesary for a concrete Patching class.\n    \"\"\"\n    def __init__(self, distro_info):\n        self.distro_info = distro_info\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/usr/bin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/usr/sbin/cryptsetup'\n        self.dd_path = '/usr/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/usr/bin/echo'\n        self.lsblk_path = '/usr/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/usr/bin/mkdir'\n        self.mount_path = '/usr/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/usr/bin/umount'\n        self.kernel_version = platform.release()\n\n    def install_adal(self):\n        pass\n\n    def install_extras(self):\n        pass\n\n    def update_prereq(self):\n        pass"
  },
  {
    "path": "VMEncryption/main/patch/SuSEPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\n\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\nfrom CommandExecutor import *\n\n\nclass SuSEPatching(AbstractPatching):\n    def __init__(self, logger, distro_info):\n        super(SuSEPatching, self).__init__(distro_info)\n\n        self.distro_info = distro_info\n        self.command_executor = CommandExecutor(logger)\n\n        if distro_info[1] == \"11\":\n            self.logger = logger\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.cat_path = '/bin/cat'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n            self.blockdev_path = '/sbin/blockdev'\n        else:\n            self.logger = logger\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_adal(self):\n        if self.distro_info[1] == \"11\":\n            try:\n                self.command_executor.ExecuteInBash('pip list | grep -F adal', raise_exception_on_failure=True)\n            except: \n                raise Exception('SLES 11 environment is missing python-pip and adal')\n        else:\n            self.command_executor.Execute('zypper --gpg-auto-import-keys install -l -y python-pip')\n            self.command_executor.Execute('python -m pip install --upgrade pip')\n            self.command_executor.Execute('python -m pip install adal')\n\n    def install_extras(self):\n        packages = ['cryptsetup', 'lsscsi']\n        cmd = \" \".join((['zypper', 'install', '-l', '-y'] + packages))\n        
self.command_executor.Execute(cmd)\n\n    def update_prereq(self):\n        pass"
  },
  {
    "path": "VMEncryption/main/patch/UbuntuPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\n\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\nfrom CommandExecutor import *\n\n\nclass UbuntuPatching(AbstractPatching):\n    def __init__(self, logger, distro_info):\n        super(UbuntuPatching, self).__init__(distro_info)\n        self.logger = logger\n        self.command_executor = CommandExecutor(logger)\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n        self.touch_path = '/usr/bin/touch'\n\n    def install_adal(self):\n        return_code = self.command_executor.Execute('apt-get install -y python-pip')\n        # If install fails, try running apt-get update and then try install again\n        if return_code != 0:\n            self.logger.log('python-pip installation failed. Retrying installation after running update')\n            return_code = self.command_executor.Execute('apt-get -o Acquire::ForceIPv4=true -y update', timeout=30)\n            # Fail early if apt-get update times out.\n            if return_code == -9:\n                msg = \"Command: apt-get -o Acquire::ForceIPv4=true -y update timed out. 
Make sure apt-get is configured correctly.\"\n                raise Exception(msg)\n            self.command_executor.Execute('apt-get install -y python-pip')\n        self.command_executor.Execute('python -m pip install --upgrade pip')\n        self.command_executor.Execute('python -m pip install --upgrade setuptools')\n        self.command_executor.Execute('python -m pip install adal')\n\n    def install_extras(self):\n        \"\"\"\n        Install sg_dd because the default dd does not support sparse writes.\n        \"\"\"\n        packages = ['at',\n                    'cryptsetup-bin',\n                    'lsscsi',\n                    'python-parted',\n                    'python-six',\n                    'procps',\n                    'psmisc']\n\n        cmd = \" \".join(['apt-get', 'install', '-y'] + packages)\n        return_code = self.command_executor.Execute(cmd)\n\n        # If install fails, try running apt-get update and then try install again\n        if return_code != 0:\n            self.logger.log('prereq packages installation failed. Retrying installation after running update')\n            return_code = self.command_executor.Execute('apt-get -o Acquire::ForceIPv4=true -y update')\n            # Fail early if apt-get update times out.\n            if return_code == -9:\n                msg = \"Command: apt-get -o Acquire::ForceIPv4=true -y update timed out. Make sure apt-get is configured correctly.\"\n                raise Exception(msg)\n            cmd = \" \".join(['apt-get', 'install', '-y'] + packages)\n            self.command_executor.Execute(cmd)\n\n    def update_prereq(self):\n        pass"
  },
  {
    "path": "VMEncryption/main/patch/__init__.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\nimport os\nimport re\nimport platform\n\nfrom UbuntuPatching import UbuntuPatching\nfrom debianPatching import debianPatching\nfrom redhatPatching import redhatPatching\nfrom centosPatching import centosPatching\nfrom SuSEPatching import SuSEPatching\nfrom oraclePatching import oraclePatching\n\n# Define the function in case waagent(<2.0.4) doesn't have DistInfo()\ndef DistInfo():\n    if 'FreeBSD' in platform.system():\n        release = re.sub('\\-.*\\Z', '', str(platform.release()))\n        distinfo = ['FreeBSD', release]\n        return distinfo\n    if 'linux_distribution' in dir(platform):\n        distinfo = list(platform.linux_distribution(full_distribution_name=0))\n        # remove trailing whitespace in distro name\n        distinfo[0] = distinfo[0].strip()\n        return distinfo\n    else:\n        return platform.dist()\n\ndef GetDistroPatcher(logger):\n    \"\"\"\n    Return DistroPatcher object.\n    NOTE: Logging is not initialized at this point.\n    \"\"\"\n    dist_info = DistInfo()\n    if 'Linux' in platform.system():\n        Distro = dist_info[0]\n    else: # I know this is not Linux!\n        if 'FreeBSD' in platform.system():\n            Distro = platform.system()\n    Distro = Distro.strip('\"')\n    Distro = Distro.strip(' ')\n    patching_class_name = Distro + 'Patching'\n\n    if not globals().has_key(patching_class_name):\n        logger.log('{0} is not a supported distribution.'.format(Distro))\n        return None\n    patchingInstance = globals()[patching_class_name](logger, dist_info)\n    return patchingInstance"
  },
  {
    "path": "VMEncryption/main/patch/centosPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport os.path\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\n\nfrom redhatPatching import redhatPatching\nfrom Common import *\nfrom CommandExecutor import *\n\nclass centosPatching(redhatPatching):\n    def __init__(self, logger, distro_info):\n        super(centosPatching, self).__init__(logger, distro_info)\n        self.logger = logger\n        self.command_executor = CommandExecutor(logger)\n        if distro_info[1] in [\"6.9\", \"6.8\", \"6.7\", \"6.6\", \"6.5\"]:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n    \n    def install_adal(self):\n        # epel-release and python-pip >= version 8.1 are adal prerequisites\n        # https://github.com/AzureAD/azure-activedirectory-library-for-python/\n        self.command_executor.Execute(\"yum install -y epel-release\")\n        self.command_executor.Execute(\"yum install -y python-pip\")\n        self.command_executor.Execute(\"python -m pip install --upgrade pip\")\n        self.command_executor.Execute(\"python -m pip install adal\")\n\n    def install_extras(self):\n        packages = ['cryptsetup',\n                    'lsscsi',\n                    'psmisc',\n                    'cryptsetup-reencrypt',\n                    'lvm2',\n                    'uuid',\n                    'at',\n                    'patch',\n                    'procps-ng',\n                    
'util-linux',\n                    'pyparted']\n\n        if self.distro_info[1].startswith(\"6.\"):\n            packages.append('python-six')\n            packages.remove('cryptsetup')\n            packages.remove('procps-ng')\n            packages.remove('util-linux')\n\n        if self.command_executor.Execute(\"rpm -q \" + \" \".join(packages)):\n            self.command_executor.Execute(\"yum install -y \" + \" \".join(packages))\n\n    def update_prereq(self):\n        if (self.distro_info[1].startswith('7.')):\n            dracut_repack_needed = False\n\n            if os.path.exists(\"/lib/dracut/modules.d/91lvm/\"):\n                # If 90lvm already exists 91lvm will cause problems, so remove it.\n                if os.path.exists(\"/lib/dracut/modules.d/90lvm/\"):\n                    shutil.rmtree(\"/lib/dracut/modules.d/91lvm/\")\n                else:\n                    os.rename(\"/lib/dracut/modules.d/91lvm/\",\"/lib/dracut/modules.d/90lvm/\")\n                dracut_repack_needed = True\n\n            if redhatPatching.is_old_patching_system():\n                redhatPatching.remove_old_patching_system(self.logger, self.command_executor)\n                dracut_repack_needed = True\n\n            if os.path.exists(\"/lib/dracut/modules.d/91ade/\"):\n                shutil.rmtree(\"/lib/dracut/modules.d/91ade/\")\n                dracut_repack_needed = True\n\n            if os.path.exists(\"/dev/mapper/osencrypt\"):\n                #TODO: only do this if needed (if code and existing module are different)\n                redhatPatching.add_91_ade_dracut_module(self.command_executor)\n                dracut_repack_needed = True\n\n            if dracut_repack_needed:\n                self.command_executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n"
  },
  {
    "path": "VMEncryption/main/patch/debianPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\n\n\nclass debianPatching(AbstractPatching):\n    def __init__(self, logger, distro_info):\n        super(debianPatching, self).__init__(distro_info)\n        self.logger = logger\n        self.base64_path = '/usr/bin/base64'\n        self.bash_path = '/bin/bash'\n        self.blkid_path = '/sbin/blkid'\n        self.cat_path = '/bin/cat'\n        self.cryptsetup_path = '/sbin/cryptsetup'\n        self.dd_path = '/bin/dd'\n        self.e2fsck_path = '/sbin/e2fsck'\n        self.echo_path = '/bin/echo'\n        self.lsblk_path = '/bin/lsblk'\n        self.lsscsi_path = '/usr/bin/lsscsi'\n        self.mkdir_path = '/bin/mkdir'\n        self.mount_path = '/bin/mount'\n        self.openssl_path = '/usr/bin/openssl'\n        self.resize2fs_path = '/sbin/resize2fs'\n        self.umount_path = '/bin/umount'\n\n    def install_adal(self):\n        pass\n\n    def install_extras(self):\n        pass\n\n    def update_prereq(self):\n        pass"
  },
  {
    "path": "VMEncryption/main/patch/oraclePatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nfrom redhatPatching import redhatPatching\nfrom Common import *\n\n\nclass oraclePatching(redhatPatching):\n    def __init__(self,logger,distro_info):\n        super(oraclePatching,self).__init__(logger,distro_info)\n        self.logger = logger\n        if(distro_info is not None and len(distro_info) > 0 and distro_info[1].startswith(\"6.\")):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_adal(self):\n        pass\n\n    def install_extras(self):\n        common_extras = ['cryptsetup','lsscsi']\n        for extra in common_extras:\n            self.logger.log(\"installation for \" + extra + 'result is ' + str(subprocess.call(['yum', 'install','-y', extra])))\n\n    def update_prereq(self):\n        pass"
  },
  {
    "path": "VMEncryption/main/patch/redhatPatching.py",
    "content": "#!/usr/bin/python\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.4+\n\n\nimport os\nimport os.path\nimport sys\nimport imp\nimport base64\nimport re\nimport json\nimport platform\nimport shutil\nimport time\nimport traceback\nimport datetime\nimport subprocess\nimport inspect\n\nfrom AbstractPatching import AbstractPatching\nfrom Common import *\nfrom CommandExecutor import *\n\nclass redhatPatching(AbstractPatching):\n    def __init__(self, logger, distro_info):\n        super(redhatPatching, self).__init__(distro_info)\n        self.logger = logger\n        self.command_executor = CommandExecutor(logger)\n        self.distro_info = distro_info\n        if distro_info[1].startswith(\"6.\"):\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/bin/bash'\n            self.blkid_path = '/sbin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/sbin/cryptsetup'\n            self.dd_path = '/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/bin/lsblk' \n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/bin/mkdir'\n            self.mount_path = '/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.touch_path = '/bin/touch'\n            self.umount_path = '/bin/umount'\n        else:\n            self.base64_path = '/usr/bin/base64'\n            self.bash_path = '/usr/bin/bash'\n            self.blkid_path = '/usr/bin/blkid'\n            self.cat_path = '/bin/cat'\n            self.cryptsetup_path = '/usr/sbin/cryptsetup'\n            self.dd_path = '/usr/bin/dd'\n            self.e2fsck_path = '/sbin/e2fsck'\n            self.echo_path = '/usr/bin/echo'\n            self.getenforce_path = '/usr/sbin/getenforce'\n            self.setenforce_path = '/usr/sbin/setenforce'\n            self.lsblk_path = '/usr/bin/lsblk'\n            self.lsscsi_path = '/usr/bin/lsscsi'\n            self.mkdir_path = '/usr/bin/mkdir'\n            self.mount_path = '/usr/bin/mount'\n            self.openssl_path = '/usr/bin/openssl'\n            self.resize2fs_path = '/sbin/resize2fs'\n            self.touch_path = '/usr/bin/touch'\n            self.umount_path = '/usr/bin/umount'\n\n    def install_adal(self):\n        # On RHEL, RHSCL pip >= version 8.1 is the supported mechanism to install adal \n        # https://access.redhat.com/solutions/1519803 \n        self.command_executor.Execute('yum install -y python27-python-pip')\n        self.command_executor.Execute('scl enable python27 \"pip install --upgrade pip\"')\n        self.command_executor.Execute('scl enable python27 \"pip install adal\"')\n\n    def install_extras(self):\n        packages = 
['cryptsetup',\n                    'lsscsi',\n                    'psmisc',\n                    'cryptsetup-reencrypt',\n                    'lvm2',\n                    'uuid',\n                    'at',\n                    'patch',\n                    'procps-ng',\n                    'util-linux']\n\n        if self.distro_info[1].startswith(\"6.\"):\n            packages.remove('cryptsetup')\n            packages.remove('procps-ng')\n            packages.remove('util-linux')\n\n        if self.command_executor.Execute(\"rpm -q \" + \" \".join(packages)):\n            self.command_executor.Execute(\"yum install -y \" + \" \".join(packages))\n\n    def update_prereq(self):\n        if (self.distro_info[1].startswith('7.')):\n            dracut_repack_needed = False\n\n            if os.path.exists(\"/lib/dracut/modules.d/91lvm/\"):\n                # If 90lvm already exists 91lvm will cause problems, so remove it.\n                if os.path.exists(\"/lib/dracut/modules.d/90lvm/\"):\n                    shutil.rmtree(\"/lib/dracut/modules.d/91lvm/\")\n                else:\n                    os.rename(\"/lib/dracut/modules.d/91lvm/\",\"/lib/dracut/modules.d/90lvm/\")\n                dracut_repack_needed = True\n\n            if redhatPatching.is_old_patching_system():\n                redhatPatching.remove_old_patching_system(self.logger, self.command_executor)\n                dracut_repack_needed = True\n\n            if os.path.exists(\"/lib/dracut/modules.d/91ade/\"):\n                shutil.rmtree(\"/lib/dracut/modules.d/91ade/\")\n                dracut_repack_needed = True\n\n            if os.path.exists(\"/dev/mapper/osencrypt\"):\n                #TODO: only do this if needed (if code and existing module are different)\n                redhatPatching.add_91_ade_dracut_module(self.command_executor)\n                dracut_repack_needed = True\n\n            if dracut_repack_needed:\n                self.command_executor.ExecuteInBash(\"/usr/sbin/dracut -f -v --kver `grubby --default-kernel | sed 's|/boot/vmlinuz-||g'`\", True)\n\n    @staticmethod\n    def is_old_patching_system():\n        # Execute unpatching commands only if all the three patch files are present.\n        if os.path.exists(\"/lib/dracut/modules.d/90crypt/cryptroot-ask.sh.orig\"):\n            if os.path.exists(\"/lib/dracut/modules.d/90crypt/module-setup.sh.orig\"):\n                if os.path.exists(\"/lib/dracut/modules.d/90crypt/parse-crypt.sh.orig\"):\n                    return True\n        return False\n\n    @staticmethod\n    def append_contents_to_file(contents, path):\n        with open(path, 'a') as f:\n            f.write(contents)\n\n    @staticmethod\n    def add_91_ade_dracut_module(command_executor):\n        scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n        ademoduledir = os.path.join(scriptdir, '../oscrypto/91ade')\n        dracutmodulesdir = '/lib/dracut/modules.d'\n        udevaderulepath = os.path.join(dracutmodulesdir, '91ade/50-udev-ade.rules')\n\n        proc_comm = ProcessCommunicator()\n\n        command_executor.Execute('cp -r {0} /lib/dracut/modules.d/'.format(ademoduledir), True)\n\n        crypt_cmd = \"cryptsetup status osencrypt | grep device:\"\n        command_executor.ExecuteInBash(crypt_cmd, communicator=proc_comm, suppress_logging=True)\n        matches = re.findall(r'device:(.*)', proc_comm.stdout)\n        if not matches:\n            raise Exception(\"Could not find device in cryptsetup output\")\n        root_device 
= matches[0].strip()\n\n        udevadm_cmd = \"udevadm info --attribute-walk --name={0}\".format(root_device)\n        command_executor.Execute(command_to_execute=udevadm_cmd, raise_exception_on_failure=True, communicator=proc_comm)\n        matches = re.findall(r'ATTR{partition}==\"(.*)\"', proc_comm.stdout)\n        if not matches:\n            raise Exception(\"Could not parse ATTR{partition} from udevadm info\")\n        partition = matches[0]\n        sed_cmd = 'sed -i.bak s/ENCRYPTED_DISK_PARTITION/{0}/ \"{1}\"'.format(partition, udevaderulepath)\n        command_executor.Execute(command_to_execute=sed_cmd, raise_exception_on_failure=True)\n        sed_grub_cmd = \"sed -i.bak '/osencrypt-locked/d' /etc/crypttab\"\n        command_executor.Execute(command_to_execute=sed_grub_cmd, raise_exception_on_failure=True)\n\n\n    @staticmethod\n    def remove_old_patching_system(logger, command_executor):\n        logger.log(\"Removing patches and recreating initrd image\")\n\n        command_executor.Execute('mv /lib/dracut/modules.d/90crypt/cryptroot-ask.sh.orig /lib/dracut/modules.d/90crypt/cryptroot-ask.sh', False)\n        command_executor.Execute('mv /lib/dracut/modules.d/90crypt/module-setup.sh.orig /lib/dracut/modules.d/90crypt/module-setup.sh', False)\n        command_executor.Execute('mv /lib/dracut/modules.d/90crypt/parse-crypt.sh.orig /lib/dracut/modules.d/90crypt/parse-crypt.sh', False)\n        \n        sed_grub_cmd = \"sed -i.bak '/rd.luks.uuid=osencrypt/d' /etc/default/grub\"\n        command_executor.Execute(sed_grub_cmd)\n    \n        redhatPatching.append_contents_to_file('\\nGRUB_CMDLINE_LINUX+=\" rd.debug\"\\n', \n                                               '/etc/default/grub')\n\n        command_executor.Execute('grub2-mkconfig -o /boot/grub2/grub.cfg', True)"
  },
  {
    "path": "VMEncryption/references",
    "content": "Utils/\n"
  },
  {
    "path": "VMEncryption/requirements.txt",
    "content": "funcsigs==1.0.2\nmock==2.0.0\npbr==4.3.0\nsix==1.11.0\n"
  },
  {
    "path": "VMEncryption/setup.py",
    "content": "#!/usr/bin/env python\n#\n# VM Backup extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# To build:\n# python setup.py sdist\n#\n# To install:\n# python setup.py install\n#\n# To register (only needed once):\n# python setup.py register\n#\n# To upload:\n# python setup.py sdist upload\n\nimport codecs\nimport json\nimport os\nimport subprocess\nfrom distutils.core import setup\nfrom zipfile import ZipFile\nfrom shutil import copy2\n\nfrom main.Common import CommonVariables\n\npackages_array = []\nmain_folder = 'main'\nmain_entry = main_folder + '/handle.py'\npackages_array.append(main_folder)\n\npatch_folder = main_folder + '/patch'\npackages_array.append(patch_folder)\n\noscrypto_folder = main_folder + '/oscrypto'\npackages_array.append(oscrypto_folder)\n\npackages_array.append(oscrypto_folder + '/91ade')\npackages_array.append(oscrypto_folder + '/rhel_72_lvm')\npackages_array.append(oscrypto_folder + '/rhel_72_lvm/encryptstates')\npackages_array.append(oscrypto_folder + '/rhel_72')\npackages_array.append(oscrypto_folder + '/rhel_72/encryptstates')\npackages_array.append(oscrypto_folder + '/rhel_68')\npackages_array.append(oscrypto_folder + '/rhel_68/encryptstates')\npackages_array.append(oscrypto_folder + '/centos_68')\npackages_array.append(oscrypto_folder + '/centos_68/encryptstates')\npackages_array.append(oscrypto_folder + '/ubuntu_1604')\npackages_array.append(oscrypto_folder + '/ubuntu_1604/encryptstates')\npackages_array.append(oscrypto_folder + '/ubuntu_1404')\npackages_array.append(oscrypto_folder + '/ubuntu_1404/encryptstates')\n\ntransitions_folder = 'transitions/transitions'\npackages_array.append(transitions_folder)\n\n\"\"\"\ncopy the dependency to the local\n\"\"\"\n\n\"\"\"\ncopy the utils lib to local\n\"\"\"\ntarget_utils_path = main_folder + '/' + CommonVariables.utils_path_name\npackages_array.append(target_utils_path)\n\n\n\"\"\"\ngenerate the HandlerManifest.json file.\n\"\"\"\nmanifest_obj = [{\n  \"name\": CommonVariables.extension_name,\n  \"version\": \"1.0\",\n  \"handlerManifest\": {\n    \"installCommand\": \"extension_shim.sh -c {0} --install\".format(main_entry),\n    \"uninstallCommand\": \"extension_shim.sh -c {0} --uninstall\".format(main_entry),\n    \"updateCommand\": \"extension_shim.sh -c {0} --update\".format(main_entry),\n    \"enableCommand\": \"extension_shim.sh -c {0} --enable\".format(main_entry),\n    \"disableCommand\": \"extension_shim.sh -c {0} --disable\".format(main_entry),\n    \"rebootAfterInstall\": False,\n    \"reportHeartbeat\": False\n  }\n}]\n\nmanifest_str = json.dumps(manifest_obj, sort_keys = True, indent = 4)\nmanifest_file = open(\"HandlerManifest.json\", \"w\") \nmanifest_file.write(manifest_str)\nmanifest_file.close()\n\n\n\"\"\"\ngenerate the extension xml file\n\"\"\"\nextension_xml_file_content = \"\"\"<ExtensionImage 
xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.Azure.Security</ProviderNameSpace>\n<Type>%s</Type>\n<Version>%s</Version>\n<Label>%s</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink></MediaLink>\n<Description>%s</Description>\n<IsInternalExtension>true</IsInternalExtension>\n<Eula>https://azure.microsoft.com/en-us/support/legal/</Eula>\n<PrivacyUri>https://azure.microsoft.com/en-us/support/legal/</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<SupportedOS>Linux</SupportedOS>\n<CompanyName>Microsoft</CompanyName>\n<!--%%REGIONS%%-->\n</ExtensionImage>\"\"\" % (CommonVariables.extension_type, CommonVariables.extension_version, CommonVariables.extension_label, CommonVariables.extension_description)\n\nextension_xml_file = open('manifest.xml', 'w')\nextension_xml_file.write(extension_xml_file_content)\nextension_xml_file.close()\n\n\"\"\"\nsetup script, to package the files up\n\"\"\"\nsetup(name = CommonVariables.extension_name,\n      version = CommonVariables.extension_version,\n      description=CommonVariables.extension_description,\n      license='Apache License 2.0',\n      author='Microsoft Corporation',\n      author_email='andliu@microsoft.com',\n      url='https://github.com/Azure/azure-linux-extensions',\n      classifiers = ['Development Status :: 5 - Production/Stable',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.3',\n        'Programming Language :: Python :: 3.4',\n        'License :: OSI Approved :: Apache Software License'],\n      packages = packages_array)\n\n\"\"\"\nunzip the package files and re-package it.\n\"\"\"\ntarget_zip_file_location = './dist/'\ntarget_folder_name = CommonVariables.extension_name + '-' + str(CommonVariables.extension_version)\ntarget_zip_file_path = target_zip_file_location + target_folder_name + '.zip'\n\ntarget_zip_file = ZipFile(target_zip_file_path)\ntarget_zip_file.extractall(target_zip_file_location)\n\ndef dos2unix(src):\n    args = [\"dos2unix\", src]\n    devnull = open(os.devnull, 'w')\n    child = subprocess.Popen(args, stdout=devnull, stderr=devnull)\n    print('dos2unix %s ' % (src))\n    child.wait()\n\ndef remove_utf8_bom(src):\n    print('removing utf-8 bom from %s ' % (src))\n\n    contents = None\n\n    with open(src, \"r+b\") as fp:\n        bincontents = fp.read()\n        if bincontents[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:\n            contents = bincontents.decode('utf-8-sig')\n        elif bincontents[:3] == '\\xef\\x00\\x00':\n            contents = bincontents[3:].decode('utf-8')\n        else:\n            contents = bincontents.decode('utf8')\n\n    with open(src, \"wb\") as fp:\n        fp.write(contents.encode('utf-8'))\n\ndef zip(src, dst):\n    zf = ZipFile(\"%s\" % (dst), \"w\")\n    abs_src = os.path.abspath(src)\n    for dirname, subdirs, files in os.walk(src):\n        for filename in files:\n            absname = os.path.abspath(os.path.join(dirname, filename))\n            dos2unix(absname)\n            remove_utf8_bom(absname)\n            arcname = absname[len(abs_src) + 1:]\n            print('zipping %s as %s' % (os.path.join(dirname, filename), arcname))\n            zf.write(absname, arcname)\n    zf.close()\n\nfinal_folder_path = target_zip_file_location + target_folder_name\n# 
Manually add the SupportedOS.json file, as setup appears to copy only .py files\ncopy2(main_folder + '/SupportedOS.json', final_folder_path + '/' + main_folder)\nzip(final_folder_path, target_zip_file_path)\n\n"
  },
  {
    "path": "VMEncryption/test/__init__.py",
    "content": "#\n# Copyright 2018 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "VMEncryption/test/console_logger.py",
    "content": "#!/usr/bin/env python\n#\n# *********************************************************\n# Copyright (c) Microsoft. All rights reserved.\n#\n# Apache 2.0 License\n#\n# You may obtain a copy of the License at\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n# *********************************************************\n\nimport os\nimport string\nimport json\n\nclass HandlerContext:\n    def __init__(self, name):\n        self._name = name\n        self._version = '0.0'\n        return\n\nclass ConsoleLogger(object):\n    def __init__(self):\n        self.current_process_id = os.getpid()\n        self._context = HandlerContext(\"test\")\n        self._context._config = json.loads('{\"runtimeSettings\": [{\"handlerSettings\": {\"publicSettings\": {\"EncryptionOperation\": \"EnableEncryptionFormatAll\"}}}]}')\n\n    def log(self, msg, level='Info'):\n        \"\"\" simple logging mechanism to print to stdout \"\"\"\n        log_msg = \"{0}: [{1}] {2}\".format(self.current_process_id, level, msg)\n        print(log_msg)\n\n    def error(self, msg):\n        log(msg,'Error')\n"
  },
  {
    "path": "VMEncryption/test/test_check_util.py",
    "content": "import unittest\nimport mock\n\nfrom main.check_util import CheckUtil\nfrom main.Common import CommonVariables\nfrom StringIO import StringIO\nfrom console_logger import ConsoleLogger\n\nclass MockDistroPatcher:\n    def __init__(self, name, version, kernel):\n        self.distro_info = [None] * 2\n        self.distro_info[0] = name\n        self.distro_info[1] = version\n        self.kernel_version = kernel\n\nclass TestCheckUtil(unittest.TestCase):\n    \"\"\" unit tests for functions in the check_util module \"\"\"\n    def setUp(self):\n        self.logger = ConsoleLogger()\n        self.cutil = CheckUtil(self.logger)\n\n    def get_mock_filestream(self, somestring):\n        stream = StringIO()\n        stream.write(somestring)\n        stream.seek(0)\n        return stream\n\n    @mock.patch('os.path.isfile', return_value=False)\n    @mock.patch('os.path.isdir', return_value=False)\n    def test_appcompat(self, os_path_isdir, os_path_isfile):\n        self.assertFalse(self.cutil.is_app_compat_issue_detected())\n\n    @mock.patch('os.popen')\n    def test_memory(self, os_popen):\n        output = \"8000000\"\n        os_popen.return_value = self.get_mock_filestream(output)\n        self.assertFalse(self.cutil.is_insufficient_memory())\n\n    @mock.patch('os.popen')\n    def test_memory_low_memory(self, os_popen):\n        output = \"6000000\"\n        os_popen.return_value = self.get_mock_filestream(output)\n        self.assertTrue(self.cutil.is_insufficient_memory())\n\n    def test_is_kv_url(self):\n        dns_suffix_list = [\"vault.azure.net\", \"vault.azure.cn\", \"vault.usgovcloudapi.net\", \"vault.microsoftazure.de\"]\n\n        for dns_suffix in dns_suffix_list:\n            self.cutil.check_kv_url(\"https://testkv.\" + dns_suffix + \"/\", \"\")\n            self.cutil.check_kv_url(\"https://test-kv2.\" + dns_suffix + \"/\", \"\")\n            self.cutil.check_kv_url(\"https://test-kv2.\" + dns_suffix + \":443/\", \"\")\n            self.cutil.check_kv_url(\"https://test-kv2.\" + dns_suffix + \":443/keys/kekname/kekversion\", \"\")\n            self.assertRaises(Exception, self.cutil.check_kv_url, \"http://testkv.\" + dns_suffix + \"/\", \"\")\n            # self.assertRaises(Exception, self.cutil.check_kv_url, \"https://https://testkv.\" + dns_suffix + \"/\", \"\")\n            # self.assertRaises(Exception, self.cutil.check_kv_url, \"https://testkv.testkv.\" + dns_suffix + \"/\", \"\")\n        # self.assertRaises(Exception, self.cutil.check_kv_url, \"https://testkv.vault.azure.com/\", \"\")\n        self.assertRaises(Exception, self.cutil.check_kv_url, \"https://\", \"\")\n\n    def test_validate_volume_type(self):\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"DATA\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"ALL\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"all\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"Os\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"OS\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"os\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"Data\"})\n        self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: \"data\"})\n        for vt in CommonVariables.SupportedVolumeTypes:\n            self.cutil.validate_volume_type({CommonVariables.VolumeTypeKey: vt})\n\n        self.assertRaises(Exception, 
self.cutil.validate_volume_type, {CommonVariables.VolumeTypeKey: \"NON-OS\"})\n        self.assertRaises(Exception, self.cutil.validate_volume_type, {CommonVariables.VolumeTypeKey: \"\"})\n        self.assertRaises(Exception, self.cutil.validate_volume_type, {CommonVariables.VolumeTypeKey: \"123\"})\n\n    @mock.patch('main.check_util.CheckUtil.validate_memory_os_encryption')\n    @mock.patch('main.CommandExecutor.CommandExecutor.Execute', return_value=0)\n    def test_fatal_checks(self, mock_exec, mock_validate_memory):\n        mock_distro_patcher = MockDistroPatcher('Ubuntu', '14.04', '4.15')\n        self.cutil.precheck_for_fatal_failures({\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.QueryEncryptionStatus\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.cutil.precheck_for_fatal_failures({\n            CommonVariables.VolumeTypeKey: \"DATA\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.DisableEncryption\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.cutil.precheck_for_fatal_failures({\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.cutil.precheck_for_fatal_failures({\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormat\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.cutil.precheck_for_fatal_failures({\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-256',\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {})\n        self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-256',\n            CommonVariables.AADClientIDKey: \"INVALIDKEY\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, mock_distro_patcher)\n        self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {\n            CommonVariables.VolumeTypeKey: \"123\",\n            
CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n        mock_distro_patcher = MockDistroPatcher('Ubuntu', '14.04', '4.4')\n        self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, { \"os\": \"NotEncrypted\" }, mock_distro_patcher)\n\n    def test_mount_scheme(self):\n        proc_mounts_output = \"\"\"\n        sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\n        proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\n        udev /dev devtmpfs rw,relatime,size=4070564k,nr_inodes=1017641,mode=755 0 0\n        devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\n        tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=815720k,mode=755 0 0\n        /dev/sda1 / ext4 rw,relatime,discard,data=ordered 0 0\n        none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0\n        none /sys/fs/fuse/connections fusectl rw,relatime 0 0\n        none /sys/kernel/debug debugfs rw,relatime 0 0\n        none /sys/kernel/security securityfs rw,relatime 0 0\n        none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0\n        none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0\n        none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0\n        none /sys/fs/pstore pstore rw,relatime 0 0\n        systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0\n        /dev/mapper/fee16d98-9c18-4e7d-af70-afd7f3dfb2d9 /mnt/resource ext4 rw,relatime,data=ordered 0 0\n        /dev/mapper/vg0-lv0 /data ext4 rw,relatime,discard,data=ordered 0 0\n        \"\"\"\n        with mock.patch(\"__builtin__.open\", mock.mock_open(read_data=proc_mounts_output)):\n            self.assertFalse(self.cutil.is_unsupported_mount_scheme())\n\n    # Skip LVM OS validation when OS volume is not being targeted\n    def test_skip_lvm_os_check_if_data_only_enable(self):\n        # skip lvm detection if data only \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"DATA\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption})\n\n    def test_skip_lvm_os_check_if_data_only_ef(self):\n        # skip lvm detection if data only \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"DATA\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormat})\n\n    def test_skip_lvm_os_check_if_data_only_efa(self):\n        # skip lvm detection if data only \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"DATA\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll})\n\n    
def test_skip_lvm_os_check_if_data_only_disable(self):\n        # skip lvm detection if data only \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"DATA\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.DisableEncryption})\n\n    def test_skip_lvm_os_check_if_query(self):\n        # skip lvm detection if query status operation is invoked without volume type\n        self.cutil.validate_lvm_os({CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.QueryEncryptionStatus})\n\n    def test_skip_lvm_no_encryption_operation(self):\n        # skip lvm detection if no encryption operation \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"ALL\"})\n\n    def test_skip_lvm_no_volume_type(self):\n        # skip lvm detection if no volume type specified\n        self.cutil.validate_lvm_os({CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll})\n\n    @mock.patch(\"os.system\", return_value=-1)\n    def test_no_lvm_no_config(self, os_system):\n        # simulate no LVM OS, no config \n        self.cutil.validate_lvm_os({})\n\n    @mock.patch(\"os.system\", return_value=0)\n    def test_lvm_no_config(self, os_system):\n        # simulate valid LVM OS, no config\n        self.cutil.validate_lvm_os({})\n\n    @mock.patch(\"os.system\", side_effect=[0, -1])\n    def test_invalid_lvm_no_config(self, os_system):\n        # simulate invalid LVM naming scheme, but no config setting to encrypt OS\n        self.cutil.validate_lvm_os({})\n\n    @mock.patch(\"os.system\", return_value=-1)\n    def test_lvm_os_lvm_absent(self, os_system):\n        # using patched return value of -1, simulate no LVM OS \n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"ALL\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption})\n\n    @mock.patch(\"os.system\", return_value=0)\n    def test_lvm_os_valid(self, os_system):\n        # simulate a valid LVM OS and a valid naming scheme by always returning 0\n        self.cutil.validate_lvm_os({CommonVariables.VolumeTypeKey: \"ALL\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption})\n\n    @mock.patch(\"os.system\", side_effect=[0, -1])\n    def test_lvm_os_lv_missing_expected_name(self, os_system):\n        # using patched side effects, first simulate LVM OS present, then simulate not finding the expected LV name \n        self.assertRaises(Exception, self.cutil.validate_lvm_os, {CommonVariables.VolumeTypeKey: \"ALL\", CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption})\n\n    @mock.patch(\"main.CommandExecutor.CommandExecutor.Execute\", return_value=0)\n    def test_vfat(self, os_system):\n        # simulate call to modprobe vfat that succeeds and returns cleanly from execute \n        self.cutil.validate_vfat()\n\n    @mock.patch(\"main.CommandExecutor.CommandExecutor.Execute\", side_effect=Exception(\"Test\"))\n    def test_no_vfat(self, os_system):\n        # simulate call to modprobe vfat that fails and raises exception from execute \n        self.assertRaises(Exception, self.cutil.validate_vfat) \n\n    def test_validate_aad(self):\n        # positive tests\n        test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = \"00000000-0000-0000-0000-000000000000\"\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryption\n        self.cutil.validate_aad(test_settings)\n\n        
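# client IDs containing lowercase or uppercase hex letters, as in the cases below, should also validate\n        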
test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = \"00000000-0000-aaaa-0000-000000000000\"\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryptionFormat\n        self.cutil.validate_aad(test_settings)\n\n        test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = \"00000000-0000-AAAA-0000-000000000000\"\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryptionFormatAll\n        self.cutil.validate_aad(test_settings)\n\n        test_settings = {} \n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.DisableEncryption\n        self.cutil.validate_aad(test_settings)\n\n        test_settings = {} \n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.QueryEncryptionStatus\n        self.cutil.validate_aad(test_settings)\n\n        # negative tests\n        # settings file that does not include AAD client ID field\n        test_settings = {} \n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryption\n        self.assertRaises(Exception, self.cutil.validate_aad, test_settings)\n\n        # invalid characters in the client ID\n        test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = \"BORKED\"\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryption\n        self.assertRaises(Exception, self.cutil.validate_aad, test_settings)\n\n        # empty string\n        test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = \"\"\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryption\n        self.assertRaises(Exception, self.cutil.validate_aad, test_settings)\n\n        # unicode left and right double quotes (simulating a copy-paste error)\n        test_settings = {} \n        test_settings[CommonVariables.AADClientIDKey] = u'\\u201c' + \"00000000-0000-0000-0000-000000000000\" + u'\\u201d'\n        test_settings[CommonVariables.EncryptionEncryptionOperationKey] = CommonVariables.EnableEncryption\n        self.assertRaises(Exception, self.cutil.validate_aad, test_settings)\n\n    @mock.patch('os.popen')\n    def test_minimum_memory(self, os_popen):\n        output = \"6000000\"\n        os_popen.return_value = self.get_mock_filestream(output)\n        self.assertRaises(Exception, self.cutil.validate_memory_os_encryption, {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"NotEncrypted\" })\n        try:\n            self.cutil.validate_memory_os_encryption( {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',\n            
CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"Encrypted\" })\n        except Exception:\n            self.fail(\"validate_memory_os_encryption threw unexpected exception\\nException message was:\\n\" + str(e))\n        try:\n            output = \"8000000\"\n            os_popen.return_value = self.get_mock_filestream(output)\n            self.cutil.validate_memory_os_encryption( {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"Encrypted\" })\n        except Exception:\n            self.fail(\"validate_memory_os_encryption threw unexpected exception\\nException message was:\\n\" + str(e))\n        try:\n            output = \"8000000\"\n            os_popen.return_value = self.get_mock_filestream(output)\n            self.cutil.validate_memory_os_encryption( {\n            CommonVariables.VolumeTypeKey: \"ALL\",\n            CommonVariables.KeyVaultURLKey: \"https://vaultname.vault.azure.net/\",\n            CommonVariables.KeyEncryptionKeyURLKey: \"https://vaultname.vault.azure.net/keys/keyname/ver\",\n            CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',\n            CommonVariables.AADClientIDKey: \"00000000-0000-0000-0000-000000000000\",\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll\n            }, { \"os\": \"NotEncrypted\" })\n        except Exception:\n            self.fail(\"validate_memory_os_encryption threw unexpected exception\\nException message was:\\n\" + str(e))\n\n    def test_supported_os(self):\n        # test exception is raised for Ubuntu 14.04 kernel version\n        self.assertRaises(Exception, self.cutil.is_supported_os, {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('Ubuntu', '14.04', '4.4'), {\"os\" : \"NotEncrypted\"})\n        # test exception is not raised for Ubuntu 14.04 kernel version 4.15\n        try:\n            self.cutil.is_supported_os( {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('Ubuntu', '14.04', '4.15'), {\"os\" : \"NotEncrypted\"})\n        except Exception as e:\n            self.fail(\"is_unsupported_os threw unexpected exception.\\nException message was:\\n\" + str(e))\n        # test exception is not raised for already encrypted OS volume\n        try:\n            self.cutil.is_supported_os( {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('Ubuntu', '14.04', '4.4'), {\"os\" : \"Encrypted\"})\n        except Exception as e:\n            self.fail(\"is_unsupported_os threw unexpected exception.\\nException message was:\\n\" + str(e))\n        # test exception is raised for unsupported OS\n        self.assertRaises(Exception, self.cutil.is_supported_os, {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('Ubuntu', '12.04', ''), {\"os\" : \"NotEncrypted\"})\n        
self.assertRaises(Exception, self.cutil.is_supported_os, {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('redhat', '6.7', ''), {\"os\" : \"NotEncrypted\"})\n        self.assertRaises(Exception, self.cutil.is_supported_os, {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('centos', '7.9', ''), {\"os\" : \"NotEncrypted\"})\n        # test exception is not raised for supported OS\n        try:\n            self.cutil.is_supported_os( {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('Ubuntu', '18.04', ''), {\"os\" : \"NotEncrypted\"})\n        except Exception as e:\n            self.fail(\"is_unsupported_os threw unexpected exception.\\nException message was:\\n\" + str(e))\n        try:\n            self.cutil.is_supported_os( {\n            CommonVariables.VolumeTypeKey: \"ALL\"\n            }, MockDistroPatcher('centos', '7.2.1511', ''), {\"os\" : \"NotEncrypted\"})\n        except Exception as e:\n            self.fail(\"is_unsupported_os threw unexpected exception.\\nException message was:\\n\" + str(e))\n        # test exception is not raised for DATA volume\n        try:\n            self.cutil.is_supported_os( {\n            CommonVariables.VolumeTypeKey: \"DATA\"\n            }, MockDistroPatcher('SuSE', '12.4', ''), {\"os\" : \"NotEncrypted\"})\n        except Exception as e:\n            self.fail(\"is_unsupported_os threw unexpected exception.\\nException message was:\\n\" + str(e))\n"
  },
  {
    "path": "VMEncryption/test/test_command_executor.py",
    "content": "import unittest\nfrom main.CommandExecutor import CommandExecutor\nfrom console_logger import ConsoleLogger\n\nclass TestCommandExecutor(unittest.TestCase):\n    \"\"\" unit tests for functions in the CommandExecutor module \"\"\"\n    def setUp(self):\n        self.logger = ConsoleLogger()\n        self.cmd_executor = CommandExecutor(self.logger)\n\n    def test_command_timeout(self):\n        return_code = self.cmd_executor.Execute('sleep 15', timeout=10)\n        self.assertEqual(return_code, -9, msg=\"The command didn't timeout as expected\")\n\n    def test_command_no_timeout(self):\n        return_code = self.cmd_executor.Execute('sleep 5', timeout=10)\n        self.assertEqual(return_code, 0, msg=\"The command should have completed successfully\")"
  },
  {
    "path": "VMEncryption/test/test_disk_util.py",
    "content": "import unittest\nimport mock\n\nfrom main.Common import CryptItem\nfrom main.EncryptionEnvironment import EncryptionEnvironment\nfrom main.DiskUtil import DiskUtil\nfrom console_logger import ConsoleLogger\nfrom test_utils import MockDistroPatcher\n\n\nclass TestDiskUtil(unittest.TestCase):\n    \"\"\" unit tests for functions in the CryptMountConfig module \"\"\"\n    def setUp(self):\n        self.logger = ConsoleLogger()\n        self.disk_util = DiskUtil(None, MockDistroPatcher('Ubuntu', '14.04', '4.15'), self.logger, EncryptionEnvironment(None, self.logger))\n\n    def _mock_open_with_read_data_dict(self, open_mock, read_data_dict):\n        open_mock.content_dict = read_data_dict\n\n        def _open_side_effect(filename, mode, *args, **kwargs):\n            read_data = open_mock.content_dict.get(filename)\n            mock_obj = mock.mock_open(read_data=read_data)\n            handle = mock_obj.return_value\n\n            def write_handle(data, *args, **kwargs):\n                if 'a' in mode:\n                    open_mock.content_dict[filename] += data\n                else:\n                    open_mock.content_dict[filename] = data\n\n            def write_lines_handle(data, *args, **kwargs):\n                if 'a' in mode:\n                    open_mock.content_dict[filename] += \"\".join(data)\n                else:\n                    open_mock.content_dict[filename] = \"\".join(data)\n            handle.write.side_effect = write_handle\n            handle.writelines.side_effect = write_lines_handle\n            return handle\n\n        open_mock.side_effect = _open_side_effect\n\n    def _create_expected_crypt_item(self,\n                                    mapper_name=None,\n                                    dev_path=None,\n                                    uses_cleartext_key=None,\n                                    luks_header_path=None,\n                                    mount_point=None,\n                                    file_system=None,\n                                    current_luks_slot=None):\n        crypt_item = CryptItem()\n        crypt_item.mapper_name = mapper_name\n        crypt_item.dev_path = dev_path\n        crypt_item.uses_cleartext_key = uses_cleartext_key\n        crypt_item.luks_header_path = luks_header_path\n        crypt_item.mount_point = mount_point\n        crypt_item.file_system = file_system\n        crypt_item.current_luks_slot = current_luks_slot\n        return crypt_item\n\n    def test_parse_crypttab_line(self):\n        # empty line\n        line = \"\"\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(None, crypt_item)\n\n        # line with not enough entries\n        line = \"mapper_name dev_path\"\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(None, crypt_item)\n\n        # commented out line\n        line = \"# mapper_name dev_path\"\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(None, crypt_item)\n\n        # An unfamiliar key_file_path implies that we shouln't be processing this crypttab line\n        line = \"mapper_name /dev/dev_path /non_managed_key_file_path\"\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(None, crypt_item)\n\n        # a bare bones crypttab line\n        line = \"mapper_name /dev/dev_path /mnt/azure_bek_disk/LinuxPassPhraseFileName luks\"\n        expected_crypt_item = 
self._create_expected_crypt_item(mapper_name=\"mapper_name\",\n                                                               dev_path=\"/dev/dev_path\")\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(str(expected_crypt_item), str(crypt_item))\n\n        # a line that implies a cleartext key\n        line = \"mapper_name /dev/dev_path /var/lib/azure_disk_encryption_config/cleartext_key_mapper_name luks\"\n        expected_crypt_item = self._create_expected_crypt_item(mapper_name=\"mapper_name\",\n                                                               dev_path=\"/dev/dev_path\",\n                                                               uses_cleartext_key=True)\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(str(expected_crypt_item), str(crypt_item))\n\n        # a line that implies a luks header\n        line = \"mapper_name /dev/dev_path /var/lib/azure_disk_encryption_config/cleartext_key_mapper_name luks,header=headerfile\"\n        expected_crypt_item = self._create_expected_crypt_item(mapper_name=\"mapper_name\",\n                                                               dev_path=\"/dev/dev_path\",\n                                                               uses_cleartext_key=True,\n                                                               luks_header_path=\"headerfile\")\n        crypt_item = self.disk_util.parse_crypttab_line(line)\n        self.assertEquals(str(expected_crypt_item), str(crypt_item))\n\n    @mock.patch('__builtin__.open')\n    @mock.patch('os.path.exists', return_value=True)\n    def test_should_use_azure_crypt_mount(self, exists_mock, open_mock):\n        # if the acm file exists and has only a root disk\n        acm_contents = \"\"\"\n        osencrypt /dev/dev_path None / ext4 False 0\n        \"\"\"\n        mock.mock_open(open_mock, acm_contents)\n        self.assertFalse(self.disk_util.should_use_azure_crypt_mount())\n\n        # if the acm file exists and has a data disk\n        acm_contents = \"\"\"\n        mapper_name /dev/dev_path None /mnt/point ext4 False 0\n        mapper_name2 /dev/dev_path2 None /mnt/point2 ext4 False 0\n        \"\"\"\n        mock.mock_open(open_mock, acm_contents)\n        self.assertTrue(self.disk_util.should_use_azure_crypt_mount())\n\n        # empty file\n        mock.mock_open(open_mock, \"\")\n        self.assertFalse(self.disk_util.should_use_azure_crypt_mount())\n\n        # no file\n        exists_mock.return_value = False\n        open_mock.reset_mock()\n        self.assertFalse(self.disk_util.should_use_azure_crypt_mount())\n        open_mock.assert_not_called()\n\n    @mock.patch('os.path.exists', return_value=True)\n    @mock.patch('main.DiskUtil.ProcessCommunicator')\n    @mock.patch('main.CommandExecutor.CommandExecutor', autospec=True)\n    @mock.patch('__builtin__.open')\n    @mock.patch('main.DiskUtil.DiskUtil.should_use_azure_crypt_mount')\n    @mock.patch('main.DiskUtil.DiskUtil.get_encryption_status')\n    @mock.patch('main.DiskUtil.DiskUtil.get_mount_items')\n    def test_get_crypt_items(self, get_mount_items_mock, get_enc_status_mock, use_acm_mock, open_mock, ce_mock, pc_mock, exists_mock):\n\n        self.disk_util.command_executor = ce_mock\n\n        use_acm_mock.return_value = True  # Use the Azure_Crypt_Mount file\n\n        get_enc_status_mock.return_value = \"{\\\"os\\\" : \\\"Encrypted\\\"}\"\n        acm_contents = \"\"\"\n        osencrypt /dev/dev_path None / ext4 True 0\n        
\"\"\"\n        mock.mock_open(open_mock, acm_contents)\n\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([self._create_expected_crypt_item(mapper_name=\"osencrypt\",\n                                                               dev_path=\"/dev/dev_path\",\n                                                               uses_cleartext_key=True,\n                                                               mount_point=\"/\",\n                                                               file_system=\"ext4\",\n                                                               current_luks_slot=0)],\n                             crypt_items)\n\n        ce_mock.ExecuteInBash.return_value = 0  # The grep on cryptsetup succeeds\n        pc_mock.return_value.stdout = \"osencrypt /dev/dev_path\"  # The grep find this line in there\n        mock.mock_open(open_mock, \"\")  # No content in the azure crypt mount file\n        get_mount_items_mock.return_value = [{\"src\": \"/dev/mapper/osencrypt\", \"dest\": \"/\", \"fs\": \"ext4\"}]\n        exists_mock.return_value = False  # No luksheader file found\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([self._create_expected_crypt_item(mapper_name=\"osencrypt\",\n                                                               dev_path=\"/dev/dev_path\",\n                                                               mount_point=\"/\",\n                                                               file_system=\"ext4\")],\n                             crypt_items)\n\n        use_acm_mock.return_value = False  # Now, use the /etc/crypttab file\n        exists_mock.return_value = True  # Crypttab file found\n        self._mock_open_with_read_data_dict(open_mock, {\"/etc/fstab\": \"/dev/mapper/osencrypt / ext4 defaults,nofail 0 0\",\n                                                        \"/etc/crypttab\": \"osencrypt /dev/sda1 /mnt/azure_bek_disk/LinuxPassPhraseFileName luks,discard\"})\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([self._create_expected_crypt_item(mapper_name=\"osencrypt\",\n                                                               dev_path=\"/dev/sda1\",\n                                                               file_system=None,\n                                                               mount_point=\"/\")],\n                             crypt_items)\n\n        # if there was no crypttab entry for osencrypt\n        exists_mock.side_effect = [True, False]  # Crypttab file found but luksheader not found\n        self._mock_open_with_read_data_dict(open_mock, {\"/etc/fstab\": \"/dev/mapper/osencrypt / ext4 defaults,nofail 0 0\", \"/etc/crypttab\": \"\"})\n        ce_mock.ExecuteInBash.return_value = 0  # The grep on cryptsetup succeeds\n        pc_mock.return_value.stdout = \"osencrypt /dev/sda1\"  # The grep find this line in there\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([self._create_expected_crypt_item(mapper_name=\"osencrypt\",\n                                                               dev_path=\"/dev/sda1\",\n                                                               file_system=\"ext4\",\n                                                               mount_point=\"/\")],\n                             crypt_items)\n\n        exists_mock.side_effect = None  # Crypttab file found\n        exists_mock.return_value = True  # Crypttab file found\n        
get_enc_status_mock.return_value = \"{\\\"os\\\" : \\\"NotEncrypted\\\"}\"\n        self._mock_open_with_read_data_dict(open_mock, {\"/etc/fstab\": \"\",\n                                                        \"/etc/crypttab\": \"\"})\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([],\n                             crypt_items)\n\n        self._mock_open_with_read_data_dict(open_mock, {\"/etc/fstab\": \"/dev/mapper/encrypteddatadisk /mnt/datadisk auto defaults,nofail 0 0\",\n                                                        \"/etc/crypttab\": \"encrypteddatadisk /dev/disk/azure/scsi1/lun0 /someplainfile luks\"})\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([],\n                             crypt_items)\n\n        self._mock_open_with_read_data_dict(open_mock, {\"/etc/fstab\": \"/dev/mapper/encrypteddatadisk /mnt/datadisk auto defaults,nofail 0 0\",\n                                                        \"/etc/crypttab\": \"encrypteddatadisk /dev/disk/azure/scsi1/lun0 /mnt/azure_bek_disk/LinuxPassPhraseFileName luks,discard,header=/headerfile\"})\n        crypt_items = self.disk_util.get_crypt_items()\n        self.assertListEqual([self._create_expected_crypt_item(mapper_name=\"encrypteddatadisk\",\n                                                               dev_path=\"/dev/disk/azure/scsi1/lun0\",\n                                                               file_system=None,\n                                                               luks_header_path=\"/headerfile\",\n                                                               mount_point=\"/mnt/datadisk\")],\n                             crypt_items)\n\n    @mock.patch('shutil.copy2', return_value=True)\n    @mock.patch('os.rename', return_value=True)\n    @mock.patch('os.path.exists', return_value=True)\n    @mock.patch('__builtin__.open')\n    @mock.patch('main.DiskUtil.DiskUtil.should_use_azure_crypt_mount', return_value=True)\n    @mock.patch('main.DiskUtil.DiskUtil.get_encryption_status')\n    def test_migrate_crypt_items(self, get_enc_status_mock, use_acm_mock, open_mock, exists_mock, rename_mock, shutil_mock):\n\n        def rename_side_effect(name1, name2):\n            use_acm_mock.return_value = False\n            return True\n        rename_mock.side_effect = rename_side_effect\n        get_enc_status_mock.return_value = \"{\\\"os\\\" : \\\"NotEncrypted\\\"}\"\n\n        # Test 1: migrate an entry\n        self._mock_open_with_read_data_dict(open_mock, {\"/var/lib/azure_disk_encryption_config/azure_crypt_mount\": \"mapper_name /dev/dev_path None /mnt/point ext4 False 0\",\n                                                        \"/etc/fstab.azure.backup\": \"/dev/dev_path /mnt/point ext4 defaults,nofail 0 0\",\n                                                        \"/etc/fstab\": \"\",\n                                                        \"/etc/crypttab\": \"\"})\n        self.disk_util.migrate_crypt_items(\"/test_passphrase_path\")\n        self.assertTrue(\"/dev/mapper/mapper_name /mnt/point\" in open_mock.content_dict[\"/etc/fstab\"])\n        self.assertTrue(\"mapper_name /dev/dev_path /test_passphrase_path\" in open_mock.content_dict[\"/etc/crypttab\"])\n\n        # Test 2: migrate no entry\n        use_acm_mock.return_value = True\n        self._mock_open_with_read_data_dict(open_mock, {\"/var/lib/azure_disk_encryption_config/azure_crypt_mount\": \"\",\n                                                        
\"/etc/fstab.azure.backup\": \"\",\n                                                        \"/etc/fstab\": \"\",\n                                                        \"/etc/crypttab\": \"\"})\n        self.disk_util.migrate_crypt_items(\"/test_passphrase_path\")\n        self.assertTrue(\"\" == open_mock.content_dict[\"/etc/fstab\"].strip())\n        self.assertTrue(\"\" == open_mock.content_dict[\"/etc/crypttab\"].strip())\n\n        # Test 3: skip migrating the OS entry\n        use_acm_mock.return_value = True\n        self._mock_open_with_read_data_dict(open_mock, {\"/var/lib/azure_disk_encryption_config/azure_crypt_mount\": \"osencrypt /dev/dev_path None / ext4 False 0\",\n                                                        \"/etc/fstab.azure.backup\": \"/dev/dev_path / ext4 defaults 0 0\",\n                                                        \"/etc/fstab\": \"\",\n                                                        \"/etc/crypttab\": \"\"})\n        self.disk_util.migrate_crypt_items(\"/test_passphrase_path\")\n        self.assertTrue(\"\" == open_mock.content_dict[\"/etc/fstab\"].strip())\n        self.assertTrue(\"\" == open_mock.content_dict[\"/etc/crypttab\"].strip())\n\n        # Test 4: migrate many entries\n        use_acm_mock.return_value = True\n        acm_contents = \"\"\"\n        mapper_name /dev/dev_path None /mnt/point ext4 False 0\n        mapper_name2 /dev/dev_path2 None /mnt/point2 ext4 False 0\n        \"\"\"\n        fstab_backup_contents = \"\"\"\n        /dev/dev_path /mnt/point ext4 defaults,nofail 0 0\n        /dev/dev_path2 /mnt/point2 ext4 defaults,nofail 0 0\n        \"\"\"\n        self._mock_open_with_read_data_dict(open_mock, {\"/var/lib/azure_disk_encryption_config/azure_crypt_mount\": acm_contents,\n                                                        \"/etc/fstab.azure.backup\": fstab_backup_contents,\n                                                        \"/etc/fstab\": \"\",\n                                                        \"/etc/crypttab\": \"\"})\n        self.disk_util.migrate_crypt_items(\"/test_passphrase_path\")\n        self.assertTrue(\"/dev/mapper/mapper_name /mnt/point ext4 defaults,nofail 0 0\\n\" in open_mock.content_dict[\"/etc/fstab\"])\n        self.assertTrue(\"\\n/dev/mapper/mapper_name2 /mnt/point2 ext4 defaults,nofail 0 0\" in open_mock.content_dict[\"/etc/fstab\"])\n        self.assertTrue(\"\\nmapper_name /dev/dev_path /test_passphrase_path\" in open_mock.content_dict[\"/etc/crypttab\"])\n        self.assertTrue(\"\\nmapper_name2 /dev/dev_path2 /test_passphrase_path\" in open_mock.content_dict[\"/etc/crypttab\"])\n"
  },
  {
    "path": "VMEncryption/test/test_handler_util.py",
    "content": "#!/usr/bin/env python\n#\n# *********************************************************\n# Copyright (c) Microsoft. All rights reserved.\n#\n# Apache 2.0 License\n#\n# You may obtain a copy of the License at\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n# *********************************************************\n\n\"\"\" Unit tests for the HandlerUtil module \"\"\"\n\nimport unittest\nimport os\nimport console_logger\nimport patch\nimport glob\nfrom Utils import HandlerUtil\nfrom tempfile import mkstemp\n\nclass TestHandlerUtil(unittest.TestCase):\n    def setUp(self):\n        self.logger = console_logger.ConsoleLogger()\n        self.distro_patcher = patch.GetDistroPatcher(self.logger)\n        self.hutil = HandlerUtil.HandlerUtility(self.logger.log, self.logger.error, \"AzureDiskEncryptionForLinux\")\n        self.hutil.patching = self.distro_patcher\n        # invoke unit test from within main for setup (to avoid having to change dependencies)\n        # then move cwd to parent to emulate calling convention of guest agent \n        if os.getcwd().endswith('main'):\n            os.chdir(os.path.dirname(os.getcwd()))\n        else:\n            self.logger.log(os.getcwd())\n            \n    def test_parse_config_sp(self):\n        # test 0.1 sp config syntax\n        test_sp = '{\"runtimeSettings\": [{\"handlerSettings\": {\"protectedSettings\": null, \"publicSettings\": {\"VolumeType\": \"OS\", \"KeyEncryptionKeyURL\": \"\", \"KekVaultResourceId\": \"\", \"KeyEncryptionAlgorithm\": \"RSA-OAEP\", \"KeyVaultURL\": \"https://testkv.vault.azure.net/\", \"KeyVaultResourceId\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/testkv\", \"EncryptionOperation\": \"EnableEncryption\"}, \"protectedSettingsCertThumbprint\": null} }]}'\n        self.assertIsNotNone(self.hutil._parse_config(test_sp))\n\n    def test_parse_config_dp_enable(self):\n        # test 1.1 dp config syntax \n        test_dp = '{\"runtimeSettings\": [{\"handlerSettings\": {\"protectedSettings\": \"MIIB8AYJKoZIhvcNAQcDoIIB4TCCAd0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEG5XyHr6J9qxRLVe/RzaobIwDQYJKoZIhvcNAQEBBQAEggEASDt5QPp0i8R408Ho2JNs0gEAKmjo17qg7Wk+Ihy5I3krCHY4pGGzWAXafvZ3Y1rLh7m/k1+uwK94o3taI27NEvz4YAbCkzLdgiNZx3yZdn5KkRzSbakztnf1a/MTEXY0dYjEjK9ZN5H5XiS8OLhpXaOgayaz1ZFS5MnOufBFXWuL2qeYK/txfBXIJujBHru80b+YahwnHU7/nislCslYVxENn9Jp9VpKGEcCeDFo/KKi0BTbpkxPj3OScNcsPuSRUP9xgT/b96bARJKeLjrxHQa398gzp291OlDYTr4sKBPqGNk8wER0aSpOm6igE857YAc0tShKQhGI14jcEHUu2jBrBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECPpjFE+mGCN7gEj0rWo00NbAoQ6VhMnzdnZ3MnKOCjdWr/NTOdTgHMXU732rfDL89dMHLmUnBHq4SyTqIAi0M6sPEJ38anxx/msIQl15/w8qmL8=\", \"publicSettings\": {\"AADClientID\": \"00000000-0000-0000-0000-000000000000\", \"VolumeType\": \"DATA\", \"KeyEncryptionKeyURL\": \"https://testkv.vault.azure.net/keys/adelpackek/a022ed2b1eba4befb0dc9dc07bf33578\", \"KeyEncryptionAlgorithm\": \"RSA-OAEP\", \"KeyVaultURL\": \"https://testkv.vault.azure.net\", \"SequenceVersion\": \"eec80fc4-e0a2-434e-9007-974a150c3407\", \"AADClientCertThumbprint\": null, \"EncryptionOperation\": \"EnableEncryption\"}, 
\"protectedSettingsCertThumbprint\": \"45E4EC25EECAD03EC81F8177CEF16CD3CAF6297A\"} }]}'\n        self.assertIsNotNone(self.hutil._parse_config(test_dp))\n\n    def test_parse_config_dp_query(self):\n        test_dpq = '{\"runtimeSettings\": [{\"handlerSettings\": {\"protectedSettings\": \"MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEG5XyHr6J9qxRLVe/RzaobIwDQYJKoZIhvcNAQEBBQAEggEAE92LccPctK0h52F+WOjKPWat5O3nxjQpsLKquMtwiKsc5BMot8dLEAE1h7V7SJJ8kiGRLS232mwvVbOA+nOs3l1lCUNDnckbzvvuu/rgz+if1sHvYIn0Xd/kXHSMNm9loh9lTLagGblEFxGupcBcsAEptcjL0f7zUG1NrlnKPVDGceOw7I3dQK6X8rPrMHJ8m6wiHpTvjpa/xmG0mrVyOGjJv7cEDnJ0A8pvRHUrZGGuqi/4WeGPGDKQzmVc6O5oGFfke3bAOd9GJxFWhLwZ1lb1XrKNImVDT2vnWWFiy2lKDwUvKSdqRpaqRNr6f7tZcDWiB+v+vZ6V4GC33kT0mDArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECJeXx+KpPZqdgAgiUsAz+Acz6A==\", \"publicSettings\": {\"SequenceVersion\": \"3838692e-4827-4175-8286-86828d199f85\", \"EncryptionOperation\": \"QueryEncryptionStatus\"}, \"protectedSettingsCertThumbprint\": \"45E4EC25EECAD03EC81F8177CEF16CD3CAF6297A\"} }]}'\n        self.assertIsNotNone(self.hutil._parse_config(test_dpq))\n\n    def test_do_parse_context_install(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Install'))\n\n    def test_do_parse_context_enable(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n\n    def test_do_parse_context_enable_encryption(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('EnableEncryption'))\n        \n    def test_do_parse_context_disable(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Disable'))\n\n    def test_do_parse_context_disable_nosettings(self):\n        # simulate missing settings file by adding .bak extension\n        config_dir = os.path.join(os.getcwd(), 'config')\n        settings_files = glob.glob(os.path.join(config_dir, '*.settings'))\n        for settings_file in settings_files:\n            os.rename(settings_file, settings_file + '.bak')\n        try:\n            # test to simulate disable when no settings are available\n            self.hutil.do_parse_context('Disable')\n            self.hutil.archive_old_configs()\n        finally:\n            # restore settings files back to original name\n            for settings_file in settings_files:\n                os.rename(settings_file + '.bak', settings_file)\n\n    def test_do_parse_context_uninstall(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Uninstall'))\n\n    def test_do_parse_context_disable_encryption(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('DisableEncryption'))\n\n    def test_do_parse_context_update_encryption_settings(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('UpdateEncryptionSettings'))\n\n    def test_do_parse_context_update(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Update'))\n\n    def test_do_parse_context_executing(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Executing'))\n\n    def test_try_parse_context(self):\n        self.assertIsNotNone(self.hutil.try_parse_context())\n\n    def test_is_valid_nonquery_true(self):\n        nonquery_settings = '{\"runtimeSettings\": [{\"handlerSettings\": {\"protectedSettingsCertThumbprint\": null, \"publicSettings\": {\"VolumeType\": \"DATA\", \"KekVaultResourceId\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/testkv\", \"EncryptionOperation\": 
\"EnableEncryption\", \"KeyEncryptionAlgorithm\": \"RSA-OAEP\", \"KeyEncryptionKeyURL\": \"https://testkv.vault.azure.net/keys/testkek/805291e00028474a87e302ce507ed049\", \"KeyVaultURL\": \"https://testkv.vault.azure.net\", \"KeyVaultResourceId\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/testkv\", \"SequenceVersion\": \"c8608bb5-df18-43a7-9f0e-dbe09a57fd0b\"}, \"protectedSettings\": null} }]}'\n\n        # use a temp file path for this test, not the config folder\n        tmp_fd, tmp_path = mkstemp(text=True)\n        with os.fdopen(tmp_fd,'w') as f:\n            f.write(nonquery_settings)\n        test_result = self.hutil.is_valid_nonquery(tmp_path)\n        os.remove(tmp_path)\n\n        # assert true, this is not a QueryEncryptionStatus operation\n        self.assertTrue(test_result)\n\n    def test_is_valid_nonquery_false(self):\n        query_settings = '{\"runtimeSettings\": [{\"handlerSettings\": {\"protectedSettings\": \"MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEG5XyHr6J9qxRLVe/RzaobIwDQYJKoZIhvcNAQEBBQAEggEAE92LccPctK0h52F+WOjKPWat5O3nxjQpsLKquMtwiKsc5BMot8dLEAE1h7V7SJJ8kiGRLS232mwvVbOA+nOs3l1lCUNDnckbzvvuu/rgz+if1sHvYIn0Xd/kXHSMNm9loh9lTLagGblEFxGupcBcsAEptcjL0f7zUG1NrlnKPVDGceOw7I3dQK6X8rPrMHJ8m6wiHpTvjpa/xmG0mrVyOGjJv7cEDnJ0A8pvRHUrZGGuqi/4WeGPGDKQzmVc6O5oGFfke3bAOd9GJxFWhLwZ1lb1XrKNImVDT2vnWWFiy2lKDwUvKSdqRpaqRNr6f7tZcDWiB+v+vZ6V4GC33kT0mDArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECJeXx+KpPZqdgAgiUsAz+Acz6A==\", \"publicSettings\": {\"SequenceVersion\": \"3838692e-4827-4175-8286-86828d199f85\", \"EncryptionOperation\": \"QueryEncryptionStatus\"}, \"protectedSettingsCertThumbprint\": \"45E4EC25EECAD03EC81F8177CEF16CD3CAF6297A\"} }]}'\n\n        # use a temp file path for this test, not the config folder\n        tmp_fd, tmp_path = mkstemp(text=True)\n        with os.fdopen(tmp_fd,'w') as f:\n            f.write(query_settings)\n        test_result = self.hutil.is_valid_nonquery(tmp_path)\n        os.remove(tmp_path)\n        \n        # assert false, this is a QueryEncryptionStatus operation\n        self.assertFalse(test_result)\n\n    def test_get_last_nonquery_config_path(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n        self.assertIsNotNone(self.hutil.get_last_nonquery_config_path())\n\n    def test_get_last_config(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n        self.assertIsNotNone(self.hutil.get_last_config(nonquery=False))\n\n    def test_get_last_nonquery_config(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n        config = self.hutil.get_last_config(nonquery=True)\n        self.assertIsNotNone(config)        \n\n    def test_get_handler_env(self):\n        self.assertIsNotNone(self.hutil.get_handler_env())\n\n    def test_archive_old_configs(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n        self.hutil.archive_old_configs()\n\n    def test_archive_old_configs_overwrite_lnq(self):\n        self.assertIsNotNone(self.hutil.do_parse_context('Enable'))\n\n        # this test ensures that the archive_old_configs method will properly overwrite an existing lnq.settings file\n        # with any newer non query settings file that might exist on the system \n\n        # stuff a bogus lnq.settings file in the archived settings folder\n        # and backdate the file time to older than current 
settings prior to testing\n        tmpstr = 'test_archive_old_configs_overwrite_lnq : the contents of this file are intended to be overwritten and never used'\n        if not os.path.exists(self.hutil.config_archive_folder):\n            os.makedirs(self.hutil.config_archive_folder)\n        dest = os.path.join(self.hutil.config_archive_folder, 'lnq.settings')\n        with open(dest, 'w') as f:\n            f.write(tmpstr)\n\n        # backdate the bogus file so any newer nonquery settings file wins\n        os.utime(dest, (0, 0))\n\n        # run the test\n        self.hutil.archive_old_configs()\n\n        # ensure the lnq.settings file was overwritten and no longer holds the bogus content\n        with open(dest) as f:\n            self.assertNotEqual(f.read(), tmpstr)\n"
  },
  {
    "path": "VMEncryption/test/test_resource_disk_util.py",
    "content": "#!/usr/bin/env python\n#\n# *********************************************************\n# Copyright (c) Microsoft. All rights reserved.\n#\n# Apache 2.0 License\n#\n# You may obtain a copy of the License at\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n# *********************************************************\n\n\"\"\" Unit tests for the ResourceDiskUtil module \"\"\"\n\nimport unittest\nimport mock\n\nfrom main.ResourceDiskUtil import ResourceDiskUtil\nfrom main.DiskUtil import DiskUtil\nfrom main.Common import CommonVariables\nfrom console_logger import ConsoleLogger\n\n\nclass TestResourceDiskUtil(unittest.TestCase):\n    def setUp(self):\n        self.logger = ConsoleLogger()\n        self.mock_disk_util = mock.create_autospec(DiskUtil)\n        self.mock_passhprase_filename = \"mock_passphrase_filename\"\n        mock_public_settings = {}\n        self.resource_disk = ResourceDiskUtil(self.logger, self.mock_disk_util, self.mock_passhprase_filename, mock_public_settings, [\"ubuntu\", \"16\"])\n\n    def _test_resource_disk_partition_dependant_method(self, method, mock_partition_exists, mock_execute):\n        \"\"\"\n        A lot of methods have a common pattern [ if (partition_exists()): return execute_something() else return False ]\n        This is a generic method which accepts the mock objects and the method pointer and tests the method.\n        NOTE: make sure its a fresh instance of the mocked Executor (mock_execute)\n        \"\"\"\n        # case 1: partition doesn't exist\n        mock_partition_exists.return_value = False\n        self.assertEqual(method(), False)\n        mock_execute.assert_not_called()\n\n        # case 2: partition exists but call fails\n        mock_partition_exists.return_value = True\n        mock_execute.return_value = -1  # simulate that the internal execute call failed.\n        self.assertEqual(method(), False)\n\n        # case 3: partition exists and call succeeds\n        mock_partition_exists.return_value = True\n        mock_execute.return_value = CommonVariables.process_success  # simulate that the internal execute call succeeded\n        self.assertEqual(method(), True)\n\n    @mock.patch('main.CommandExecutor.CommandExecutor.Execute')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._resource_disk_partition_exists')\n    def test_is_luks_device(self, mock_partition_exists, mock_execute):\n        self._test_resource_disk_partition_dependant_method(self.resource_disk._is_luks_device, mock_partition_exists, mock_execute)\n\n    @mock.patch('main.CommandExecutor.CommandExecutor.Execute')\n    def test_configure_waagent(self, mock_execute):\n        mock_execute.side_effect = [-1,\n                                    0,\n                                    0]\n        self.assertEqual(self.resource_disk._configure_waagent(), False)\n        mock_execute.assert_called_once()\n        self.assertEqual(self.resource_disk._configure_waagent(), True)\n\n    def test_is_plain_mounted(self):\n        self.resource_disk.disk_util.get_mount_items.return_value = []\n        self.assertEqual(self.resource_disk._is_plain_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value 
= [{\"src\": \"/dev/dm-0\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_plain_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/mapper/something\", \"dest\": \"/mnt/\"}]\n        self.assertEqual(self.resource_disk._is_plain_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/sdcx\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_plain_mounted(), True)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/sdb2\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_plain_mounted(), True)\n\n    def test_is_crypt_mounted(self):\n        self.resource_disk.disk_util.get_mount_items.return_value = []\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/dm-0\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), True)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/mapper/something\", \"dest\": \"/mnt/\"}]\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/mapper/something\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), True)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/sdcx\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), False)\n\n        self.resource_disk.disk_util.get_mount_items.return_value = [{\"src\": \"/dev/sdb2\", \"dest\": \"/mnt/resource\"}]\n        self.assertEqual(self.resource_disk._is_crypt_mounted(), False)\n\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil.add_resource_disk_to_crypttab')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._resource_disk_partition_exists')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._is_luks_device')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._is_crypt_mounted')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._is_plain_mounted')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._mount_resource_disk')\n    def test_try_remount(self, mock_mount, mock_plain_mounted, mock_crypt_mounted, mock_is_luks, mock_partition_exists, mock_add_rd_to_crypttab):\n\n        # Case 1, when there is a passphrase and the resource disk is not already encrypted and mounted.\n        mock_partition_exists.return_value = True\n        mock_is_luks.return_value = False\n        mock_crypt_mounted.return_value = False\n        mock_mount.return_value = True\n        self.resource_disk.passphrase_filename = self.mock_passhprase_filename\n\n        self.assertEqual(self.resource_disk.try_remount(), False)\n\n        mock_mount.assert_not_called()\n        mock_add_rd_to_crypttab.assert_not_called()\n\n        # Case 2, resource disk is encrypted but not mounted\n        mock_is_luks.return_value = True\n\n        self.assertEqual(self.resource_disk.try_remount(), True)\n\n        mock_mount.assert_called_with(ResourceDiskUtil.RD_MAPPER_PATH)\n        self.mock_disk_util.luks_open.assert_called_with(passphrase_file=self.mock_passhprase_filename,\n                                                         
                                                         dev_path=ResourceDiskUtil.RD_DEV_PATH,\n                                                         mapper_name=ResourceDiskUtil.RD_MAPPER_NAME,\n                                                         header_file=None,\n                                                         uses_cleartext_key=False)\n        mock_add_rd_to_crypttab.assert_called()\n\n        # Case 3: the resource disk mount fails\n        mock_mount.return_value = False\n        self.assertEqual(self.resource_disk.try_remount(), False)\n\n        mock_mount.assert_called_with(ResourceDiskUtil.RD_MAPPER_PATH)\n\n        # Case 4: the RD is encrypted and mounted\n        mock_crypt_mounted.return_value = True\n        mock_mount.reset_mock()\n        mock_add_rd_to_crypttab.reset_mock()\n        mock_mount.return_value = True\n        self.assertEqual(self.resource_disk.try_remount(), True)\n        mock_mount.assert_not_called()\n        mock_add_rd_to_crypttab.assert_not_called()\n\n        # Case 5: the RD is plain mounted already and there is no passphrase\n        mock_plain_mounted.return_value = True\n        self.resource_disk.passphrase_filename = None\n        self.assertEqual(self.resource_disk.try_remount(), True)\n\n        # Case 6: the RD is not plain mounted but the mount fails for some reason\n        mock_mount.return_value = False\n        mock_plain_mounted.return_value = False\n        self.assertEqual(self.resource_disk.try_remount(), False)\n        mock_mount.assert_called_once_with(ResourceDiskUtil.RD_DEV_PATH)\n\n        # Case 7: the RD is not plain mounted and the mount succeeds\n        mock_mount.return_value = True\n        self.assertEqual(self.resource_disk.try_remount(), True)\n        mock_mount.assert_called_with(ResourceDiskUtil.RD_DEV_PATH)\n\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._is_crypt_mounted', return_value=False)\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil._is_plain_mounted', return_value=True)\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil.encrypt_format_mount')\n    @mock.patch('main.ResourceDiskUtil.ResourceDiskUtil.try_remount')\n    def test_automount(self, mock_try_remount, mock_encrypt_format_mount, mock_is_plain_mounted, mock_is_crypt_mounted):\n        # Case 1: try_remount succeeds\n        mock_try_remount.return_value = True\n        self.assertEqual(self.resource_disk.automount(), True)\n        mock_try_remount.assert_called_once()\n\n        # Case 2: try_remount fails and the public settings are non-EFA\n        mock_try_remount.return_value = False\n\n        # Case 2.x: these are essentially a series of tests for \"is_encrypt_format\"\n        self.resource_disk.public_settings = {}\n        self.assertEqual(self.resource_disk.automount(), True)\n        mock_encrypt_format_mount.assert_not_called()\n\n        self.resource_disk.public_settings = {\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryption}\n        self.assertEqual(self.resource_disk.automount(), True)\n        mock_encrypt_format_mount.assert_not_called()\n\n        self.resource_disk.public_settings = {\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.DisableEncryption}\n        self.assertEqual(self.resource_disk.automount(), True)\n        mock_encrypt_format_mount.assert_not_called()\n\n        # Case 3: EFA case. A try_remount failure should fall through to encrypt_format_mount.\n        self.resource_disk.public_settings = {\n            CommonVariables.EncryptionEncryptionOperationKey: CommonVariables.EnableEncryptionFormatAll}\n        mock_encrypt_format_mount.return_value = True\n        self.assertEqual(self.resource_disk.automount(), True)\n        mock_encrypt_format_mount.assert_called_once()\n\n        # Case 4: EFA case, but EFA fails for some reason\n        mock_encrypt_format_mount.reset_mock()\n        mock_encrypt_format_mount.return_value = False\n        self.assertEqual(self.resource_disk.automount(), False)\n        mock_encrypt_format_mount.assert_called_once()\n"
  },
  {
    "path": "VMEncryption/test/test_utils.py",
    "content": "import os\n\n\nclass MockDistroPatcher:\n    def __init__(self, name, version, kernel):\n        self.distro_info = [None] * 2\n        self.distro_info[0] = name\n        self.distro_info[1] = version\n        self.kernel_version = kernel\n\n\ndef mock_dir_structure(artificial_dir_structure, isdir_mock, listdir_mock, exists_mock):\n    \"\"\"\n    Takes in an artificial directory structure dict and adds side_effects to mocks which are hooked to that directory\n    example:\n        artificial_dir_structure = {\n            \"/dev/disk/azure\": [\"root\", \"root-part1\", \"root-part2\", \"scsi1\"],\n            os.path.join(\"/dev/disk/azure\", \"scsi1\"): [\"lun0\", \"lun0-part1\", \"lun0-part2\", \"lun1-part1\", \"lun1\"]\n            }\n\n    any string that has an entry in this dict is mocked as a directory. So, /dev/disk/azure and /dev/disk/azure/scsi1 are dicts.\n    Everything else that is implied to exist is a file (e.g. /dev/disk/azure/root, /dev/disk/azure/root-part1, etc)\n\n    For an example look at test_disk_util.test_get_controller_and_lun_numbers method\n    NOTE: this method just modifies supplied mocks, it doesn't return anything.\n    \"\"\"\n    def mock_isdir(string):\n        return string in artificial_dir_structure\n    isdir_mock.side_effect = mock_isdir\n\n    def mock_listdir(string):\n        dir_content = artificial_dir_structure[string]\n        return dir_content\n    listdir_mock.side_effect = mock_listdir\n\n    def mock_exists(string):\n        if string in artificial_dir_structure:\n            return True\n\n        for dir in artificial_dir_structure:\n            listing = artificial_dir_structure[dir]\n            for entry in listing:\n                entry_full_path = os.path.join(dir, entry)\n                if string == entry_full_path:\n                    return True\n\n        return string in artificial_dir_structure\n    exists_mock.side_effect = mock_exists\n"
  },
  {
    "path": "docs/advanced-topics.md",
    "content": "# Advanced Topics\n\n## Azure Templates Samples\n\nYou can add a sample template into [Azure/azure-quickstart-templates](https://github.com/Azure/azure-quickstart-templates) to deploy your extension.\n\n## Azure Powershell and CLI\n\nYou can add the specific commands in Azure Powershell or CLI to install and enable your extension.\n"
  },
  {
    "path": "docs/contribution-guide.md",
    "content": "# Contribution Guide\n\n3rd party partners are welcomed to contribute the Linux extensions. Before you make a contribution, you should read the following guide.\n\n## Table of Contents\n\n* [**Overview**](./overview.md)\n  * [Terminology](./overview.md#terminology)\n  * [Requirements](./overview.md#requirements)\n  * [Architecture Overview](./overview.md#architecture-overview)\n* **Development**\n  * [Design Details](./design-details.md)\n    * [Handler Artifacts](./design-details.md#handler-artifacts)\n    * [Handler Lifecycle Management](./design-details.md#handler-lifecycle-management)\n    * [Report Status and Heartbeat](./design-details.md#report-status-and-heartbeat)\n    * [Logging](./design-details.md#logging)\n  * [Utils](./utils.md)\n  * [Sample Extension](./sample-extension.md)\n  * [Advanced Topics](./advanced-topics.md)\n    * [Azure Templates Samples](./advanced-topics.md#azure-templates-samples)\n    * [Azure Powershell and CLI](./advanced-topics.md#azure-powershell-and-cli)\n* [**Handler Registration**](./handler-registration.md)\n    * [Package and upload your extension](./handler-registration.md#package-and-upload-your-extension)\n    * [Register your extension](./handler-registration.md#register-your-extension)\n* [**Test**](./test.md)\n  * Test Matrix\n  * ASM or ARM\n  * Azure Templates\n  * Jenkins\n* [Document](./document.md)\n"
  },
  {
    "path": "docs/design-details.md",
    "content": "# Design Details\n\nThis page descibes the design details of the extension. You can write an extension from scrach folloing this page.\n\n<a name=\"handler-artifacts\"/>\n## Handler Artifacts\n\nAn Azure Extension Handler is composed of the following artifacts:\n\n1. **Handler Package**: This is the package that contains your Handler binary files and all standard static configuration files. This package is registered with the Azure ecosystem.\n\n2. **Handler Environment**: This is the set of files and folders that WALA sets up for the Handlers to use at runtime. These files can be used for communicating with WALA (heartbeat and status) or for writing debugging information (logging). The details of handler environment created by WALA is discussed in the section [Handler Environment](#handler-environment).\n\n3. **Handler Configuration**: This is a configuration file that contains various settings needed to configure this Handler at runtime. Extension configuration is the input provided by the end user based on the schema provided by the handler publisher during registration. For example, a handler might get the client authentication details for writing logs to his storage account via the handler configuration.\n\n<a name=\"handler-package\"/>\n### Handler Package\n\nThe Handlers are packaged as simple zip files for being registered in the Azure ecosystem. The zip file is supposed to contain the following:\n\n* The handler binaries.\n* HandlerManifest.json file that is used by WALA to manage the handler. This HandlerManifest.json file should be located in the root folder of the zip file.\n\n  The JSON file should be of the format:\n\n  ```\n  [{\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"installCommand\": \"<your install command>\",\n      \"uninstallCommand\": \"<your uninstall command>\",\n      \"updateCommand\": \"<your update command>\",\n      \"enableCommand\": \"<your enable command>\",\n      \"disableCommand\": \"<your disable command>\",\n      \"rebootAfterInstall\": <true | false>,\n      \"reportHeartbeat\": <true | false>\n    }\n  }]\n  ```\n\n  The above JSON file provides a list of all commands that will be executed by WALA for managing various handlers on the VM.\n\n  * **Version**: indicates the version of the protocol which should be used by WALA to deserialize this JSON. \n\n  * **Install\\Uninstall\\Update\\Enable\\Disable** point to the command line that will be executed by WALA in various scenarios. The paths of the command line provided in HandlerManifest.json should be relative to the root directory of the handler. The current working directory of the handler is the path of the root folder of the handler. All these command lines are launched as LOCAL SYSTEM with administrative privileges.   \n\n    **Note**: It is valid for multiple commands in the HandlerManifest to point to the same command line. For e.g. the install and Update command might point to the same binary with same parameters.\n\n  * **RebootAfterInstall** notifies WALA if a reboot is required to complete the installation of a handler. Handlers should not reboot the system independently to avoid interfering with each other. \n\n  * **ReportHearbeat** indicates WALA if the handler will be reporting heartbeat or not. 
  * **ReportHeartbeat** indicates to WALA whether the handler will report heartbeat. The details of heartbeat and status are discussed in the section [Heartbeat reporting](#heartbeat-reporting).\n\n  **Note:** All of the fields in the JSON specified above are required, and registration of the handler with Azure will fail if one of these fields is not specified. The meaning of the various fields in the JSON with respect to WALA is explained in the sections below.\n\n  An example of the directory structure of the zip file for a handler is:\n\n  ```\n  SampleExtension.zip\n      |-HandlerManifest.json\n      |-install.py\n      |-uninstall.py\n      |-enable.py\n      |-disable.py\n      |-update.py\n  ```\n\n  A sample HandlerManifest.json for the above sample handler would be:\n\n  ```\n  [{\n    \"version\": 1.0,\n    \"handlerManifest\": {\n      \"installCommand\": \"./install.py\",\n      \"uninstallCommand\": \"./uninstall.py\",\n      \"updateCommand\": \"./update.py\",\n      \"enableCommand\": \"./enable.py\",\n      \"disableCommand\": \"./disable.py\",\n      \"rebootAfterInstall\": false,\n      \"reportHeartbeat\": true\n    }\n  }]\n  ```\n\n<a name=\"handler-environment\"/>\n### HandlerEnvironment\n\nWhen WALA installs a handler on the VM, it creates a set of files and folders that are needed by the handler at runtime for various purposes. The locations of all these files and folders are communicated to the handler via the HandlerEnvironment.json file.\n\nHandlerEnvironment.json is created under the root directory where the handler is unpacked. The structure of HandlerEnvironment.json is:\n\n  ```\n  [{\n    \"version\": 1.0,\n    \"handlerEnvironment\": {\n      \"logFolder\": \"<your log folder location>\",\n      \"configFolder\": \"<your config folder location>\",\n      \"statusFolder\": \"<your status folder location>\",\n      \"heartbeatFile\": \"<your heartbeat file location>\"\n    }\n  }]\n  ```\n\n  * **version** - contains the version of the protocol that WALA is abiding by. In the initial release the only supported version is 1.0.\n\n  * **handlerEnvironment** – This is the object that encapsulates all the properties of a handler defined in version 1.0 of the protocol.\n\n  * **logFolder** – contains the location where the handler should put its log files that might be needed to debug any customer issues. The advantage of putting log files under the folder directed by this location is that these files can be automatically retrieved from the customer's VM by using a tool, without actually logging into the VM and copying them over manually.\n\n  * **configFolder** – contains the location where the handler will get its configuration settings file.\n\n  * **statusFolder** – contains the location where the handler is supposed to write back a file with a structured status of the current state of the work being done by the handler.\n\n  * **heartbeatFile** - this is the file that is used to communicate the heartbeat of the handler back to WALA.\n\nErrors while reading HandlerEnvironment.json – In rare cases a handler might encounter errors when trying to read the HandlerEnvironment.json file, since WALA might be writing the file at the same time. The handler should be capable of handling such errors. Our recommendation for handler publishers is to implement retry logic with some sort of backoff.\n\n
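A minimal sketch of such retry logic (illustrative only; the relative file path and the fallback behavior are assumptions, not part of the protocol):\n\n```\nimport json\nimport time\n\ndef read_handler_environment(path='HandlerEnvironment.json', retries=5):\n    # WALA may be rewriting the file while we read it, so tolerate\n    # transient read/parse errors and back off between attempts\n    for attempt in range(retries):\n        try:\n            with open(path) as f:\n                return json.load(f)[0]['handlerEnvironment']\n        except (IOError, ValueError):\n            time.sleep(2 ** attempt)\n    raise IOError('could not read ' + path)\n```\n\n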
<a name=\"handler-configuration\"/>\n### Handler Configuration\n\nThere are scenarios where a handler needs some user-provided input parameters to configure itself. All such user-provided input is communicated from WALA to the handler via the configuration file. For example, a handler might require the user to provide the account name and the key of a user storage account where the logs will be saved. This account information can be passed by the user to the handler via the configuration file.\n\n#### Configuration File Structure\n\nThe configuration file should be a valid JSON document whose root object has the single property `handlerSettings`, with two child objects `protectedSettings` and `publicSettings`. Apart from that, the complete schema of the handler configuration file under the `publicSettings`\\`protectedSettings` property is defined by the handler publisher during the registration process. When a call to add the handler to the VM is made, the user needs to provide a configuration that complies with the structure that the handler publisher provided during registration.\n\n**Managing user secrets**: There may be parts of the handler configuration that contain user secrets (like passwords, storage keys, etc.). These secrets in general should never be persisted in plain text, to prevent accidental disclosure. To support this concept, Azure Extension Handler publishers can allow users to store all or part of the handler configuration in a protected section of the config. All settings under this section are encrypted by an X509 certificate before being sent over to the VM. WALA will persist the protected settings as encrypted only and will provide the thumbprint of the certificate that needs to be used for decrypting this information. To extract the settings, the handler will need to retrieve the certificate from the local machine store and decrypt the settings using the certificate private key. The publisher of the Azure Extension Handler decides what, if any, part of the configuration should be protected in this manner.\n\nA sample configuration file would look like:\n\n```\n{\n  \"handlerSettings\": {\n    \"protectedSettings\": {\n      \"storageaccountname\": \"MY SECRET STORAGE ACCOUNT NAME\",\n      \"storageaccountkey\": \"MY SECRET STORAGE ACCOUNT KEY\"\n    },\n    \"publicSettings\": {\n      \"MyHandlerConfiguration\": {\n        \"configurationChangePollInterval\": \" \",\n        \"overallQuotaInMB\": 12\n      },\n      \"MyHandlerInfrastructureLogs\": {\n        \"scheduledTransferLogLevelFilter\": \"Verbose\",\n        \"bufferQuotaInMB\": \"100\",\n        \"scheduledTransferPeriod\": \"PT1M\"\n      }\n    }\n  }\n}\n```\n\nIn the above example the storageaccountname and storageaccountkey are protected secrets. When these secrets are persisted in a file on the VM for consumption by the handler, the protected section is encrypted and base64 encoded.\n\n
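One common approach on Linux to recover the plain-text settings is to base64-decode the `protectedSettings` value and decrypt it with openssl, using the certificate and private key that the agent drops for the given thumbprint (a sketch only; the `/var/lib/waagent` location and the `.crt`/`.prv` file naming are assumptions about the agent's behavior):\n\n```\nimport base64\nimport json\nimport subprocess\n\ndef decrypt_protected_settings(settings_b64, thumbprint, lib_dir='/var/lib/waagent'):\n    # assumed layout: the agent stores <thumbprint>.crt / <thumbprint>.prv here\n    cert = '%s/%s.crt' % (lib_dir, thumbprint)\n    key = '%s/%s.prv' % (lib_dir, thumbprint)\n    proc = subprocess.Popen(\n        ['openssl', 'smime', '-decrypt', '-inform', 'DER',\n         '-recip', cert, '-inkey', key],\n        stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n    decrypted, _ = proc.communicate(base64.b64decode(settings_b64))\n    return json.loads(decrypted)\n```\n\n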
Given the settings above, the configuration file persisted on the VM would look like:\n\n```\n{\n  \"handlerSettings\": {\n    \"protectedSettingsCertThumbprint\": \"a811c3f4058542418abb\",\n    \"protectedSettings\": \"ICB7DQogICAgInN0b3JhZ2VhY2NvdW50IiA6ICJbcGFyY\n                          W1ldGVycy5TdG9yYWdlQWNjb3VudF0iLA0KICB9LA0K\",\n    \"publicSettings\": {\n      \"DiagnosticMonitorConfiguration\": {\n        \"configurationChangePollInterval\": \" \",\n        \"overallQuotaInMB\": 12\n      },\n      \"DiagnosticInfrastructureLogs\": {\n        \"scheduledTransferLogLevelFilter\": \"Verbose\",\n        \"bufferQuotaInMB\": \"100\",\n        \"scheduledTransferPeriod\": \"PT1M\"\n      }\n    }\n  }\n}\n```\n\n<a name=\"location-of-handler-configuration\"/>\n#### Location of Handler Configuration\n\nThe location where the configuration settings files will be written can be retrieved from the \"configFolder\" property in the HandlerEnvironment.json file.\n\n<a name=\"handler-configuration-filename\"/>\n#### Handler Configuration Filename\n\nWhenever a new configuration is received, WALA writes a configuration settings file named <SequenceNumber>.settings under the configFolder with the configuration provided by the user, and launches [the enable command of the handler](#enable).\n\nThe handler is expected to retrieve the last configuration file written by WALA by looking under the configFolder directory for the highest sequence number. This sequence number can then be used to apply the latest user-provided configuration settings to the handler.\n\n
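A minimal sketch of that lookup (illustrative only; assumes every settings file in the folder is named `<integer>.settings`):\n\n```\nimport glob\nimport os\n\ndef latest_settings_file(config_folder):\n    # pick the *.settings file with the highest sequence number\n    paths = glob.glob(os.path.join(config_folder, '*.settings'))\n    if not paths:\n        return None\n    return max(paths, key=lambda p: int(os.path.basename(p).split('.')[0]))\n```\n\n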
## Handler Lifecycle Management\n\n### Add a new handler on the VM (Install and Enable)\n\nWhen a handler is requested on a VM by the user, WALA will do the following inside the VM:\n\n1. Download the handler package zip from the Azure repository to `/var/lib/waagent`.\n2. Unzip the package under a unique location corresponding to the handler identity. The handler should not take any dependency on the location where the handler package is unpacked, since this location might change in the future depending on future requirements. Currently, the unique location is formatted as `<ProviderNamespace>.<Type>-<Version>`.\n3. Create the configuration, logging and status folders for the handler.\n4. Create the <SequenceNumber>.settings file with the initial configuration.\n5. Create the HandlerEnvironment.json file under the root folder where the handler is unpacked.\n6. Parse the HandlerManifest.json file and execute the install command in a separate process.\n7. The install command is executed in the process with root privileges.\n8. If there are multiple handlers being installed, WALA will download and unzip them in parallel but will invoke the install commands sequentially.\n9. WALA will wait for the installation to complete and monitor the exit code of the install process.\n10. If the install process exits **SUCCESSFULLY** (exit code 0), WALA maintains state that the handler was installed successfully and does not run the install command for the same handler again unless the handler has been uninstalled first.\n  * WALA will wait for a maximum of 5 minutes before timing out the install process and considering the install to have failed.\n11. If the install process exits **SUCCESSFULLY**, WALA will provide the handler configuration settings in the defined location and launch the `Enable` command in a separate process that runs with root privileges.\n12. If the install process exits **UNSUCCESSFULLY**, WALA will retry installing the handler under two circumstances:\n  * When WALA receives a new goal state triggered by a user action (e.g. adding\\removing\\updating any handler, updating handler configuration, etc.).\n  * When WALA restarts (which should only happen when the machine itself is rebooted).\n\n#### Install command\n\nIn the install command, the handler is expected to install its processes and services on the system and create the necessary setup that is required for the handler to run at runtime.\n\n### Remove a handler from the VM (Disable and Uninstall)\n\nWhen a user explicitly requests to remove the handler from the VM, WALA will execute the following actions:\n\n1. The disable command specified in the HandlerManifest.json will be executed in a separate process that runs with root privileges. The handler is expected to complete the pending tasks and then stop any processes or services related to the handler that have been running on the machine.\n  * WALA will wait for a maximum of 5 minutes for the disable process to finish before timing out and moving to the next step.\n2. The uninstall command will be invoked in a separate process that runs with root privileges. WALA will wait for a maximum of 5 minutes for the uninstall process to finish.\n3. WALA will remove all the package binaries and configuration files that were associated with the handler. The handler log files will be kept on the machine for any future debugging purposes.\n\n### Disable\n\nA user might explicitly request to disable a handler without uninstalling it. On disable, WALA will execute the disable command in a separate process with root privileges. On the execution of the disable command, the handler is expected to complete the pending tasks and then stop any processes or services related to the handler that have been running on the machine.\n\nWALA will wait a maximum of 5 minutes for the disable process to finish before timing out and moving to the next step.\n\n### Enable\n\nA user might explicitly request to enable a handler that has been previously disabled. On enable, WALA will execute the enable command in a separate process with root privileges.\n\nThe enable command will be invoked every time the machine reboots or the machine receives a new configuration settings file.\n\n**Note:**\nUnlike the install state, WALA will not maintain the enabled\\disabled state of the handler. Every time the machine restarts (which in turn restarts WALA) or a new goal state is received, WALA will try to set the machine to the latest goal state. Thus it might invoke the enable\\disable commands multiple times even if the handler is already enabled\\disabled. So the enable and disable commands need to be idempotent, i.e. if the handler is already enabled and the enable command is invoked again, the command should check whether all the processes are running as expected and, if so, just exit with a success code.\n\n
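A minimal sketch of such an idempotent enable command (the pidfile path and the start-up helper are hypothetical):\n\n```\nimport os\nimport sys\n\nPIDFILE = '/var/run/sample-handler.pid'  # hypothetical location\n\ndef enable():\n    if os.path.isfile(PIDFILE):\n        with open(PIDFILE) as f:\n            pid = int(f.read().strip())\n        try:\n            os.kill(pid, 0)  # signal 0 only probes whether the pid exists\n            sys.exit(0)      # already enabled; nothing to do\n        except OSError:\n            pass             # stale pidfile; fall through and start\n    start_daemon_and_write_pidfile()  # hypothetical start-up helper\n```\n\n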
### Update\n\nThere are two scenarios when an update can happen:\n* The user triggers an explicit update of the handler.\n* The handler is updated in the Azure repository and automatically gets picked up by WALA.\n\nIn both of these cases WALA will identify that a handler with the same name and publisher and a lower version is already installed on the machine.\n\n1. It will download the updated version of the handler from the Azure repository and unpack it under the handler identity folder.\n2. WALA will call disable on the existing handler with the lower version.\n3. WALA will invoke the update command of the newly downloaded package in a separate process with root privileges. During update the handler has an opportunity to transfer any state information from the previous handler.\n4. WALA will invoke the uninstall command on the existing handler with the lower version.\n5. WALA will invoke the enable command on the newly downloaded package.\n\n<a name=\"reporting-status-and-heartbeat\"/>\n## Reporting Status and Heartbeat\n\nMicrosoft Azure provides two facilities to report back the health of the handler and the status of the operations being performed by it.\n\n1. **Heartbeat**: The heartbeat channel should be used to report the health of the handler itself. Providing heartbeat is an optional facility that the handler can opt into by setting the reportHeartbeat property to true in the HandlerManifest. Heartbeat is generally expected to be reported by long-running services or processes. For example, an antivirus handler service might use the heartbeat channel to indicate that its service has stopped for some reason.\n\n2. **Configuration Status**: The status channel should be used to report the success or failure of any operations that were conducted when applying the new configuration provided by the user. For example, a diagnostics agent might report issues connecting to the storage account via this channel.\n\nWALA collects the heartbeat and status information for all handlers and aggregates them into VM health, which is returned to the user when they query for it via the GetDeployment RDFE API call.\n\n### Heartbeat reporting\n\nHandlers that have opted into reporting heartbeat are supposed to report it via the file specified in the heartbeat property of the HandlerEnvironment file. The structure of the heartbeat file should be:\n\n```\n[{\n    \"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"<ready | notready>\",\n        \"code\": <Valid integer status code>,\n        \"Message\": \"<Human readable information or error message passed to the user>\"\n    }\n}]\n```\n\nVarious fields in the above JSON document correspond to the following:\n* **Version** – This is the version of the protocol being used to communicate heartbeat to WALA. Currently the only version WALA understands is 1.0.\n* **Heartbeat** – This object encapsulates all the heartbeat related information for the handler.\n* **Status** – The current status of the handler. The only valid values are \"ready\" and \"notready\".\n* **Code** – The status code of the handler. This is an optional field.\n* **Message** – A human readable\\actionable error message for the user. This is an optional field.\n\nHandlers can report successful heartbeat by setting the status to \"ready\". To report repeated successful heartbeats, the handler can just change the last modified timestamp of this file. The status field only needs to be changed to \"notready\" if the handler has encountered some error\\exception condition while executing. For example, if an exception occurs after the handler is installed but before the first configuration settings file is processed, it can be reported via the status section in the heartbeat file.\n\nWALA will read the heartbeat file once every 2 minutes to check whether the plugin is running. If the last modified timestamp is within the last 1 minute and the status is set to \"ready\", WALA will consider the plugin to be working properly. If the last modified timestamp is older than 10 minutes, WALA will consider the plugin handler to be unresponsive. If the last modified timestamp is between 1 and 10 minutes old, WALA will consider the plugin to be in \"Unknown\" state. If the status is set to \"notready\", the error code and the message will be returned back to the user in the next GetDeployment call.\n\n
A sample heartbeat file would look like:\n\n```\n[{\n    \"version\": 1.0,\n    \"heartbeat\" : {\n        \"status\": \"ready\",\n        \"code\": 0,\n        \"Message\": \"Sample Handler running. Waiting for a new configuration from user.\"\n    }\n}]\n```\n\nErrors while writing to the heartbeat file – In rare cases a handler might encounter errors when trying to write the heartbeat file, since WALA might be reading the file at the same time. The handler should be capable of handling such errors. Our recommendation for handler publishers is to implement retry logic with some sort of exponential backoff.\n\n
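A minimal sketch of heartbeat reporting (illustrative only; the helper names are not part of the protocol):\n\n```\nimport json\nimport os\n\ndef report_heartbeat(heartbeat_file, status='ready', code=0, message=''):\n    # write the full heartbeat document on the first report\n    doc = [{\n        'version': 1.0,\n        'heartbeat': {'status': status, 'code': code, 'Message': message}\n    }]\n    with open(heartbeat_file, 'w') as f:\n        json.dump(doc, f)\n\ndef refresh_heartbeat(heartbeat_file):\n    # on later healthy iterations, bumping the mtime is enough\n    os.utime(heartbeat_file, None)\n```\n\n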
### Status reporting\n\nThe handler can report status back to WALA by writing to the status file \"<SequenceNumber>.status\" under the status folder specified in the HandlerEnvironment. The status file structure supported by WALA is:\n\n```\n[{\n    \"version\": 1.0,\n    \"timestampUTC\": \"<current utc time>\",\n    \"status\" : {\n        \"name\": \"<Handler workload name>\",\n        \"operation\": \"<name of the operation being performed>\",\n        \"configurationAppliedTime\": \"<UTC time indicating when the configuration was last successfully applied>\",\n        \"status\": \"<transitioning | error | success | warning>\",\n        \"code\": <Valid integer status code>,\n        \"message\": {\n            \"id\": \"id of the localized resource\",\n            \"params\": [\n                \"MyParam0\",\n                \"MyParam1\"\n            ]\n        },\n        \"formattedMessage\": {\n            \"lang\": \"Lang[-locale]\",\n            \"message\": \"formatted user message\"\n        },\n        \"substatus\": [{\n            \"name\": \"<Handler workload subcomponent name>\",\n            \"status\": \"<transitioning | error | success | warning>\",\n            \"code\": <Valid integer status code>,\n            \"message\": {\n                \"id\": \"id of the localized resource\",\n                \"params\": [\n                    \"MyParam0\",\n                    \"MyParam1\"\n                ]\n            },\n            \"formattedMessage\": {\n                \"lang\": \"Lang[-locale]\",\n                \"message\": \"formatted user message\"\n            }\n        }]\n    }\n}]\n```\n\n* **version** – indicates the version of the protocol being used for communicating the status back to WALA.\n* **timestampUTC** – The current time in UTC at which this status structure is being created.\n* **status** – The object that encapsulates the top level status of the configuration for which the status is being reported.\n* **status\\name** – This property is optional. This property can be used by handlers to point to the VM workload name that is being managed by the handler.\n* **status\\operation** – This property is optional. This property can be used by handlers to indicate the current operation being performed to enable the VM workload on the machine.\n* **status\\configurationAppliedTime** – This property is optional. This property can be used by handlers to indicate the last time the configuration corresponding to the current sequence number was successfully applied on the VM.\n* **status\\status** – This property indicates the current status of the operation being performed. The only acceptable values are: transitioning, error, success and warning.\n* **status\\code** – A valid integer status code for the current operation.\n* **status\\message** – This is an optional localized message that will be passed back to the user on a GetDeployment call via RDFE.\n* **status\\message\\id** – This is the message identifier, to be used for lookup of a localized message. Treated as a string. A symbolic id is preferred for human interpretation, for example Error_CannotConnect. The file that contains all the localized strings corresponding to the id is provided by the handler author to Azure during registration.\n* **status\\message\\params** - This is an ordered list of parameter (placeholder) values to be filled into the message template corresponding to the message id. The first param is used for placeholder “{0}” in the message template (from the provided language resources); the second for placeholder “{1}”, etc.\n* **status\\formattedMessage\\lang** - The language/locale of the preformatted message.\n* **status\\formattedMessage\\message** - The human readable message that will be returned to the user.\n* **substatus** – An array of nested substatus objects that can be used by the handler to pass the substatus of complicated operations. The fields in the substatus objects are supposed to be used in the same manner as they are used in the parent status object.\n\nEvery time a handler receives a new extension pack via a new configuration, it is expected to periodically report the status corresponding to that configuration in a file named <SequenceNumber>.status. The status should be reported at least once every 2 minutes while the handler is in a (transitioning\\warning) state. Once the handler reaches a terminal state (success\\error) it can stop reporting the status messages for that sequence number.\n\nEach time the handler has new status to report, it should overwrite the <SequenceNumber>.status file. The status provided in the status file should be an aggregate status (even if that status has been reported before) of all the operations performed for this configuration so far. If writing to the file fails, the handler should retry with backoff. The handler can write to the status file whenever it has something new to report. WALA will only read this status file after it has fed a new configuration to the handler, and only until the handler reports a terminal state (success\\error). During this time WALA will read the status file with a default frequency of 5 minutes (configurable).\n\n
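A minimal sketch of writing such an aggregate status file (illustrative only; field choices beyond the documented schema are assumptions):\n\n```\nimport json\nimport os\nimport time\n\ndef write_status(status_folder, seq_no, operation, status, code, message):\n    # overwrite the whole <SequenceNumber>.status file with the\n    # aggregate status for this configuration\n    doc = [{\n        'version': 1.0,\n        'timestampUTC': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),\n        'status': {\n            'operation': operation,\n            'status': status,  # transitioning | error | success | warning\n            'code': code,\n            'formattedMessage': {'lang': 'en', 'message': message}\n        }\n    }]\n    path = os.path.join(status_folder, str(seq_no) + '.status')\n    with open(path, 'w') as f:\n        json.dump(doc, f)\n```\n\n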
A simple status report without localization from a handler would look like:\n\n```\n[{\n    \"version\": 1.0,\n    \"timestampUTC\": \"2013/11/13, 17:46:30.447\",\n    \"status\" : {\n        \"name\": \"enable wordpress\",\n        \"operation\": \"installing wordpress\",\n        \"status\": \"transitioning\",\n        \"formattedMessage\": {\n            \"lang\": \"en\",\n            \"message\": \"Enable IIS on the VM.\"\n        },\n        \"substatus\": [{\n            \"name\": \"Wordpress plugin\",\n            \"status\": \"success\",\n            \"code\": 0,\n            \"formattedMessage\": {\n                \"lang\": \"en\",\n                \"message\": \"Successfully downloaded wordpress plugin.\"\n            }\n        },\n        {\n            \"name\": \"Enable IIS\",\n            \"status\": \"transitioning\",\n            \"message\": \"Turning windows feature for enabling IIS on.\"\n        }]\n    }\n}]\n```\n\n#### Localization Support\n\nTo enable showing these messages in the user’s preferred language and, ideally, to enable multiple users to view the same captured execution status in different languages, we need to defer message resource lookup until the user queries for handler status. The current user’s preferred language is retrieved from the HTTP header.\n\nLocalization support is optional. If the handler does not wish to participate in localization, it can just return the formattedMessage strings in a default language, which will be directly returned to the user.\n\nA localized status report would look like:\n\n```\n[{\n    \"version\": 1.0,\n    \"timestampUTC\": \"<current utc time>\",\n    \"status\" : {\n        \"name\": \"SharePointFrontEnd\",\n        \"operation\": \"ResExtProvisioning\",\n        \"status\": \"error\",\n        \"code\": 12,\n        \"message\": {\n            \"id\": \"1215\",\n            \"params\": [\n                \"spo-sqldb.cloudapp.net\", \"JoeAdmin\"\n            ]\n        }\n    }\n}]\n```\n\n#### Localized message formatting\n\nAs part of handler registration with Azure, a set of localization resources will be provided for looking up the status messages from the handler.\n\nA language/locale lookup sequence similar to the one for .NET resources will be applied, with the ultimate fallback being \"en\", a resource file for which must always be provided.\n\nThe structure of the JSON resource files will be as follows:\n\n```\n[{\n    \"version\": 1.0,\n    \"lang\": \"lang[-locale]\",\n    \"messages\": [\n        {\n            \"id\": \"message id\",\n            \"text\": \"Message text with {0}, {1} placeholder.\"\n        }]\n}]\n```\n\n**Placeholder ordering** - The order of Status/Param values from the in-guest handler must be fixed (independent of language) and should correspond to the sequence of {n} placeholders in the English version of the message. If the translation of a message in some language requires a different order of the placeholders, the message template in the resource file for that language should have the placeholders reordered accordingly.\n\n
To continue the earlier status sample, the message corresponding to id 1215 might read as follows in English:\n\n  ```\n  Failed to establish connection to {0} as {1}\n  ```\n\nIn German it might be:\n\n  ```\n  {1} konnte keine Verbindung zu {0} herstellen\n  ```\n\n<a name=\"logging\"/>\n## Logging\n\nHandlers should use the folder provided in the \"logFolder\" property of the handler environment for writing the logs needed to debug their handlers in case of any issues reported on a live customer VM.\n"
  },
  {
    "path": "docs/document.md",
    "content": "# Document\n\nA `README.md` is recommended in your extension directory. You can refer to [**README.md of CustomScript**](../CustomScript/README.md).\n\nWhat you should include in `README.md`:\n\n* Configuration schema (Public and Protected)\n* How to deploy the extension using Azure CLI or Azure Powershell\n* How to deploy the extension in ASM and ARM mode\n* How to deploy the extension using ARM templates\n* Configuration Examples\n* How to debug\n* Supported Linux Distributions\n"
  },
  {
    "path": "docs/handler-registration.md",
    "content": "# Handler Registration\n\nIn this page, we will show you the steps to package and register your extensions to Azure repository. We assume that you have prepared your `SampleExtension` in `~/azure-linux-extensions/`.\n\nFor registering a handler the following two components are required:\n\n* the handler package - The extension handler package needs to be uploaded to a storage location.\n* the definition xml file - This section gives an overview of some of the key elements that are required in the definition file.\n\nAlso, the extension should be registered under the Publisher’s Azure Subscription. Prior to Registration, the subscription should be approved for publishing by Azure Runtime team. During the handler registration, you need specify the certificate of your Azure subscription.\n\nWe provide some scripts to help package and register your extensions.\n\n```\nregistration-scripts/\n├── api\n│   ├── add-extension.sh\n│   ├── check-request-status.sh\n│   ├── del-extension.sh\n│   ├── get-extension.sh\n│   ├── get-subscription.sh\n│   ├── list-extension.sh\n│   ├── params\n│   └── update-extension.sh\n├── bin\n│   ├── add.sh\n│   ├── blob\n│   │   ├── list.sh\n│   │   └── upload.sh\n│   ├── check.sh\n│   ├── del.sh\n│   ├── get.sh\n│   ├── list.sh\n│   ├── subscription.sh\n│   └── update.sh\n├── create_zip.sh\n├── mooncake\n│   └── sample-extension-1.0.xml\n└── public\n    └── sample-extension-1.0.xml\n```\n\n<a name=\"package-and-upload-your-extension\"/>\n## Package and upload your extension\n\nYou can package your extension into a zip file using the following command.\n\n```\ncd ~/azure-linux-extensions/\n./registration-scripts/create_zip.sh SampleExtension/ 1.0.0.0\n```\n\nThen you will get `SampleExtension-1.0.0.0.zip` in `build` directory.\n\nYou should upload your extension to a downloadable storage, for e.g. [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/).\n\n```\nbin/blob/upload.sh ~/azure-linux-extensions/build/SampleExtension-1.0.0.0.zip\n```\n\n<a name=\"register-your-extension\"/>\n## Register your extension\n\n### Prepare your subscription for registration\n\nThe extension should be registered under the Publisher’s Azure Subscription.\n\n### How to use the publish scripts\n\nThe following scripts are executed in `registration-scripts` directory.\n\nYou can configure `api/params` to change the endpoint (Public Azure or Mooncake).\n\n### Definition File\n\nFor registration, the publisher would have to provide the definition file.\n\n| Property | Description | Requirements |\n|:---------------:|:----- |:----- |\n| ProviderNamespace | This has to be a unique namespace per each subscription. The namespace is a combination of company team, team name (optional) and product name. E.g.: Microsoft.Azure.RemoteAcccess | Namespace cannot be empty, should be less than 256 chars and underscores cannot be used. |\n| Type | Name of the Extension Handler. The type indicate the purpose of the extension | Type cannot be empty, should be less than 256 chars and underscores cannot be used. |\n| Version | Version number of the handler. The combination of namespace, type and version uniquely identifies an extension. | The version number needs to be changed for every release. The format of version number has to be `<major>.<minor>.<build>.<revision>` Eg: 1.0.1.1 |\n| Label | The label of the extension | |\n| HostingResource | This should be either WebRole or WorkerRole or VmRole depending on whether it’s targeted for PaaS or IaaS. 
| HostingResource | This should be either WebRole, WorkerRole or VmRole depending on whether it’s targeted for PaaS or IaaS. | These values are case sensitive. |\n| MediaLink | The blob url which has the Extension Package. | The MediaLink value must point to a URL (either http or https) in a blob storage and must be downloadable. |\n| Description | The description of the extension | |\n| IsInternalExtension | If this is set to \"true\", the handler is not visible for public use. It can still be accessed by referring to the Namespace, Type & Version combo. | Possible values are case-sensitive true or false |\n| Eula | If the software requires any additional EULAs, a link to the EULA should be provided. | |\n| PrivacyUri | If the software collects any data and transfers it out of the VM, then an additional privacy document might be needed. | |\n| HomepageUri | A public URL that has usage information and contact information for customer support. | |\n| IsJsonExtension | Whether the extension configuration is in JSON format | It should always be \"true\". |\n| SupportedOS | The supported OS | It should always be \"Linux\". |\n| CompanyName | The company name | |\n\nYou can prepare your sample definition file `public/sample-extension-1.0.xml`.\n\n```\n<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.Love.Linux</ProviderNameSpace>\n<Type>SampleExtension</Type>\n<Version>1.0.0.0</Version>\n<Label>Microsoft loves Linux</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink>Storage blob location of the Zip file</MediaLink>\n<Description>Microsoft loves Linux</Description>\n<IsInternalExtension>false</IsInternalExtension>\n<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>\n<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<SupportedOS>Linux</SupportedOS>\n<CompanyName>Microsoft</CompanyName>\n</ExtensionImage>\n```\n\n### Register the new extension\n\n```\nbin/add.sh public/sample-extension-1.0.xml\n```\n\nThe registration and unregistration operations are asynchronous. You can check the status of an operation using the following command.\n\n```\nbin/check.sh <x-ms-request-id>\n```\n\nYou can get `<x-ms-request-id>` from the output of the registration operation.\n\n### Update your extension\n\nOnce the extension is published, any changes to the handler can be published as newer versions, using the update API.\n\n```\nbin/update.sh public/sample-extension-1.0.xml\n```\n\nHere is an overview of how updates are done:\n\n* **Hotfixes** - The publisher should release hotfixes by changing the revision number. E.g.: If the current version is 1.0.0.0, then the hotfixed version would be 1.0.0.1. All hotfixes are automatically installed on the VM.\n* **Minor Version Changes** - Any minor features can be released as a minor update. E.g.: If the current version is 1.0.0.0, then a minor version update would be 1.1.0.0. If the client opts in for auto upgrade, all minor version changes are automatically applied.\n* **Major Version Changes** - Any breaking changes in the handler should be released as a major version update. The client has to explicitly request major version changes.\n\n### List your extensions\n\n**NOTE:** After registering and updating, you need to wait some time for your extension to **replicate**. The wait time depends on the workload of the replication system, and can range from half an hour to one day.\n\n
You can get the replication status of the extension using the following command:\n\n```\nbin/list.sh\n```\n\n### Unregister your extension\n\n```\nbin/del.sh <ProviderNameSpace> <Type> <Version>\n```\n\nSample:\n\n```\nbin/del.sh Microsoft.Love.Linux SampleExtension 1.0.0.0\n```\n\n**NOTE:** Unregistration is supported for internal extensions only. You need to update your extension from public to internal before unregistration.\n"
  },
  {
    "path": "docs/overview.md",
    "content": "# Overview\n\nIn order to make the Microsoft Azure IaaS VMs customizable, Microsoft Azure is releasing a set of capabilities which will enable users to automate software deployment and configuration on IaaS VMs. As a part of these capabilities, a protocol is being released which can be used by various existing VM customization products to integrate with the Microsoft Azure VM ecosystem. This document discusses the requirements to participate in Microsoft Azure VM ecosystem and provides a guide for integrating VM customization products with Microsoft Azure.\n\n<a name=\"terminology\"></a>\n## Terminology\n\n| Teminology | Description |\n|:---------------:|:----- |\n| WALA | The Microsoft Azure component that runs inside the Linux VM and is responsible for managing the extension handlers. You can get the source code of WALA from https://github.com/Azure/WALinuxAgent. |\n| Handlers | Partner authored component to deliver software and configuration to the customer VM. This component needs to implement handler configuration and status contracts and be provided as a handler package. Generally a handler will consist of an Azure interoperability wrapper around an existing VM customization product. In the overview documents handlers are more broadly referred to as `extensions`. The term `handler` and `extension` are used somewhat interchangeably. |\n| Extension Pack | Specific job, workload, or script to be executed by the extension handler. |\n| Handler identity | An identifier used to uniquely define the handler. This identity is a tuple of <Handler name>, <Publisher> and <Version> |\n| Handler Manifest | A JSON based manifest that defines various properties needed by WALA to manage the handler. |\n\n<a name=\"requirements\"></a>\n## Requirements\n\nTo participate in the Microsoft Azure ecosystem, any VM customization product needs to create a handler that implements the WALA defined protocol to integrate with the Azure ecosystem. The basic requirements for creating a handler that implements the Azure protocol are:\n\n1. Handler Packaging – The Handler should be packaged as a zip file. This zip package should contain all the binaries related of the handler and HandlerManifest. This package needs to be registered with the Azure image repository. Azure image repository is responsible for managing all versions of all the handlers that are registered with the Azure ecosystem.\n\n2. Handler Environment - Handler needs the capability to read the environment file in the format that WALA defines. The environment file defines the locations of various files and folder that the handler needs to use for reading configuration and writing back heartbeat and status.\n\n3. Handler Configuration – Various extension packs that the handler needs to manage are passed to the handler in form of configuration settings. For example if a script is needed by the handler to install an extension, that script is passed to it via the handler configuration file. The handler should have the ability to read this file in the format defined by the Azure Agent and should be able to execute its contents and report the status of that execution with a frequency that complies with the Azure Agent protocol.\n\n4. Handler heartbeat and status – The handler is supposed to report the status of the most recently executed configuration with a frequency that complies with the Azure Agent protocol. 
\n\n<a name=\"architecture-overview\"></a>\n## Architecture Overview\n\nThe diagram below gives an overview of how handlers are supposed to interact with the Azure ecosystem.\n\n![Architecture Overview](./architecture.jpg)\n"
  },
  {
    "path": "docs/sample-extension.md",
    "content": "# Sample Extension\n\nIn this page, we offer a sample extension using [Utils](./utils.md).\n\nAfter this section, you can get the following directory:\n\n```\nSampleExtension/\n├── disable.py\n├── enable.py\n├── HandlerManifest.json\n├── install.py\n├── references\n├── uninstall.py\n└── update.py\n```\n\n## HandlerManifest.json\n\n```\n[{\n  \"name\": \"SampleExtension\",\n  \"version\": 1.0,\n  \"handlerManifest\": {\n    \"installCommand\": \"./install.py\",\n    \"uninstallCommand\": \"./uninstall.py\",\n    \"updateCommand\": \"./update.py\",\n    \"enableCommand\": \"./enable.py\",\n    \"disableCommand\": \"./disable.py\",\n    \"rebootAfterInstall\": false,\n    \"reportHeartbeat\": false\n  }\n}]\n```\n\n## enable.py\n\n1. Get the paramter `name` in the public settings.\n2. Log the `name` into `extension.log`.\n\n## references\n\nThis file is used to package the extension using [create_zip.sh](https://github.com/Azure/azure-linux-extensions/blob/master/script/create_zip.sh).\n\nYou can put `Utils` in `references`. Then `create_zip.sh` will put the direcotry `SampleExtension` and `Utils` into `SampleExtension-1.0.zip`.\n"
\n\n## references\n\nThis file is used to package the extension using [create_zip.sh](https://github.com/Azure/azure-linux-extensions/blob/master/script/create_zip.sh).\n\nYou can put `Utils` in `references`. Then `create_zip.sh` will put the directories `SampleExtension` and `Utils` into `SampleExtension-1.0.zip`.\n"
  },
  {
    "path": "docs/test.md",
    "content": "# Test\n\n## Test Matrix\n\nYou should test your extension in the distros which you want to support.\n\nHere is the distro list:\n\n* Ubuntu 12.04 and higher\n* CentOS 6.5 and higher\n* Oracle Linux 6.4.0.0.0 and higher\n* openSUSE 13.1 and higher\n* SUSE Linux Enterprise Server 11 SP3 and higher\n* FreeBSD\n* CoreOS\n\nYou can choose some or all of them to support.\n\n## ASM or ARM\n\nIt's important to understand that Azure currently has two deployment models: Resource Manager, and classic. Make sure you understand [deployment models and tools](https://azure.microsoft.com/en-us/documentation/articles/azure-classic-rm/) before working with any Azure resource.\n\n## Azure Templates\n\nIf you decide to support the scenario of deploying your extension using ARM Templates, you need to test it.\n\n## Continuous Integration\n\nThere are many tools to do the CI work, for e.g. Jenkins, Concourse and so on.\n"
\n\n## Continuous Integration\n\nThere are many tools that can do the CI work, e.g. Jenkins, Concourse, and so on.\n"
  },
  {
    "path": "docs/utils.md",
    "content": "# Utils\n\nYou can write an extension from scrach using your favourate language following [Design Details](./design-details.md).\n\nThe utils we offer are optional. They are writen in Python, and they can accelerate your development. Without them, you need to handle the protocal between WALA and extensions by yourself.\n\n## HandlerUtils\n\n[HandlerUtils.py](https://github.com/Azure/azure-linux-extensions/blob/master/Utils/HandlerUtil.py) handles the protocal between WALA and extensions, status and heartbeat reporting, and the logging.\n\n* Get your settings\n  * `get_public_settings()` method returns the public settings\n  * `get_protected_settings()` method returns the protected settings which have been decrypted.\n* Status reporting\n  * `do_status_report` method reports the status, but not exists.\n  * `do_exit` method reports the status and exists.\n* Logging\n  * HandlerUtils.py will put the logs into the log file `extension.log` which is located in `logFolder` of `handlerEnvironment.json`.\n  * The method `log` and `error` can be used.\n\n## WAAgentUtil\n\nWAAgentUtil.py helps to load the source of [WALA](https://github.com/Azure/WALinuxAgent). You can use the function in WALA, for e.g. GetFileContents.\n"
\n\n## WAAgentUtil\n\nWAAgentUtil.py helps to load the source of [WALA](https://github.com/Azure/WALinuxAgent). You can use the functions in WALA, e.g. GetFileContents.\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/ChrisCoe/azure-linux-extensions\n\ngo 1.21\n"
  },
  {
    "path": "go.sum",
    "content": ""
  },
  {
    "path": "registration-scripts/api/add-extension.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\necho $1\ncurl -v -X 'POST'  -H \"$VERSION\" -H 'Content-Type: application/xml' -E $CERT -d@$1 $ENDPOINT/$SUBSCRIPTION/services/extensions\n"
  },
  {
    "path": "registration-scripts/api/check-request-status.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\necho $1\ncurl -v -X 'GET' --keepalive-time 30 --user-agent ' Microsoft.WindowsAzure.Management.Compute.ComputeManagementClient/0.9.0.0 WindowsAzurePowershell/v0.8.0' -H 'x-ms-version: 2014-06-01' -H 'Content-Type: application/xml'  --insecure  -E $CERT --data-binary @$1  $ENDPOINT/$SUBSCRIPTION/operations/$1\n"
  },
  {
    "path": "registration-scripts/api/del-extension.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\ncurl -v -X 'DELETE'  -H \"$VERSION\" -H 'Content-Type: application/xml' -E $CERT $ENDPOINT/$SUBSCRIPTION/services/extensions/$1/$2/$3\n"
  },
  {
    "path": "registration-scripts/api/get-extension.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\ncurl -v -X 'GET'  -H \"$VERSION\" -H 'Content-Type: application/xml' -E $CERT $ENDPOINT/$SUBSCRIPTION/services/resourceextensions/$1/$2\n"
  },
  {
    "path": "registration-scripts/api/get-subscription.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\necho \"GET $ENDPOINT/$SUBSCRIPTION\"\n\ncurl -v -X 'GET' --keepalive-time 30 --user-agent ' Microsoft.WindowsAzure.Management.Compute.ComputeManagementClient/0.9.0.0 WindowsAzurePowershell/v0.8.0' -H 'x-ms-version: 2014-06-01' -H 'Content-Type: application/xml'  --insecure  -E $CERT $ENDPOINT/$SUBSCRIPTION\n"
  },
  {
    "path": "registration-scripts/api/list-extension.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\ncurl -v -X 'GET' -H 'x-ms-version: 2014-06-01' -H 'Content-Type: application/xml' -E $CERT $ENDPOINT/$SUBSCRIPTION/services/publisherextensions\n"
  },
  {
    "path": "registration-scripts/api/params",
    "content": "AZURE_PRODUCTION=1\n#MOONCAKE_PRODUCTION=1\n\nif [ $MOONCAKE_PRODUCTION -eq 1 ] ; then\n    export ENDPOINT=\"https://management.core.chinacloudapi.cn\"\n    export SUBSCRIPTION=\"REPLACE-ME\"\n    export VERSION='x-ms-version: 2014-06-01' \n    export CERT=\"REPLACE-ME\"\n    export CONN_STR=\"REPLACE-ME\"\nelif [ $AZURE_PRODUCTION -eq 1 ] ; then\n    export ENDPOINT=\"https://management.core.windows.net\"\n    export SUBSCRIPTION=\"REPLACE-ME\"\n    export VERSION='x-ms-version: 2014-06-01' \n    export CERT=\"REPLACE-ME\"\n    export CONN_STR=\"REPLACE-ME\"\nfi\n\necho ENDPOINT: $ENDPOINT >&2\necho SUBSCRIPTION: $SUBSCRIPTION >&2\necho CERT: $CERT >&2\necho VERSION: $VERSION >&2\n"
  },
  {
    "path": "registration-scripts/api/update-extension.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script\nsource params\nexport script=`pwd`\ncd $original_dir\n\ncurl -v -X 'PUT' -H \"$VERSION\" -H 'Content-Type: application/xml' -E $CERT -d@$1 $ENDPOINT/$SUBSCRIPTION/services/extensions?action=update\n"
  },
  {
    "path": "registration-scripts/bin/add.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Add extension: $1\"\n$root/api/add-extension.sh 2>/tmp/restoutput $1 | sed -e 's/></>\\n</g'\n\necho \"====================\"\necho \"Check request by running bin/check.sh <request-id>\"\ntail /tmp/restoutput\necho \"====================\"\necho \"More info is saved in /tmp/restoutput\"\n"
  },
  {
    "path": "registration-scripts/bin/blob/list.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script/../../api\nsource params\nexport script=`pwd`\ncd $original_dir\n\nazure storage blob list -c $CONN_STR extensions\n"
  },
  {
    "path": "registration-scripts/bin/blob/upload.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=`dirname $0`\ncd $script/../../api\nsource params\nexport script=`pwd`\ncd $original_dir\n\nzip_file=$(readlink -f $1)\nif [ ! -f $zip_file ] ; then\n    echo \"File not found: $zip_file\"\n    exit 1\nfi\nfile_name=$(basename $zip_file)\necho \"Uploading $zip_file to azure...\"\nazure storage blob upload -c $CONN_STR $zip_file extensions $file_name\n"
  },
  {
    "path": "registration-scripts/bin/check.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Check Request: $1\"\n$root/api/check-request-status.sh 2>>/tmp/restoutput $1 | sed -e 's/></>\\n</g'\n\n"
  },
  {
    "path": "registration-scripts/bin/del.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Delete extension: $1 $2 $3\"\n$root/api/del-extension.sh 2>/tmp/restoutput $1 $2 $3| sed -e 's/></>\\n</g'\n\necho \"====================\"\necho \"Check request by running bin/check.sh <request-id>\"\ntail /tmp/restoutput\necho \"====================\"\necho \"More info is saved in /tmp/restoutput\"\n"
  },
  {
    "path": "registration-scripts/bin/get.sh",
    "content": "#!/bin/bash\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Get extension: $1 $2\"\n$root/api/get-extension.sh 2>/tmp/restoutput $1 $2 | sed -e 's/></>\\n</g'\n\n"
  },
  {
    "path": "registration-scripts/bin/list.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"List extensions: $1\"\n$root/api/list-extension.sh 2>/tmp/restoutput | sed -e 's/></>\\n</g' | sed -e 's/<\\/ExtensionImage>/<\\/ExtensionImage>\\n/g'\n"
  },
  {
    "path": "registration-scripts/bin/subscription.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Get subscription\"\n$root/api/get-subscription.sh 2>>/tmp/restoutput | sed -e 's/></>\\n</g'\n"
  },
  {
    "path": "registration-scripts/bin/update.sh",
    "content": "#!/bin/bash\n\noriginal_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\ncd $original_dir\n\necho \"Update extension: $1\"\n$root/api/update-extension.sh 2>/tmp/restoutput $1 | sed -e 's/></>\\n</g'\n\necho \"====================\"\necho \"Check request by running bin/check.sh <request-id>\"\ntail /tmp/restoutput\necho \"====================\"\necho \"More info is saved in /tmp/restoutput\"\n"
  },
  {
    "path": "registration-scripts/create_zip.sh",
    "content": "#!/bin/bash\n#\n# This script is used to set up a test env for extensions\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nif [ $# != 2 ]  ; then\n    echo \"\" \n    echo \"    Usage: $0 <path-to-extension> <version>\"\n    echo \"    Example: $0 SampleExtension 1.0.0.0\"\n    echo \"\"\n    exit 1\nfi\n\nif [ ! -d $1 ]  ; then\n    echo \"\" \n    echo \"    Error: Couldn't find dir: $1\"\n    echo \"\"\n    exit 1\nfi\n\ncurr_dir=`pwd`\next_dir=$1\ncd $ext_dir\next_dir=`pwd`\ncd $curr_dir\n\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\n\nutil_dir=$root/Utils\nbuild_dir=$root/build\n\nif [ ! -d $build_dir ] ; then\n    mkdir $build_dir\nfi\n\next_name=`echo $1 | sed 's/\\/$//'`\next_version=$2\next_full_name=$ext_name-$ext_version\ntmp_dir=$build_dir/$ext_full_name\n\necho \"Create zip for $ext_name version $ext_version\"\n\necho \"Creat tmp dir: $tmp_dir\"\nmkdir $tmp_dir\n\necho \"Copy files...\"\ncp -r $ext_dir/* $tmp_dir\nrm $tmp_dir/references\n\necho \"Copy dependecies...\"\ncat $ext_dir/references\ncat $ext_dir/references | xargs cp -r -t $tmp_dir\n\necho \"Switch to tmp dir...\"\ncd $tmp_dir\n\necho \"Remove test dir...\"\nrm -r test\nrm -r */test\n\necho \"Remove *.pyc...\"\nfind . -name \"*.pyc\" | xargs rm -f\n\necho \"Create zip...\"\nzip -r $build_dir/$ext_full_name.zip .\n\necho \"Delete tmp dir...\"\nrm $tmp_dir -r\necho \"Done!\"\n"
  },
  {
    "path": "registration-scripts/mooncake/sample-extension-1.0.xml",
    "content": "<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.Loves.Linux</ProviderNameSpace>\n<Type>SampleExtension</Type>\n<Version>1.0.0.0</Version>\n<Label>Microsoft loves Linux</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink>Storage blob location of the Zip file</MediaLink>\n<Description>Microsoft loves Linux</Description>\n<IsInternalExtension>false</IsInternalExtension>\n<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>\n<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<SupportedOS>Linux</SupportedOS>\n<CompanyName>Microsoft</CompanyName>\n</ExtensionImage>\n"
  },
  {
    "path": "registration-scripts/public/sample-extension-1.0.xml",
    "content": "<ExtensionImage xmlns=\"http://schemas.microsoft.com/windowsazure\">\n<ProviderNameSpace>Microsoft.Loves.Linux</ProviderNameSpace>\n<Type>SampleExtension</Type>\n<Version>1.0.0.0</Version>\n<Label>Microsoft loves Linux</Label>\n<HostingResources>VmRole</HostingResources>\n<MediaLink>Storage blob location of the Zip file</MediaLink>\n<Description>Microsoft loves Linux</Description>\n<IsInternalExtension>false</IsInternalExtension>\n<Eula>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</Eula>\n<PrivacyUri>https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt</PrivacyUri>\n<HomepageUri>https://github.com/Azure/azure-linux-extensions</HomepageUri>\n<IsJsonExtension>true</IsJsonExtension>\n<SupportedOS>Linux</SupportedOS>\n<CompanyName>Microsoft</CompanyName>\n</ExtensionImage>\n"
  },
  {
    "path": "script/0.settings",
    "content": "{\"runtimeSettings\":[{\"handlerSettings\":{\"protectedSettingsCertThumbprint\":\"TEST\",\"protectedSettings\":\"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgRXh0ZW5zaW9ucwIQJ1fD4ZQMF7RKAOgzHVJRRDANBgkqhkiG9w0BAQEFAASCAQBrnH4vyuPreCPD53g4e/ixZ7F9+iHzG3Vp4R7LnZoFLVejLcPfxQ1yhaDtXiIAXs19LfnwukbSe2gxpEIkNqohSh4EvRn2RI2ss4Lmmp69qnccr3g8/uHdgYBKUxyZbG+Ul2tjzcu173uOKpr6fSrAGKyGX0KqPCBFMD7vxhem3sd/9oQwfsxXUvkl3zkFioOP5oor6BKvfMQ8kxRv0UfvXF0mqDzXLF8/vQ6kexqglAH+L8L5dcXFF1+D/WyNUkZJOr4ax4BMgtrV/HGoWoNkjxmFrRcsiEpJ2JGCPduAuWUYHrLjV59Jjf30pszN2D/K1naYwNDY79zRDm/8CTJEMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIUStUI4paw9uAGHvktyCyAIwMBP/AB5iOs34BuT5vXdGH7g==\"}}]}\n"
  },
  {
    "path": "script/HandlerEnvironment.json",
    "content": "[{\n    \"name\": \"VMAccess\", \n    \"seqNo\": \"1\", \n    \"version\": 1.0,\n    \"handlerEnvironment\": {    \n        \"logFolder\": \"/var/log/azure/VMAccess/1.0\",\n        \"configFolder\": \"/var/lib/waagent/VMAccess-1.0/config\",\n        \"statusFolder\": \"/var/lib/waagent/VMAccess-1.0/status\",\n        \"heartbeatFile\": \"/var/lib/waagent/VMAccess-1.0/heartbeat.log\"\n    }\n}]\n"
  },
  {
    "path": "script/create_zip.sh",
    "content": "#!/bin/bash\n#\n# This script is used to set up a test env for extensions\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nif [ ! $1 ]  || [ ! $2 ] || [ ! $3 ]  ; then\n    echo \"\" \n    echo \"    Usage: create_zip.sh <path_to_extension_dir> <name> <version>\"\n    echo \"\"\n    exit 1\nfi\n\nif [ ! -d $1 ]  ; then\n    echo \"\" \n    echo \"    Error: Couldn't find dir: $1>\"\n    echo \"\"\n    exit 1\nfi\n\ncurr_dir=`pwd`\next_dir=$1\next_name=$2\next_version=$3\ncd $ext_dir\next_dir=`pwd`\ncd $curr_dir\n\n\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\n\necho $ext_name\necho $ext_version\n\nutil_dir=$root/Utils\nbuild_dir=$root/build\n\n\nif [ ! $ext_name ] ; then\n    echo \"\"\n    echo \"    Error: Couldn't detect extention name: $ext_name\"\n    echo \"\"\n    exit 1\nfi\n\nif [ ! $ext_version ] ; then\n    echo \"\"\n    echo \"    Error: Couldn't detect extention version: $ext_version\"\n    echo \"\"\n    exit 1\nfi\n\nif [ ! -d $build_dir ] ; then\n    mkdir $build_dir\nfi\n\next_full_name=$ext_name-$ext_version\ntmp_dir=$build_dir/$ext_full_name\n\necho \"Create zip for $ext_name version $ext_version\"\n\necho \"Creat tmp dir: $tmp_dir\"\nmkdir $tmp_dir\n\necho \"Copy files...\"\ncp -r $ext_dir/* $tmp_dir\nrm $tmp_dir/references\n\necho \"Copy dependecies...\"\ncat $ext_dir/references\ncat $ext_dir/references | xargs cp -r -t $tmp_dir\n\necho \"Switch to tmp dir...\"\ncd $tmp_dir\n\necho \"Remove test dir...\"\nrm -r test\nrm -r */test\nrm *.pyc\n\necho \"Create zip...\"\nzip -r $build_dir/$ext_full_name.zip .\n\necho \"Delete tmp dir...\"\nrm $tmp_dir -r\necho \"Done!\"\n"
  },
  {
    "path": "script/mkstub.sh",
    "content": "#!/bin/bash\n#\n# This script is used to create stub for unit test\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nif [ ! $1 ]  ; then\n    echo \"\" \n    echo \"    Usage: mkstub.sh <path_to_extension_dir>\"\n    echo \"\"\n    exit 1\nfi\n\nif [ ! -d $1 ]  ; then\n    echo \"\" \n    echo \"    Error: Couldn't find dir: $1>\"\n    echo \"\"\n    exit 1\nfi\n\next_dir=$1\next_meta=$ext_dir/HandlerManifest.json\n\nif [ ! -f $ext_meta ] ; then\n    echo \"\"\n    echo \"    Error: Couldn't find \\\"HandlerManifest.json\\\" file under $ext_dir\"\n    echo \"\"\n    exit 1\nfi\n\ncur_dir=`pwd`\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\nwaagent_path='/usr/sbin/waagent'\nwaagent_lib_dir='/var/lib/waagent'\next_log_dir='/var/log/azure'\n\next_name=`grep 'name' $ext_meta | sed 's/[\\\"| |,]//g' |gawk -F ':' '{print $2}'`\next_version=`grep 'version' $ext_meta | sed 's/[\\\"| |,]//g' |gawk -F ':' '{print $2}'`\n\next_full_name=$ext_name-$ext_version\next_dir=$waagent_lib_dir/$ext_full_name\next_status_dir=$ext_dir/status\next_config_dir=$ext_dir/config\next_env_json=$ext_dir/HandlerEnvironment.json\ntest_cert_file=$waagent_lib_dir/TEST.crt\ntest_pk_file=$waagent_lib_dir/TEST.prv\novf_env_file=$waagent_lib_dir/ovf-env.xml\n\nif [ ! -f $waagent_path ] ; then\n    echo \"Download latest waagent code\"\n    wget https://raw.githubusercontent.com/Azure/WALinuxAgent/2.0/waagent -O $waagent_path\n    chmod +x $waagent_path\nfi\n\nif [ ! -d $waagent_lib_dir ] ; then\n    echo \"Create lib dir\"\n    mkdir $waagent_lib_dir\nfi\n\nif [ ! -d $ext_dir ] ; then\n    echo \"Create extension dir\"\n    mkdir $ext_dir\nfi\n\nif [ ! -d $ext_config_dir ] ; then\n    echo \"Create extension config dir\"\n    mkdir $ext_config_dir\nfi\n\nif [ ! -d $ext_status_dir ] ; then\n    echo \"Create extension status dir\"\n    mkdir $ext_status_dir\nfi\n\nif [ ! -f $ext_env_json ] ; then\n    echo \"Create HandlerEnvironment.json file\"\n    cp $script/HandlerEnvironment.json $ext_env_json\nfi\n\nif [ ! -f $test_cert_file ] ; then\n    echo \"Create test cert file\"\n    cp $script/test.crt $test_cert_file\nfi\n\nif [ ! -f $test_pk_file ] ; then\n    echo \"Create test pk file\"\n    cp $script/test.prv $test_pk_file\nfi\n\nif [ ! -f $ovf_env_file ] ; then\n    echo \"Create ovf-env.xml file\"\n    cp $script/ovf-env.xml $ovf_env_file\nfi\n\nif [ ! -f $ext_config_dir/0.settings ] ; then\n    echo \"Create 0.settings\"\n    cp $script/0.settings $ext_config_dir/0.settings\nfi\n\nif [ ! -d $ext_log_dir ] ; then\n    echo \"Create ext log dir\"\n    mkdir $ext_log_dir\nfi\n\nif [ ! -d $ext_log_dir/$ext_name ] ; then\n    echo \"Create ext log dir for $ext_name\"\n    mkdir $ext_log_dir/$ext_name\nfi\n\nif [ ! 
-d $ext_log_dir/$ext_name/$ext_version ] ; then\n    echo \"Create ext log dir for $ext_name $ext_version\"\n    mkdir $ext_log_dir/$ext_name/$ext_version\nfi\n\necho \"Change permission of waagent lib dir\"\nchmod -R 600 $waagent_lib_dir\n"
  },
  {
    "path": "script/ovf-env.xml",
    "content": "﻿<?xml version='1.0' encoding='utf-8'?>\n<Environment xmlns='http://schemas.dmtf.org/ovf/environment/1' xmlns:oe='http://schemas.dmtf.org/ovf/environment/1' xmlns:wa='http://schemas.microsoft.com/windowsazure' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'>\n  <wa:ProvisioningSection>\n    <wa:Version>1.0</wa:Version>\n    <LinuxProvisioningConfigurationSet xmlns='http://schemas.microsoft.com/windowsazure' xmlns:i='http://www.w3.org/2001/XMLSchema-instance'>\n      <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>\n      <HostName>test-ext</HostName>\n      <UserName>azureuser</UserName>\n      <UserPassword>User@123</UserPassword>\n      <DisableSshPasswordAuthentication>false</DisableSshPasswordAuthentication>\n      <SSH>\n        <PublicKeys>\n          <PublicKey>\n            <Fingerprint>test</Fingerprint>\n            <Path>/home/azureuser/.ssh/authorized_keys</Path>\n          </PublicKey>\n        </PublicKeys>\n      </SSH>\n    </LinuxProvisioningConfigurationSet>\n  </wa:ProvisioningSection>\n  <wa:PlatformSettingsSection>\n    <wa:Version>1.0</wa:Version>\n    <PlatformSettings xmlns='http://schemas.microsoft.com/windowsazure' xmlns:i='http://www.w3.org/2001/XMLSchema-instance'>\n      <KmsServerHostname>kms.core.windows.net</KmsServerHostname>\n      <ProvisionGuestAgent>true</ProvisionGuestAgent>\n      <GuestAgentPackageName>Win7_Win8_IaaS_rd_art_stable_140703-0050_GuestAgentPackage.zip</GuestAgentPackageName>\n    </PlatformSettings>\n  </wa:PlatformSettingsSection>\n</Environment>\n"
  },
  {
    "path": "script/set_env.sh",
    "content": "#!/bin/bash\n#\n# This script is used to set up a test env for extensions\n#\n# Copyright 2014 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nscript=$(dirname $0)\nroot=$script/..\ncd $root\nroot=`pwd`\nlib_path=\".\"\n\necho \"\\$PYTHONPATH=$PYTHONPATH\"\n\nif [ ! `echo $PYTHONPATH | grep $root` ] ; then\n    lib_path=$lib_path:$root\nfi\n\nif [ $lib_path != \".\" ] ; then\n    echo \"echo \\\"export PYTHONPATH=\\$PYTHONPATH:$lib_path\\\" >> /etc/bash.bashrc\"\n    echo \"export PYTHONPATH=\\$PYTHONPATH:$lib_path\" >> /etc/bash.bashrc\n    echo \"Enviroment variable PYTHONPATH has been set.\"\n    echo \"Run \\\"bash\\\" to reload bash.\"\nelse\n    echo \"Your enviroment is cool. No action required.\"\nfi\n"
  },
  {
    "path": "script/test.crt",
    "content": "Bag Attributes: <Empty Attributes>\nsubject=/C=ab/ST=ab/L=ab/O=ab/OU=ab/CN=ab/emailAddress=ab\nissuer=/C=ab/ST=ab/L=ab/O=ab/OU=ab/CN=ab/emailAddress=ab\n-----BEGIN CERTIFICATE-----\nMIICOTCCAaICCQD7F0nb+GtpcTANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJh\nYjELMAkGA1UECAwCYWIxCzAJBgNVBAcMAmFiMQswCQYDVQQKDAJhYjELMAkGA1UE\nCwwCYWIxCzAJBgNVBAMMAmFiMREwDwYJKoZIhvcNAQkBFgJhYjAeFw0xNDA4MDUw\nODIwNDZaFw0xNTA4MDUwODIwNDZaMGExCzAJBgNVBAYTAmFiMQswCQYDVQQIDAJh\nYjELMAkGA1UEBwwCYWIxCzAJBgNVBAoMAmFiMQswCQYDVQQLDAJhYjELMAkGA1UE\nAwwCYWIxETAPBgkqhkiG9w0BCQEWAmFiMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB\niQKBgQC4Vugyj4uAKGYHW/D1eAg1DmLAv01e+9I0zIi8HzJxP87MXmS8EdG5SEzR\nN6tfQQie76JBSTYI4ngTaVCKx5dVT93LiWxLV193Q3vs/HtwwH1fLq0rAKUhREQ6\n+CsRGNyeVfJkNsxAvNvQkectnYuOtcDxX5n/25eWAofobxVbSQIDAQABMA0GCSqG\nSIb3DQEBCwUAA4GBAF20gkq/DeUSXkZA+jjmmbCPioB3KL63GpoTXfP65d6yU4xZ\nTlMoLkqGKe3WoXmhjaTOssulgDAGA24IeWy/u7luH+oHdZEmEufFhj4M7tQ1pAhN\nCT8JCL2dI3F76HD6ZutTOkwRar3PYk5q7RsSJdAemtnwVpgp+RBMtbmct7MQ\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "script/test.prv",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC4Vugyj4uAKGYHW/D1eAg1DmLAv01e+9I0zIi8HzJxP87MXmS8\nEdG5SEzRN6tfQQie76JBSTYI4ngTaVCKx5dVT93LiWxLV193Q3vs/HtwwH1fLq0r\nAKUhREQ6+CsRGNyeVfJkNsxAvNvQkectnYuOtcDxX5n/25eWAofobxVbSQIDAQAB\nAoGAIakE506c238E+m0Id9o+LWn+EFIeT6zN+oQqp6dOr61GFr1ZyZm7YQjZtg5j\nRZZ7e4Iob6Fts3ufD3RYl67QbBzRwsKwI7sAmzdCmqkopY2H6xv421cEGjkqZIJV\n2Xyp9Idji6GfUB6+t1SZDOssbZx3SUkyim0hixK2HCJT4u0CQQDw6rNLZwEmwuhY\nz1jSERyeTtIcRJ47+Y79tX2xmkyKxZ2Kf28V3Fw/6biCIlmuvxHNhlLijimOME7/\nrkqDiscnAkEAw+FpkM96xLlDCqNL2AcNxVnmNyO0Boxw0AKrogfcnDh6S3rD5tZQ\nIdcIAsEYNjhEJ+/hVCByIUArC885PTzQDwJBAMaDfm3ZWHeKD05uvG+MLhq8NCGa\n4Q/mWU7xZ7sau4t1vpTK4MwQoesAOUrx5xg41QCXeGC6Z7+ESvQft8Kgbe0CQAkS\nOExPf3T6y2MDuvBvKzEXf7TP/3dKK7NGXGJtkMbfSrKSJd5b0GwwxBs0jAV+x5E9\n56Z4tjBaA2RRnWn7lfsCQA5SWuDMtlOzyWir09fparnnRL1JFvOwDAHTE0iwS8dO\nUFHIIw4nqqUYuHb+r/eyRzVtokJ9bSPZOjtTWSVL4W4=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux/Artifacts/CreateUiDefinition.json",
    "content": "{\n  \"handler\": \"Microsoft.ClassicCompute.VmExtension\",\n  \"version\": \"0.0.1-preview\",\n  \"parameters\": {\n    \"elements\": [\n      {\n        \"name\": \"fileUris\",\n        \"type\": \"Microsoft.Common.FileUpload\",\n        \"label\": \"Script files\",\n        \"toolTip\": \"The script files that will be downloaded to the virtual machine.\",\n        \"constraints\": {\n          \"required\": false\n        },\n        \"options\": {\n          \"multiple\": true,\n          \"uploadMode\": \"url\"\n        }\n      },\n      {\n        \"name\": \"commandToExecute\",\n        \"type\": \"Microsoft.Common.TextBox\",\n        \"label\": \"Command\",\n        \"defaultValue\": \"sh script.sh\",\n        \"toolTip\": \"The command to execute, for example: sh script.sh\",\n        \"constraints\": {\n          \"required\": true\n        }\n      }\n    ],\n    \"outputs\": {\n      \"vmName\": \"[vmName()]\",\n      \"location\": \"[location()]\",\n      \"fileUris\": \"[elements('fileUris')]\",\n      \"commandToExecute\": \"[elements('commandToExecute')]\"\n    }\n  }\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux/Artifacts/MainTemplate.json",
    "content": "{\n  \"$schema\": \"http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\",\n  \"contentVersion\": \"1.0.0.0\",\n  \"parameters\": {\n    \"vmName\": {\n      \"type\": \"string\"\n    },\n    \"location\": {\n      \"type\": \"string\"\n    },\n    \"fileUris\": {\n      \"type\": \"array\"\n    },\n    \"commandToExecute\": {\n      \"type\": \"string\"\n    }\n  },\n  \"resources\": [\n    {\n      \"name\": \"[concat(parameters('vmName'),'/CustomScriptForLinux')]\",\n      \"type\": \"Microsoft.ClassicCompute/virtualMachines/extensions\",\n      \"location\": \"[parameters('location')]\",\n      \"apiVersion\": \"2015-06-01\",\n      \"properties\": {\n        \"publisher\": \"Microsoft.OSTCExtensions\",\n        \"extension\": \"CustomScriptForLinux\",\n        \"version\": \"1.*\",\n        \"parameters\": {\n          \"public\": {\n            \"fileUris\": \"[parameters('fileUris')]\",\n            \"commandToExecute\": \"[parameters('commandToExecute')]\"\n          }\n        }\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux/Manifest.json",
    "content": "{\n  \"$schema\": \"https://gallery.azure.com/schemas/2015-04-01/manifest.json#\",\n  \"name\": \"custom-script-linux\",\n  \"publisher\": \"microsoft\",\n  \"version\": \"1.0.0\",\n  \"displayName\": \"ms-resource:displayName\",\n  \"publisherDisplayName\": \"ms-resource:publisherDisplayName\",\n  \"publisherLegalName\": \"ms-resource:publisherDisplayName\",\n  \"summary\": \"ms-resource:summary\",\n  \"longSummary\": \"ms-resource:summary\",\n  \"description\": \"ms-resource:description\",\n  \"uiDefinition\": {\n    \"path\": \"UiDefinition.json\"\n  },\n  \"artifacts\": [\n    {\n      \"name\": \"MainTemplate\",\n      \"type\": \"Template\",\n      \"path\": \"Artifacts\\\\MainTemplate.json\",\n      \"isDefault\": true\n    },\n    {\n      \"name\": \"CreateUiDefinition\",\n      \"type\": \"Custom\",\n      \"path\": \"Artifacts\\\\CreateUiDefinition.json\",\n      \"isDefault\": false\n    },\n  ],\n  \"icons\": {\n    \"small\": \"Icons\\\\Small.png\",\n    \"medium\": \"Icons\\\\Medium.png\",\n    \"large\": \"Icons\\\\Large.png\",\n    \"wide\": \"Icons\\\\Wide.png\"\n  },\n  \"links\": [\n    {\n      \"displayName\": \"ms-resource:link1\",\n      \"uri\": \"https://github.com/Azure/azure-linux-extensions/tree/master/CustomScript\"\n    }\n  ],\n  \"categories\": [\n    \"classicCompute-vmextension-linux\"\n  ]\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux/Strings/resources.resjson",
    "content": "{\n  \"displayName\": \"Custom Script For Linux\",\n  \"publisherDisplayName\": \"Microsoft Corp.\",\n  \"summary\": \"Custom Script extension for Linux\",\n  \"description\": \"<p>CustomScript Extension is a tool to execute your VM customization tasks post VM provision. When this Extension is added to a Virtual Machine, it can download customer’s scripts from the Azure storage or public storage, and execute the scripts on the VM. CustomScript Extension tasks can also be automated using the Azure PowerShell cmdlets and Azure Cross-Platform Command-Line Interface (xPlat CLI).</p><p><h3>Legal Terms</h3>By clicking the Create button, I acknowledge that I am getting this software from Microsoft Corp. and that the <a href='https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt' target='_blank'>legal terms</a> of Microsoft Corp. apply to it. Microsoft does not provide rights for third-party software. Also see the <a href='https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt' target='_blank'>privacy statement</a> from Microsoft Corp..</p>\",\n  \"link1\": \"Documentation\"\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux/UiDefinition.json",
    "content": "{\n  \"$schema\": \"https://gallery.azure.com/schemas/2015-02-12/uiDefinition.json#\",\n  \"createDefinition\": {\n    \"createBlade\": {\n      \"name\": \"AddVmExtension\",\n      \"extension\": \"Microsoft_Azure_Classic_Compute\"\n    }\n  }\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux-arm/Artifacts/CreateUiDefinition.json",
    "content": "{\n  \"handler\": \"Microsoft.Compute.VmExtension\",\n  \"version\": \"0.0.1-preview\",\n  \"parameters\": {\n    \"elements\": [\n      {\n        \"name\": \"fileUris\",\n        \"type\": \"Microsoft.Common.FileUpload\",\n        \"label\": \"Script files\",\n        \"toolTip\": \"The script files that will be downloaded to the virtual machine.\",\n        \"constraints\": {\n          \"required\": false\n        },\n        \"options\": {\n          \"multiple\": true,\n          \"uploadMode\": \"url\"\n        }\n      },\n      {\n        \"name\": \"commandToExecute\",\n        \"type\": \"Microsoft.Common.TextBox\",\n        \"label\": \"Command\",\n        \"defaultValue\": \"sh script.sh\",\n        \"toolTip\": \"The command to execute, for example: sh script.sh\",\n        \"constraints\": {\n          \"required\": true\n        }\n      }\n    ],\n    \"outputs\": {\n      \"vmName\": \"[vmName()]\",\n      \"location\": \"[location()]\",\n      \"fileUris\": \"[elements('fileUris')]\",\n      \"commandToExecute\": \"[elements('commandToExecute')]\"\n    }\n  }\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux-arm/Artifacts/MainTemplate.json",
    "content": "{\n  \"$schema\": \"http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\",\n  \"contentVersion\": \"1.0.0.0\",\n  \"parameters\": {\n    \"vmName\": {\n      \"type\": \"string\"\n    },\n    \"location\": {\n      \"type\": \"string\"\n    },\n    \"fileUris\": {\n      \"type\": \"array\"\n    },\n    \"commandToExecute\": {\n      \"type\": \"string\"\n    }\n  },\n  \"resources\": [\n    {\n      \"name\": \"[concat(parameters('vmName'),'/CustomScriptForLinux')]\",\n      \"type\": \"Microsoft.Compute/virtualMachines/extensions\",\n      \"location\": \"[parameters('location')]\",\n      \"apiVersion\": \"2015-06-15\",\n      \"properties\": {\n        \"publisher\": \"Microsoft.OSTCExtensions\",\n        \"type\": \"CustomScriptForLinux\",\n        \"typeHandlerVersion\": \"1.4\",\n        \"autoUpgradeMinorVersion\": true,\n        \"settings\": {\n          \"fileUris\": \"[parameters('fileUris')]\",\n          \"commandToExecute\": \"[parameters('commandToExecute')]\"\n        }\n      }\n    }\n  ]\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux-arm/Manifest.json",
    "content": "{\n  \"$schema\": \"https://gallery.azure.com/schemas/2015-04-01/manifest.json#\",\n  \"name\": \"custom-script-linux-arm\",\n  \"publisher\": \"microsoft\",\n  \"version\": \"1.0.0\",\n  \"displayName\": \"ms-resource:displayName\",\n  \"publisherDisplayName\": \"ms-resource:publisherDisplayName\",\n  \"publisherLegalName\": \"ms-resource:publisherDisplayName\",\n  \"summary\": \"ms-resource:summary\",\n  \"longSummary\": \"ms-resource:summary\",\n  \"description\": \"ms-resource:description\",\n  \"uiDefinition\": {\n    \"path\": \"UiDefinition.json\"\n  },\n  \"artifacts\": [\n    {\n      \"name\": \"MainTemplate\",\n      \"type\": \"Template\",\n      \"path\": \"Artifacts\\\\MainTemplate.json\",\n      \"isDefault\": true\n    },\n    {\n      \"name\": \"CreateUiDefinition\",\n      \"type\": \"Custom\",\n      \"path\": \"Artifacts\\\\CreateUiDefinition.json\",\n      \"isDefault\": false\n    },\n  ],\n  \"icons\": {\n    \"small\": \"Icons\\\\Small.png\",\n    \"medium\": \"Icons\\\\Medium.png\",\n    \"large\": \"Icons\\\\Large.png\",\n    \"wide\": \"Icons\\\\Wide.png\"\n  },\n  \"links\": [\n    {\n      \"displayName\": \"ms-resource:link1\",\n      \"uri\": \"https://github.com/Azure/azure-linux-extensions/tree/master/CustomScript\"\n    }\n  ],\n  \"categories\": [\n    \"compute-vmextension-linux\"\n  ]\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux-arm/Strings/resources.resjson",
    "content": "{\n  \"displayName\": \"Custom Script For Linux\",\n  \"publisherDisplayName\": \"Microsoft Corp.\",\n  \"summary\": \"Custom Script extension for Linux\",\n  \"description\": \"<p>CustomScript Extension is a tool to execute your VM customization tasks post VM provision. When this Extension is added to a Virtual Machine, it can download customer’s scripts from the Azure storage or public storage, and execute the scripts on the VM. CustomScript Extension tasks can also be automated using the Azure PowerShell cmdlets and Azure Cross-Platform Command-Line Interface (xPlat CLI).</p><p><h3>Legal Terms</h3>By clicking the Create button, I acknowledge that I am getting this software from Microsoft Corp. and that the <a href='https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt' target='_blank'>legal terms</a> of Microsoft Corp. apply to it. Microsoft does not provide rights for third-party software. Also see the <a href='https://github.com/Azure/azure-linux-extensions/blob/1.0/LICENSE-2_0.txt' target='_blank'>privacy statement</a> from Microsoft Corp..</p>\",\n  \"link1\": \"Documentation\"\n}\n"
  },
  {
    "path": "ui-extension-packages/microsoft.custom-script-linux-arm/UiDefinition.json",
    "content": "{\n  \"$schema\": \"https://gallery.azure.com/schemas/2015-02-12/uiDefinition.json#\",\n  \"createDefinition\": {\n    \"createBlade\": {\n      \"name\": \"AddVmExtension\",\n      \"extension\": \"Microsoft_Azure_Compute\"\n    }\n  }\n}\n"
  }
]