Full Code of radlab/sparrow for AI

Repository: radlab/sparrow
Branch: master
Commit: afb8efadeb88
Files: 407
Total size: 12.9 MB

Directory structure:
sparrow/

├── .gitignore
├── LICENSE
├── deploy/
│   ├── README
│   ├── ec2/
│   │   ├── README
│   │   ├── ec2-exp.sh
│   │   ├── ec2_exp.py
│   │   ├── fairness.py
│   │   ├── fairness.sh
│   │   ├── isolation.py
│   │   ├── isolation.sh
│   │   ├── osdi.py
│   │   ├── prepare_tpch_experiments.py
│   │   ├── prepare_tpch_experiments.sh
│   │   ├── probe-ratio-het.sh
│   │   ├── probe-ratio.sh
│   │   ├── probe_ratio.py
│   │   ├── probe_ratio_het.py
│   │   ├── spark_v_mesos.py
│   │   ├── template/
│   │   │   ├── README
│   │   │   ├── backends.txt
│   │   │   ├── build_sparrow.sh
│   │   │   ├── clean_logs.sh
│   │   │   ├── configure_node.sh
│   │   │   ├── create_database.sh
│   │   │   ├── create_tpch_tables_primary.sh
│   │   │   ├── create_tpch_tables_secondary.sh
│   │   │   ├── deploy_sparrow.sh
│   │   │   ├── find_bugs.sh
│   │   │   ├── find_bugs_filtered.sh
│   │   │   ├── find_bugs_verbose.sh
│   │   │   ├── find_cache_partitions.sh
│   │   │   ├── frontend.conf
│   │   │   ├── frontends.txt
│   │   │   ├── hadoop-env.sh
│   │   │   ├── hdfs-site.xml
│   │   │   ├── hive-site.xml
│   │   │   ├── prepare_logs.sh
│   │   │   ├── shark-env.sh
│   │   │   ├── spark-env.sh
│   │   │   ├── spark-run.sh
│   │   │   ├── sparrow.conf
│   │   │   ├── sparrow_schedulers.txt
│   │   │   ├── start_mesos_master.sh
│   │   │   ├── start_mesos_slave.sh
│   │   │   ├── start_proto_backend.sh
│   │   │   ├── start_proto_frontend.sh
│   │   │   ├── start_shark_tpch.sh
│   │   │   ├── start_spark_backend.sh
│   │   │   ├── start_spark_frontend.sh
│   │   │   ├── start_sparrow.sh
│   │   │   ├── start_throughput_exp_spark.sh
│   │   │   ├── start_throughput_exp_sparrow.sh
│   │   │   ├── stop_mesos_master.sh
│   │   │   ├── stop_mesos_slave.sh
│   │   │   ├── stop_proto_backend.sh
│   │   │   ├── stop_proto_frontend.sh
│   │   │   ├── stop_spark_backend.sh
│   │   │   ├── stop_spark_frontend.sh
│   │   │   ├── stop_sparrow.sh
│   │   │   ├── tpch/
│   │   │   │   ├── make_base_tables.hql
│   │   │   │   ├── make_denorm_cached.hql
│   │   │   │   ├── make_denorm_table_primary.hql
│   │   │   │   ├── make_denorm_table_secondary.hql
│   │   │   │   ├── tpch_workload_1
│   │   │   │   ├── tpch_workload_10
│   │   │   │   ├── tpch_workload_11
│   │   │   │   ├── tpch_workload_12
│   │   │   │   ├── tpch_workload_13
│   │   │   │   ├── tpch_workload_14
│   │   │   │   ├── tpch_workload_15
│   │   │   │   ├── tpch_workload_2
│   │   │   │   ├── tpch_workload_3
│   │   │   │   ├── tpch_workload_4
│   │   │   │   ├── tpch_workload_5
│   │   │   │   ├── tpch_workload_6
│   │   │   │   ├── tpch_workload_7
│   │   │   │   ├── tpch_workload_8
│   │   │   │   └── tpch_workload_9
│   │   │   ├── tpch_experiment.sh
│   │   │   └── wipe_hdfs.sh
│   │   └── tpch_experiments.py
│   ├── example_sparrow_configuration.conf
│   └── third_party/
│       └── boto-2.1.1/
│           ├── PKG-INFO
│           ├── bin/
│           │   ├── bundle_image
│           │   ├── cfadmin
│           │   ├── cq
│           │   ├── cwutil
│           │   ├── elbadmin
│           │   ├── fetch_file
│           │   ├── kill_instance
│           │   ├── launch_instance
│           │   ├── list_instances
│           │   ├── lss3
│           │   ├── pyami_sendmail
│           │   ├── route53
│           │   ├── s3multiput
│           │   ├── s3put
│           │   ├── sdbadmin
│           │   └── taskadmin
│           ├── boto/
│           │   ├── __init__.py
│           │   ├── auth.py
│           │   ├── auth_handler.py
│           │   ├── cacerts/
│           │   │   ├── __init__.py
│           │   │   └── cacerts.txt
│           │   ├── cloudformation/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   ├── stack.py
│           │   │   └── template.py
│           │   ├── cloudfront/
│           │   │   ├── __init__.py
│           │   │   ├── distribution.py
│           │   │   ├── exception.py
│           │   │   ├── identity.py
│           │   │   ├── invalidation.py
│           │   │   ├── logging.py
│           │   │   ├── object.py
│           │   │   ├── origin.py
│           │   │   └── signers.py
│           │   ├── connection.py
│           │   ├── contrib/
│           │   │   ├── __init__.py
│           │   │   ├── m2helpers.py
│           │   │   └── ymlmessage.py
│           │   ├── ec2/
│           │   │   ├── __init__.py
│           │   │   ├── address.py
│           │   │   ├── autoscale/
│           │   │   │   ├── __init__.py
│           │   │   │   ├── activity.py
│           │   │   │   ├── group.py
│           │   │   │   ├── instance.py
│           │   │   │   ├── launchconfig.py
│           │   │   │   ├── policy.py
│           │   │   │   ├── request.py
│           │   │   │   └── scheduled.py
│           │   │   ├── blockdevicemapping.py
│           │   │   ├── bundleinstance.py
│           │   │   ├── buyreservation.py
│           │   │   ├── cloudwatch/
│           │   │   │   ├── __init__.py
│           │   │   │   ├── alarm.py
│           │   │   │   ├── datapoint.py
│           │   │   │   ├── listelement.py
│           │   │   │   └── metric.py
│           │   │   ├── connection.py
│           │   │   ├── ec2object.py
│           │   │   ├── elb/
│           │   │   │   ├── __init__.py
│           │   │   │   ├── healthcheck.py
│           │   │   │   ├── instancestate.py
│           │   │   │   ├── listelement.py
│           │   │   │   ├── listener.py
│           │   │   │   ├── loadbalancer.py
│           │   │   │   ├── policies.py
│           │   │   │   └── securitygroup.py
│           │   │   ├── image.py
│           │   │   ├── instance.py
│           │   │   ├── instanceinfo.py
│           │   │   ├── keypair.py
│           │   │   ├── launchspecification.py
│           │   │   ├── placementgroup.py
│           │   │   ├── regioninfo.py
│           │   │   ├── reservedinstance.py
│           │   │   ├── securitygroup.py
│           │   │   ├── snapshot.py
│           │   │   ├── spotdatafeedsubscription.py
│           │   │   ├── spotinstancerequest.py
│           │   │   ├── spotpricehistory.py
│           │   │   ├── tag.py
│           │   │   ├── volume.py
│           │   │   └── zone.py
│           │   ├── ecs/
│           │   │   ├── __init__.py
│           │   │   └── item.py
│           │   ├── emr/
│           │   │   ├── __init__.py
│           │   │   ├── bootstrap_action.py
│           │   │   ├── connection.py
│           │   │   ├── emrobject.py
│           │   │   ├── instance_group.py
│           │   │   └── step.py
│           │   ├── exception.py
│           │   ├── file/
│           │   │   ├── __init__.py
│           │   │   ├── bucket.py
│           │   │   ├── connection.py
│           │   │   ├── key.py
│           │   │   └── simpleresultset.py
│           │   ├── fps/
│           │   │   ├── __init__.py
│           │   │   └── connection.py
│           │   ├── gs/
│           │   │   ├── __init__.py
│           │   │   ├── acl.py
│           │   │   ├── bucket.py
│           │   │   ├── connection.py
│           │   │   ├── key.py
│           │   │   ├── resumable_upload_handler.py
│           │   │   └── user.py
│           │   ├── handler.py
│           │   ├── https_connection.py
│           │   ├── iam/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   └── summarymap.py
│           │   ├── jsonresponse.py
│           │   ├── manage/
│           │   │   ├── __init__.py
│           │   │   ├── cmdshell.py
│           │   │   ├── propget.py
│           │   │   ├── server.py
│           │   │   ├── task.py
│           │   │   ├── test_manage.py
│           │   │   └── volume.py
│           │   ├── mashups/
│           │   │   ├── __init__.py
│           │   │   ├── interactive.py
│           │   │   ├── iobject.py
│           │   │   ├── order.py
│           │   │   └── server.py
│           │   ├── mturk/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   ├── notification.py
│           │   │   ├── price.py
│           │   │   ├── qualification.py
│           │   │   └── question.py
│           │   ├── plugin.py
│           │   ├── provider.py
│           │   ├── pyami/
│           │   │   ├── __init__.py
│           │   │   ├── bootstrap.py
│           │   │   ├── config.py
│           │   │   ├── copybot.py
│           │   │   ├── helloworld.py
│           │   │   ├── launch_ami.py
│           │   │   ├── scriptbase.py
│           │   │   └── startup.py
│           │   ├── rds/
│           │   │   ├── __init__.py
│           │   │   ├── dbinstance.py
│           │   │   ├── dbsecuritygroup.py
│           │   │   ├── dbsnapshot.py
│           │   │   ├── event.py
│           │   │   ├── parametergroup.py
│           │   │   └── regioninfo.py
│           │   ├── regioninfo.py
│           │   ├── resultset.py
│           │   ├── roboto/
│           │   │   ├── __init__.py
│           │   │   ├── awsqueryrequest.py
│           │   │   ├── awsqueryservice.py
│           │   │   └── param.py
│           │   ├── route53/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   ├── exception.py
│           │   │   ├── hostedzone.py
│           │   │   └── record.py
│           │   ├── s3/
│           │   │   ├── __init__.py
│           │   │   ├── acl.py
│           │   │   ├── bucket.py
│           │   │   ├── bucketlistresultset.py
│           │   │   ├── connection.py
│           │   │   ├── deletemarker.py
│           │   │   ├── key.py
│           │   │   ├── multipart.py
│           │   │   ├── prefix.py
│           │   │   ├── resumable_download_handler.py
│           │   │   └── user.py
│           │   ├── sdb/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   ├── db/
│           │   │   │   ├── __init__.py
│           │   │   │   ├── blob.py
│           │   │   │   ├── key.py
│           │   │   │   ├── manager/
│           │   │   │   │   ├── __init__.py
│           │   │   │   │   ├── pgmanager.py
│           │   │   │   │   ├── sdbmanager.py
│           │   │   │   │   └── xmlmanager.py
│           │   │   │   ├── model.py
│           │   │   │   ├── property.py
│           │   │   │   ├── query.py
│           │   │   │   ├── sequence.py
│           │   │   │   └── test_db.py
│           │   │   ├── domain.py
│           │   │   ├── item.py
│           │   │   ├── queryresultset.py
│           │   │   └── regioninfo.py
│           │   ├── services/
│           │   │   ├── __init__.py
│           │   │   ├── bs.py
│           │   │   ├── message.py
│           │   │   ├── result.py
│           │   │   ├── service.py
│           │   │   ├── servicedef.py
│           │   │   ├── sonofmmm.py
│           │   │   └── submit.py
│           │   ├── ses/
│           │   │   ├── __init__.py
│           │   │   └── connection.py
│           │   ├── sns/
│           │   │   ├── __init__.py
│           │   │   └── connection.py
│           │   ├── sqs/
│           │   │   ├── __init__.py
│           │   │   ├── attributes.py
│           │   │   ├── connection.py
│           │   │   ├── jsonmessage.py
│           │   │   ├── message.py
│           │   │   ├── queue.py
│           │   │   └── regioninfo.py
│           │   ├── storage_uri.py
│           │   ├── sts/
│           │   │   ├── __init__.py
│           │   │   ├── connection.py
│           │   │   └── credentials.py
│           │   ├── utils.py
│           │   └── vpc/
│           │       ├── __init__.py
│           │       ├── customergateway.py
│           │       ├── dhcpoptions.py
│           │       ├── internetgateway.py
│           │       ├── routetable.py
│           │       ├── subnet.py
│           │       ├── vpc.py
│           │       ├── vpnconnection.py
│           │       └── vpngateway.py
│           └── setup.py
├── lib/
│   ├── README
│   └── javax.jms-1.1.jar
├── pom.xml
├── readme.markdown
├── simulation/
│   ├── run_simulation.py
│   ├── simulation.py
│   ├── simulation_batch.py
│   ├── simulation_cancellation.py
│   ├── simulation_centralized.py
│   ├── simulation_hacked.py
│   ├── simulation_multi.py
│   ├── simulation_random.py
│   ├── test_simulation_cancellation.py
│   ├── test_simulation_multi.py
│   └── util.py
├── src/
│   ├── main/
│   │   ├── gen-java/
│   │   │   └── edu/
│   │   │       └── berkeley/
│   │   │           └── sparrow/
│   │   │               └── thrift/
│   │   │                   ├── BackendService.java
│   │   │                   ├── FrontendService.java
│   │   │                   ├── GetTaskService.java
│   │   │                   ├── IncompleteRequestException.java
│   │   │                   ├── InternalService.java
│   │   │                   ├── LoadSpec.java
│   │   │                   ├── NodeMonitorService.java
│   │   │                   ├── PongService.java
│   │   │                   ├── SchedulerService.java
│   │   │                   ├── SchedulerStateStoreService.java
│   │   │                   ├── StateStoreService.java
│   │   │                   ├── TCancelTaskReservationsRequest.java
│   │   │                   ├── TEnqueueTaskReservationsRequest.java
│   │   │                   ├── TFullTaskId.java
│   │   │                   ├── THostPort.java
│   │   │                   ├── TNodeState.java
│   │   │                   ├── TPlacementPreference.java
│   │   │                   ├── TResourceVector.java
│   │   │                   ├── TSchedulingRequest.java
│   │   │                   ├── TTaskLaunchSpec.java
│   │   │                   ├── TTaskSpec.java
│   │   │                   └── TUserGroupInfo.java
│   │   ├── java/
│   │   │   └── edu/
│   │   │       └── berkeley/
│   │   │           └── sparrow/
│   │   │               ├── api/
│   │   │               │   ├── SparrowBackendClient.java
│   │   │               │   └── SparrowFrontendClient.java
│   │   │               ├── daemon/
│   │   │               │   ├── SparrowConf.java
│   │   │               │   ├── SparrowDaemon.java
│   │   │               │   ├── StandaloneStateStore.java
│   │   │               │   ├── nodemonitor/
│   │   │               │   │   ├── ConfigNodeMonitorState.java
│   │   │               │   │   ├── FifoTaskScheduler.java
│   │   │               │   │   ├── NoQueueTaskScheduler.java
│   │   │               │   │   ├── NodeMonitor.java
│   │   │               │   │   ├── NodeMonitorState.java
│   │   │               │   │   ├── NodeMonitorThrift.java
│   │   │               │   │   ├── PriorityTaskScheduler.java
│   │   │               │   │   ├── RoundRobinTaskScheduler.java
│   │   │               │   │   ├── StandaloneNodeMonitorState.java
│   │   │               │   │   ├── TaskLauncherService.java
│   │   │               │   │   └── TaskScheduler.java
│   │   │               │   ├── scheduler/
│   │   │               │   │   ├── CancellationService.java
│   │   │               │   │   ├── ConfigSchedulerState.java
│   │   │               │   │   ├── ConstrainedTaskPlacer.java
│   │   │               │   │   ├── Scheduler.java
│   │   │               │   │   ├── SchedulerState.java
│   │   │               │   │   ├── SchedulerThrift.java
│   │   │               │   │   ├── StandaloneSchedulerState.java
│   │   │               │   │   ├── TaskPlacer.java
│   │   │               │   │   └── UnconstrainedTaskPlacer.java
│   │   │               │   └── util/
│   │   │               │       ├── ConfigUtil.java
│   │   │               │       ├── Logging.java
│   │   │               │       ├── Network.java
│   │   │               │       ├── Resolution.java
│   │   │               │       ├── Resources.java
│   │   │               │       ├── Serialization.java
│   │   │               │       ├── TClients.java
│   │   │               │       ├── TServers.java
│   │   │               │       └── ThriftClientPool.java
│   │   │               └── examples/
│   │   │                   ├── BackendBenchmarkProfiler.java
│   │   │                   ├── FairnessTestingFrontend.java
│   │   │                   ├── HeterogeneousFrontend.java
│   │   │                   ├── PingClient.java
│   │   │                   ├── PongServer.java
│   │   │                   ├── ProtoBackend.java
│   │   │                   ├── ProtoFrontend.java
│   │   │                   ├── ProtoFrontendAsync.java
│   │   │                   ├── SimpleBackend.java
│   │   │                   ├── SimpleFrontend.java
│   │   │                   ├── ThriftPongClient.java
│   │   │                   ├── ThriftPongServer.java
│   │   │                   ├── ThroughputTestingFrontend.java
│   │   │                   └── readme.markdown
│   │   ├── python/
│   │   │   ├── README
│   │   │   ├── get_response_time.py
│   │   │   ├── get_utilization.py
│   │   │   ├── parse_logs.py
│   │   │   ├── parse_logs.sh
│   │   │   ├── parse_per_task_logs.py
│   │   │   ├── parse_per_task_logs.sh
│   │   │   ├── parse_tpch_logs.py
│   │   │   ├── parse_tpch_logs.sh
│   │   │   ├── service_per_node.py
│   │   │   └── third_party/
│   │   │       └── stats.py
│   │   └── thrift/
│   │       ├── build.sh
│   │       ├── service.thrift
│   │       └── types.thrift
│   └── test/
│       └── java/
│           └── edu/
│               └── berkeley/
│                   └── sparrow/
│                       └── daemon/
│                           ├── nodemonitor/
│                           │   └── TestTaskScheduler.java
│                           ├── scheduler/
│                           │   ├── TestConstrainedTaskPlacer.java
│                           │   └── TestUnconstrainedTaskPlacer.java
│                           └── util/
│                               └── TestThriftClientPool.java
└── upload.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
*.class
.classpath
.project
target/
*.swp
*.pyc


================================================
FILE: LICENSE
================================================

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

================================================
FILE: deploy/README
================================================
Files related to deploying Sparrow.

The ec2 directory contains files for deploying Sparrow on ec2. The Sparrow
AMI is not yet public, so it is not currently possible for external users to
use these scripts. Contact us if this is a problem for you.

If you'd like to deploy Sparrow on your own cluster, you'll need to
do a few things:
1) Download Sparrow:
$ git clone git://github.com/radlab/sparrow.git
2) Build Sparrow:
$ cd sparrow
$ mvn package
3) Write a configuration file and copy the file to all machines. Currently,
each Sparrow scheduler needs a configuration file that gives a list of where
all of the Sparrow node monitors (worker machines) are running.
See example_sparrow_configuration.conf to get started; see
daemon/SparrowConf.java for a full list of options.
4) Start Sparrow on all machines (both schedulers and node monitors). Starting
Sparrow runs both a scheduler and a node monitor on the machine. Using
the concurrent mark-and-sweep garbage collector is highly recommended -- the GC
pauses using other garbage collectors become significant when running
sub-second tasks. Change the class path to reflect the location where you
built Sparrow, and replace "sparrow.conf" with the location of the
configuration file you wrote above.
$ java -XX:+UseConcMarkSweepGC -cp target/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.daemon.SparrowDaemon -c sparrow.conf
5) Start your application executor on all backends. To use the prototype
backend, which can run either CPU-intensive or memory-intensive tasks for
some period of time:
$ java -cp target/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.examples.ProtoBackend
6) Start a frontend that submits scheduling requests to Sparrow. The current
implementation assumes that there is a scheduler running on the same
machine where the frontend is started, so frontends should only be started
on machines where Sparrow is running.
$ java -cp target/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.examples.ProtoFrontend

The frontend accepts a configuration file (by adding "-c conf_file" when
starting the frontend) where you can specify various configuration options to
submit different types of jobs (e.g., with different numbers of tasks,
of different duration, etc.), described in examples/ProtoFrontend.java.
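
If you are not using the ec2 scripts described below, steps 4-6 can be
scripted over SSH. The sketch below is a minimal example (Python, standard
library only, not part of Sparrow); the hosts file, the root SSH user, the
remote working directory, and the frontend configuration file name are all
assumptions that should be adapted to your cluster.

#!/usr/bin/env python
# start_cluster.py (hypothetical): runs steps 4-6 above on every machine
# listed, one hostname per line, in a plain-text hosts file.
import subprocess

SPARROW_CMD = ("java -XX:+UseConcMarkSweepGC "
               "-cp target/sparrow-1.0-SNAPSHOT.jar "
               "edu.berkeley.sparrow.daemon.SparrowDaemon -c sparrow.conf")
BACKEND_CMD = ("java -cp target/sparrow-1.0-SNAPSHOT.jar "
               "edu.berkeley.sparrow.examples.ProtoBackend")
FRONTEND_CMD = ("java -cp target/sparrow-1.0-SNAPSHOT.jar "
                "edu.berkeley.sparrow.examples.ProtoFrontend -c frontend.conf")

def ssh_background(host, command):
  # Start a long-running command on a remote machine and detach from it.
  subprocess.check_call(
      "ssh -o StrictHostKeyChecking=no root@%s 'nohup %s > /dev/null 2>&1 &'"
      % (host, command), shell=True)

if __name__ == "__main__":
  hosts = [h.strip() for h in open("hosts.txt") if h.strip()]
  for host in hosts:        # step 4: start a scheduler and node monitor
    ssh_background(host, SPARROW_CMD)
  for host in hosts:        # step 5: start the prototype backend
    ssh_background(host, BACKEND_CMD)
  for host in hosts[:1]:    # step 6: start a frontend (only on machines
    ssh_background(host, FRONTEND_CMD)  # where a scheduler is running)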



================================================
FILE: deploy/ec2/README
================================================
This script automates the deployment of a Sparrow cluster on ec2. To use it:

1) Get an ec2 account.

2) Get your access key ID and value from ec2. You need to have these available
   in environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
   respectively.

3) Generate a key-pair in the AWS control panel in the "US East" datacenter.

4) Run ./ec2-exp.sh --help to see how the script works.

   The basic workflow is usually like this:
   # Launch instances
   ./ec2-exp.sh launch -k eastkey -i ~/.ssh/eastkey.pem

   # Deploy Sparrow files, waiting 100 seconds for instances to boot before
   # attempting to contact them. It will also set the job rate to 10 jobs/s
   # on each frontend (-l 10). This will generate config files and send
   # them to all machines.
   ./ec2-exp.sh deploy -i ~/.ssh/eastkey.pem -w 100 -l 10

   # Start Sparrow
   ./ec2-exp.sh start -i ~/.ssh/eastkey.pem

   # Start proto application
   ./ec2-exp.sh start-proto -i ~/.ssh/eastkey.pem

   # Stop proto application
   ./ec2-exp.sh stop-proto -i ~/.ssh/eastkey.pem

   # Stop Sparrow
   ./ec2-exp.sh stop -i ~/.ssh/eastkey.pem

NOTE:
Right now if you start and stop the proto application while keeping Sparrow
running (i.e. you don't also restart Sparrow) things break. You will get a
confusing Thrift error if you do this.

If you'd like to run the full TPC-H experiment, you need to run two scripts:
1) prepare_tpch_experiments.sh <num_frontends> <num_backends>:
This script launches the given number of frontend and backend machines,
and loads the TPCH data into HDFS and then into Shark.

2) tpch_experiments.sh:
This runs the TPCH experiments and can be executed multiple times after
the prepare_tpch_experiments.sh script has been run.
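
The basic workflow above can also be driven end-to-end from one script. The
sketch below is a minimal example (Python, standard library only, not part of
Sparrow); the key pair name, identity file, wait time, job rate, and sleep
duration are the placeholder values from the workflow above, and the action
names are the ones shown there (run ./ec2-exp.sh --help for the full list).

#!/usr/bin/env python
# run_basic_experiment.py (hypothetical): wraps the ec2-exp.sh workflow
# shown above. Run it from the deploy/ec2 directory.
import os
import subprocess
import time

IDENTITY = os.path.expanduser("~/.ssh/eastkey.pem")

def exp(*args):
  # Invoke ec2-exp.sh with an action plus options; the identity file is
  # needed by every action that contacts the instances.
  subprocess.check_call(["./ec2-exp.sh"] + list(args) + ["-i", IDENTITY])

if __name__ == "__main__":
  exp("launch", "-k", "eastkey")           # launch instances
  exp("deploy", "-w", "100", "-l", "10")   # wait 100s for boot, then deploy
  exp("start")                             # start Sparrow
  exp("start-proto")                       # start the proto application
  time.sleep(600)                          # let the experiment run
  exp("stop-proto")                        # stop the proto application
  exp("stop")                              # stop Sparrow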


================================================
FILE: deploy/ec2/ec2-exp.sh
================================================
#!/bin/sh

#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./ec2_exp.py "$@"


================================================
FILE: deploy/ec2/ec2_exp.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import boto
import os
import sys
import tempfile
import time
import subprocess
import shutil
import random

from optparse import OptionParser


def parse_args(force_action=True):
  parser = OptionParser(usage="sparrow-exp <action> <cluster> [options]" +
    "\n\n<action> can be: launch, deploy, start-sparrow, stop-sparrow, start-proto, stop-proto, start-hdfs, stop-hdfs, start-sparrow-throughput, start-spark-shark, stop-spark, restart-spark-shark, command, collect-logs, destroy, login-fe, login-be, create-database, create-tpch-tables, start-shark-tpch")
  parser.add_option("-z", "--zone", default="us-east-1d",
      help="Availability zone to launch instances in")
  parser.add_option("-a", "--ami", default="ami-533a733a",
      help="Amazon Machine Image ID to use (use ami-894801e0 for HVM instance types)")
  parser.add_option("-t", "--instance-type", default="m2.2xlarge",
      help="Type of instance to launch (default: m2.2xlarge). " +
           "WARNING: must be 64 bit, thus small instances won't work")
  parser.add_option("-l", "--arrival-rate", type="float", default=1,
      help = "Arrival rate of jobs in proto frontends (jobs/s)")
  parser.add_option("-k", "--key-pair",
      help="Key pair to use on instances")
  parser.add_option("-i", "--identity-file",
      help="SSH private key file to use for logging into instances")
  parser.add_option("-f", "--frontends", type="int", default=1,
      help="Number of frontends to launch (default: 1)")
  parser.add_option("-b", "--backends", type="int", default=1,
      help="Number of backends to launch (default: 1)")
  parser.add_option("-w", "--wait", type="int", default=0,
      help="Number of seconds to wait for cluster nodes to boot (default: 0)")
  parser.add_option("-g", "--branch", default="master",
      help="Which git branch to checkout")
  parser.add_option("-s", "--spark-branch", default="sparrow",
      help="Which git branch to checkout (for spark)")
  parser.add_option("-d", "--log-dir", default="/tmp/",
      help="Local directory into which log files are copied")
  parser.add_option("-n", "--tasks-per-job", type="int", default=1,
      help="Number of tasks to launch for each job in prototype")
  parser.add_option("-x", "--num-preferred-nodes", type="int", default=0,
      help="Number of preferred nodes to use in the prototype frontend (0 means unconstrained)")
  parser.add_option("-c", "--benchmark-id", type="int", default=1,
      help="Which benchmark to run")
  parser.add_option("-e", "--benchmark-iterations", type="int", default=100,
      help="Iterations of benchmark to run")
  parser.add_option("-p", "--sample-ratio", type="float", default=2,
      help="Sample ratio for unconstrained tasks")
  parser.add_option("-q", "--sample-ratio-constrained", type=int, default=2,
      help="Sample ratio for constrained tasks")
  parser.add_option("-y", "--kill-delay", type="int", default=1,
      help="Time to wait between killing backends and frontends")
  parser.add_option("-v", "--inter-query-delay", type="int", default=100,
      help="How many ms to wait between shark queries")
  parser.add_option("-m", "--scheduler", type="string", default="sparrow",
      help="Which scheduler to use for running spark (mesos/sparrow)")
  parser.add_option("--spot-price", type="float", default=None,
      help="If specified, launch slaves as spot instances with the given " +
            "maximum price (in dollars). To see current spot prices, visit "
            "http://aws.amazon.com/ec2/spot-instances/#7")
  parser.add_option("--cpus", type="int", default=4,
      help="Number of cpus on the machine, used to determine how many concurrent tasks "
           "to run")
  parser.add_option("--frontend-type", type="string", default="ProtoFrontend",
      help="Classname (not fully qualified!) of the frontend to use")
  parser.add_option("-r", "--parallelism", type="int", default=8,
      help="Level of parallelism for dummy queries.")
  parser.add_option("-u", "--num_partitions", type="int", default=1,
      help="Number of partitions for shark tables.")
  parser.add_option("--reduce-tasks", type="int", default=5,
      help="Number of reduce tasks to use for Shark queries.")
  parser.add_option("--spark-backend-mem", default="2g",
      help="Amount of memory to give spark backends."),
  parser.add_option("--scale-factor", default="2.5",
      help="Scale factor to use when creating TPCH database (used with create-database)")
  parser.add_option("--total-cores", default="200",
      help="Total number of cores in the cluster (used to determine launch rate for "
           "throughput experiments)")

  (opts, args) = parser.parse_args()
  if len(args) < 2 and force_action:
    parser.print_help()
    sys.exit(1)
  if os.getenv('AWS_ACCESS_KEY_ID') == None:
    print >> sys.stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                          "must be set")
    sys.exit(1)
  if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
    print >> sys.stderr, ("ERROR: The environment variable " +
                          "AWS_SECRET_ACCESS_KEY must be set")
    sys.exit(1)

  return (opts, args)

# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
  groups = conn.get_all_security_groups()
  group = [g for g in groups if g.name == name]
  if len(group) > 0:
    return group[0]
  else:
    print "Creating security group " + name
    return conn.create_security_group(name, "Sparrow EC2 group")

# Copy a file to a given host through scp, throwing an exception if scp fails
def scp(host, opts, local_file, dest_file):
  subprocess.check_call(
      "scp -q -o StrictHostKeyChecking=no -i %s '%s' 'root@%s:%s'" %
      (opts.identity_file, local_file, host, dest_file), shell=True)

# Copy a file from a given host through scp, throwing an exception if scp fails
def scp_from(host, opts, dest_file, local_file):
  subprocess.check_call(
      "scp -q -o StrictHostKeyChecking=no -i %s 'root@%s:%s' '%s'" %
      (opts.identity_file, host, dest_file, local_file), shell=True)

def rsync_from_all(hosts, opts, dest_pattern, local_dir, errors=0):
  commands = []
  for host in hosts:
    cmd = "rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' root@%s: --include=\"%s\" --exclude=\"*\" %s" % (
      opts.identity_file, host, dest_pattern, local_dir)
    commands.append(cmd)
  parallel_commands(commands, errors)


# Execute a sequence of commands in parallel, raising an exception if
# more than tolerable_failures of them fail
def parallel_commands(commands, tolerable_failures):
  processes = {} # popen object --> command string
  failures = []
  for c in commands:
    p = subprocess.Popen(c, shell=True, stdout = subprocess.PIPE,
                         stderr = subprocess.PIPE, stdin=subprocess.PIPE)
    processes[p] = c
  for p in processes.keys():
    (stdout, stderr) = p.communicate()
    if p.poll() != 0:
      failures.append((stdout, stderr, processes[p]))
    print stdout

  if len(failures) > tolerable_failures:
    out = "Parallel commands failed:\n"
    for (stdout, stderr, cmd) in failures:
      out = out + "command:\n%s\nstdout\n%sstderr\n%s\n" %  \
        (cmd, stdout, stderr)
    raise Exception(out)

# Run a command on a host through ssh, throwing an exception if ssh fails
def ssh(host, opts, command):
  subprocess.check_call(
      "ssh -t -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
      (opts.identity_file, host, command), shell=True)

# Run a command on multiple hosts through ssh, throwing an exception on failure
def ssh_all(hosts, opts, command):
  commands = []
  for host in hosts:
    cmd = "ssh -t -o StrictHostKeyChecking=no -i %s root@%s '%s'" % \
      (opts.identity_file, host, command)
    commands.append(cmd)
  parallel_commands(commands, 0)

# Launch a cluster and return instances launched
def launch_cluster(conn, opts, cluster_name):
  backend_group = get_or_make_group(conn, "%s-backends" % cluster_name)
  frontend_group = get_or_make_group(conn, "%s-frontends" % cluster_name)
  groups = [backend_group, frontend_group]

  for group in groups:
    if group.rules == []: # Group was just created
      # Allow all access from all other sparrow machines
      for group2 in groups:
        group.authorize(src_group=group2)
      # Allow some access from all machines
      group.authorize('tcp', 22, 22, '0.0.0.0/0')

  print "Launching instances..."
  try:
    image = conn.get_all_images(image_ids=[opts.ami])[0]
  except:
    print >> sys.stderr, "Could not find AMI " + opts.ami
    sys.exit(1)
  if opts.spot_price != None:
      # Launch spot instances with the requested price.
      # The launch group ensures that the instances will be launched and
      # terminated as a set.
      launch_group_name = "launch-group-%s" % cluster_name
      req_ids = []
      if opts.frontends > 0:
        print ("Requesting %d frontends as spot instances with price $%.3f" %
            (opts.frontends, opts.spot_price))
        frontend_reqs = conn.request_spot_instances(
            price = opts.spot_price,
            image_id = opts.ami,
            launch_group = launch_group_name,
            placement = opts.zone,
            count = opts.frontends,
            key_name = opts.key_pair,
            security_groups = [frontend_group],
            instance_type = opts.instance_type)

        req_ids += [req.id for req in frontend_reqs]
      if opts.backends > 0:
        print ("Requesting %d backends as spot instances with price $%.3f" %
            (opts.backends, opts.spot_price))
        backend_reqs = conn.request_spot_instances(
            price = opts.spot_price,
            image_id = opts.ami,
            launch_group = launch_group_name,
            placement = opts.zone,
            count = opts.backends,
            key_name = opts.key_pair,
            security_groups = [backend_group],
            instance_type = opts.instance_type)
        req_ids += [req.id for req in backend_reqs]

      print "Waiting for spot instances to be granted..."
      instances_requested = opts.frontends + opts.backends
      try:
        while True:
          time.sleep(10)
          # See if all the requests have been fulfilled.
          reqs = conn.get_all_spot_instance_requests()
          active_instance_ids = [r.instance_id for r in reqs
                                 if r.id in req_ids and r.state == "active"]
          if len(active_instance_ids) == instances_requested:
            print "All %d frontends and %d backends granted" % (opts.frontends, opts.backends)
            break
          else:
            print ("%d of %d nodes granted; waiting longer" %
                   (len(active_instance_ids), instances_requested))
      except:
        print "Canceling spot instance requests"
        conn.cancel_spot_instance_requests(req_ids)
        (frontends, backends) = find_existing_cluster(conn, opts, cluster_name)
        running = len(frontends) + len(backends)
        if running:
          print >> sys.stderr, ("WARNING: %d instances are still running" % running)
        sys.exit(0)
  else:
    print "ARE YOU SURE YOU DON'T WANT TO USE SPOT INSTANCES?"
    frontend_res = image.run(key_name = opts.key_pair,
                            security_groups = [frontend_group],
                            instance_type = opts.instance_type,
                            placement = opts.zone,
                            min_count = opts.frontends,
                            max_count = opts.frontends)
    backend_res = image.run(key_name = opts.key_pair,
                            security_groups = [backend_group],
                            instance_type = opts.instance_type,
                            placement = opts.zone,
                            min_count = opts.backends,
                            max_count = opts.backends)

    print "Launched cluster with %s frontends and %s backends" % (
           opts.frontends, opts.backends)

# Wait for a set of launched instances to exit the "pending" state
# (i.e. either to start running or to fail and be terminated)
def wait_for_instances(instances):
  while True:
    for i in instances:
      i.update()
    if len([i for i in instances if i.state == 'pending']) > 0:
      time.sleep(5)
    else:
      return

# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
  return (instance.state in ['pending', 'running', 'stopping', 'stopped'])


def find_existing_cluster(conn, opts, cluster_name):
  print "Searching for existing Sparrow cluster..."
  reservations = conn.get_all_instances()
  frontend_nodes = []
  backend_nodes = []
  for res in reservations:
    active = [i for i in res.instances if is_active(i)]
    if len(active) > 0:
      group_names = [g.name for g in res.groups]
      if group_names == ["%s-frontends" % cluster_name]:
        frontend_nodes += res.instances
      elif group_names == ["%s-backends" % cluster_name]:
        backend_nodes += res.instances
  if frontend_nodes != [] and backend_nodes != []:
    print ("Found %d frontend and %s backend nodes" %
           (len(frontend_nodes), len(backend_nodes)))

    print "Frontends:"
    frontend_nodes = filter(lambda k: k.public_dns_name != "", frontend_nodes)
    for fe in frontend_nodes:
      print fe.public_dns_name
    print "Backends:"
    backend_nodes = filter(lambda k: k.public_dns_name != "", backend_nodes)
    for be in backend_nodes:
      print be.public_dns_name

    return (frontend_nodes, backend_nodes)
  else:
    print "ERROR: Could not find full cluster named %s: fe=%s be=%s" % (
      cluster_name, frontend_nodes, backend_nodes)
    sys.exit(1)

""" Replaces all of the variables in the tempate files, based on opts.

Returns the name of the directory with all of the files that need to be deployed.
"""
def generate_deploy_files(frontends, backends, opts, warmup_job_arrival_s, warmup_s, post_warmup_s,
                          nm_task_scheduler, users):
  # Replace template vars
  tmp_dir = tempfile.mkdtemp()

  template_vars = {
    "frontend_list": "\n".join(["%s" % i.public_dns_name \
                                 for i in frontends]), \
    "sparrow_schedulers": "\n".join(["sparrow@%s:20503" % i.private_ip_address \
                                      for i in frontends]), \
    "static_backends": ",".join(["%s:20502" % i.public_dns_name \
                                 for i in backends]),
    "name_node": frontends[0].public_dns_name,
    "backend_list": "\n".join(["%s" % i.public_dns_name \
                                 for i in backends]),
    "backend_comma_joined_list": ",".join(["%s" % i.public_dns_name \
                                           for i in backends]),
    "arrival_lambda": "%s" % opts.arrival_rate,
    "git_branch": "%s" % opts.branch,
    "spark_git_branch": "%s" % opts.spark_branch,
    "benchmark_iterations": "%s" % opts.benchmark_iterations,
    "benchmark_id": "%s" % opts.benchmark_id,
    "tasks_per_job": "%s" % opts.tasks_per_job,
    "num_preferred_nodes": "%s" % opts.num_preferred_nodes,
    "sample_ratio": "%s" % opts.sample_ratio,
    "sample_ratio_constrained": "%s" % opts.sample_ratio_constrained,
    "warmup_job_arrival_rate_s": "%s" % warmup_job_arrival_s,
    "warmup_s": "%s" % warmup_s,
    "post_warmup_s": "%s" % post_warmup_s,
    "node_monitor_task_scheduler": "%s" % nm_task_scheduler,
    "num_partitions": "%s" % opts.num_partitions,
    "reduce_tasks": "%s" % (opts.reduce_tasks),
    "inter_query_delay": "%s" % opts.inter_query_delay,
    "users": users,
    "frontend_type": opts.frontend_type,
    "cpus": "%s" % opts.cpus,
    "total_cores": "%s" % opts.total_cores,
    "spark_backend_mem": "%s" % opts.spark_backend_mem
  }

  for dirpath, dirnames, filenames in os.walk("template"):
    rel_dir_path=dirpath.replace("template", "")
    if rel_dir_path.startswith(os.sep):
      rel_dir_path = rel_dir_path[1:]
    if rel_dir_path != "":
      os.mkdir(os.path.join(tmp_dir, rel_dir_path))
    for filename in filenames:
      if filename[0] not in '#.~' and filename[-1] != '~':
        local_file = os.path.join(tmp_dir, rel_dir_path, filename)
        with open(os.path.join(dirpath, filename)) as src:
          with open(local_file, "w") as dest:
            text = src.read()
            for key in template_vars:
              text = text.replace("{{" + key + "}}", template_vars[key])
            dest.write(text)
            dest.close()
  return tmp_dir

# Deploy Sparrow configuration on a launched cluster (but don't rebuild).
def redeploy_sparrow(machines, frontends, backends, opts, warmup_job_arrival_s=0, warmup_s=0,
                     post_warmup_s=0, nm_task_scheduler="fifo",
                     users="user0:1:0"):
  # Replace template vars
  tmp_dir = generate_deploy_files(frontends, backends, opts, warmup_job_arrival_s, warmup_s,
                                  post_warmup_s, nm_task_scheduler, users)

  for machine in machines:
    print "Copying files to: %s ..." % machine.public_dns_name

    # Rsync this to one machine
    command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
        "'%s/' 'root@%s:~/'") % (opts.identity_file, tmp_dir, machine.public_dns_name))
    subprocess.check_call(command, shell=True)

# Deploy Sparrow binaries and configuration on a launched cluster
def deploy_cluster(frontends, backends, opts, warmup_job_arrival_s=0, warmup_s=0,
                   post_warmup_s=0, nm_task_scheduler="fifo",
                   users="user0:1:0"):
  # Replace template vars
  tmp_dir = generate_deploy_files(frontends, backends, opts, warmup_job_arrival_s, warmup_s,
                                  post_warmup_s, nm_task_scheduler, users)

  driver_machine = frontends[0].public_dns_name
  print "Chose driver machine: %s ..." % driver_machine

  # Rsync this to one machine
  command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
      "'%s/' 'root@%s:~/'") % (opts.identity_file, tmp_dir, driver_machine))
  subprocess.check_call(command, shell=True)
  # Remove the temp directory we created above
  shutil.rmtree(tmp_dir)

  print "Copying SSH key %s to driver..." % opts.identity_file
  ssh(driver_machine, opts, 'mkdir -p /root/.ssh')
  scp(driver_machine, opts, opts.identity_file, '/root/.ssh/id_rsa')

  print "Building sparrow on driver machine..."
  ssh(driver_machine, opts, "chmod 755 /root/*.sh;"
                            "/root/build_sparrow.sh;")

  print "Deploying sparrow to other machines..."
  ssh(driver_machine, opts, "/root/deploy_sparrow.sh")

def start_sparrow_throughput(frontends, backends, opts):
  start_sparrow(frontends, backends, opts)
  start_spark_shark(frontends, backends, opts)
  ssh_all([fe.public_dns_name for fe in frontends], opts, "/root/start_throughput_exp_sparrow.sh")

def start_sparrow(frontends, backends, opts):
  all_machines = []
  for fe in frontends:
    all_machines.append(fe.public_dns_name)
  for be in backends:
    all_machines.append(be.public_dns_name)

  print "Starting sparrow on all machines..."
  ssh_all(all_machines, opts, "/root/start_sparrow.sh;")

def stop_sparrow(frontends, backends, opts):
  all_machines = []
  for fe in frontends:
    all_machines.append(fe.public_dns_name)
  for be in backends:
    all_machines.append(be.public_dns_name)
  print "Stopping sparrow on all machines..."
  ssh_all(all_machines, opts, "/root/stop_sparrow.sh;")

def start_mesos(frontends, backends, opts):
  print "Starting mesos master..."
  ssh(frontends[0].public_dns_name, opts, "/root/start_mesos_master.sh;")
  print "Starting mesos slaves..."
  ssh_all([be.public_dns_name for be in backends],
           opts, "/root/start_mesos_slave.sh")

def stop_mesos(frontends, backends, opts):
  print "Stopping mesos slaves..."
  ssh_all([be.public_dns_name for be in backends],
          opts, "/root/stop_mesos_slave.sh")
  print "Stopping mesos master..."
  ssh(frontends[0].public_dns_name, opts, "/root/stop_mesos_master.sh")


""" Starts spark backends only to allow shark shell to launch. """
def start_spark_shark(frontends, backends, opts):
  if opts.scheduler != "sparrow":
    print "ERROR: shark only supported w/ sparrow scheduler"
    return
  print "Starting Spark backends..."
  ssh_all([be.public_dns_name for be in backends], opts,
          "/root/start_spark_backend.sh")

def stop_spark(frontends, backends, opts):
  print "Stopping spark frontends..."
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "/root/stop_spark_frontend.sh")
  time.sleep(opts.kill_delay)
  print "Stopping spark backends..."
  ssh_all([be.public_dns_name for be in backends], opts,
         "/root/stop_spark_backend.sh")

def start_hdfs(frontends, backends, opts):
  print "Starting HDFS"
  ssh(frontends[0].public_dns_name, opts,
      'HADOOP_SSH_OPTS="-o StrictHostKeyChecking=no" /opt/hadoop/bin/start-dfs.sh')

def stop_hdfs(frontends, backends,opts):
  print "Stopping HDFS"
  ssh(frontends[0].public_dns_name, opts,
      'HADOOP_SSH_OPTS="-o StrictHostKeyChecking=no" /opt/hadoop/bin/stop-dfs.sh')

# Start the prototype backends/frontends
def start_proto(frontends, backends, opts):
  print "Starting Proto backends..."
  ssh_all([be.public_dns_name for be in backends], opts,
         "/root/start_proto_backend.sh")
  print "Starting Proto frontends..."
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "/root/start_proto_frontend.sh")

# Stop the prototype frontends/backends
def stop_proto(frontends, backends, opts):
  print "Stopping Proto frontends..."
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "/root/stop_proto_frontend.sh")
  time.sleep(opts.kill_delay)
  print "Stopping Proto backends..."
  ssh_all([be.public_dns_name for be in backends], opts,
         "/root/stop_proto_backend.sh")

def create_database(frontends, opts):
  scale_factor = opts.scale_factor
  print "Creating TPCH database in HDFS on primary node with scale factor " + scale_factor
  ssh(frontends[0].public_dns_name, opts,
      "/root/create_database.sh %s" % scale_factor)

def create_tpch_tables(frontends, backends, opts):
  print "Creating tables on primary node (takes longer)"
  ssh(frontends[0].public_dns_name, opts, 
      "/root/create_tpch_tables_primary.sh")
 
  print "Creating table on other nodes"
  ssh_all([fe.public_dns_name for fe in frontends[1:]], opts,
      "/root/create_tpch_tables_secondary.sh")

def start_shark_tpch(frontends, backends, opts):
  print "Starting Shark/TPCH workloads"
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "/root/start_shark_tpch.sh")

# Collect logs from all machines
def collect_logs(frontends, backends, opts):
  print "Zipping logs..."
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "/root/prepare_logs.sh")
  ssh_all([be.public_dns_name for be in backends], opts,
          "/root/prepare_logs.sh")
  print "Hauling logs"
  rsync_from_all([fe.public_dns_name for fe in frontends], opts,
    "*.log.gz", opts.log_dir, len(frontends))
  rsync_from_all([be.public_dns_name for be in backends], opts,
    "*.log.gz", opts.log_dir, len(backends))
#  f = open(os.path.join(opts.log_dir, "params.txt"), 'w')
#  for (k, v) in opts.__dict__.items():
#    f.write("%s\t%s\n" % (k, v))
#  f.close()

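  # Clear any stale audit-log archives out of /tmp, then move the freshly
  # created archives from /root into /tmp on every machine.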
  ssh_all([fe.public_dns_name for fe in frontends], opts,
          "rm -f /tmp/*audit*.log.gz; mv /root/*log.gz /tmp;")
  ssh_all([be.public_dns_name for be in backends], opts,
          "rm -f /tmp/*audit*.log.gz; mv /root/*log.gz /tmp;")

# Tear down a cluster
def destroy_cluster(frontends, backends, opts):
  response = raw_input("Are you sure you want to destroy the cluster?\n" +
    "ALL DATA ON ALL NODES WILL BE LOST!!\n" +
    "Destroy cluster (y/N): ")

  if response == "y":
    print "Terminating frontends"
    for fe in frontends:
      fe.terminate()
    print "Terminating backends"
    for be in backends:
      be.terminate()

# Execute a shell command on all machines
def execute_command(frontends, backends, opts, cmd):
  ssh_all([fe.public_dns_name for fe in frontends], opts, cmd)
  ssh_all([be.public_dns_name for be in backends], opts, cmd)

# Login to the first frontend
def login_frontend(frontends, backends, opts):
  node = frontends[0].public_dns_name
  print "Logging into a frontend " + node
  subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s root@%s" %
    (opts.identity_file, node), shell=True)

# Login to the first backend
def login_backend(frontends, backends, opts):
  node = backends[0].public_dns_name
  print "Logging into a backend " + node
  subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s root@%s" %
    (opts.identity_file, node), shell=True)

def main():
  (opts, args) = parse_args()
  conn = boto.connect_ec2()
  action = args[0]
  cluster = args[1]

  if action == "launch":
    launch_cluster(conn, opts, cluster)
    return

  if action == "command" and len(args) < 3:
    print "Command action requires command string"

  # Wait until ec2 says the cluster is started, then possibly wait more time
  # to make sure all nodes have booted.
  (frontends, backends) = find_existing_cluster(conn, opts, cluster)
  print "Waiting for instances to start up"
  wait_for_instances(frontends)
  wait_for_instances(backends)

  print "Waiting %d more seconds..." % opts.wait
  time.sleep(opts.wait)

  print "Executing action: %s" % action

  if action == "command":
    cmd = " ".join(args[2:])
    execute_command(frontends, backends, opts, cmd)
  elif action == "deploy":
    deploy_cluster(frontends, backends, opts)
  elif action == "start-sparrow":
    start_sparrow(frontends, backends, opts)
  elif action == "stop-sparrow":
    stop_sparrow(frontends, backends, opts)
  elif action == "start-sparrow-throughput":
    # Starts a throughput testing experiment using Sparrow.
    start_sparrow_throughput(frontends, backends, opts)
  elif action == "start-mesos":
    start_mesos(frontends, backends, opts)
  elif action == "stop-mesos":
    stop_mesos(frontends, backends, opts)
  elif action == "start-spark-shark":
    start_spark_shark(frontends, backends, opts)
  elif action == "stop-spark":
    stop_spark(frontends, backends, opts)
  elif action == "restart-spark-shark":
    stop_spark(frontends, backends, opts)
    stop_sparrow(frontends, backends, opts)
    start_sparrow(frontends, backends, opts)
    start_spark_shark(frontends, backends, opts)
  elif action == "start-proto":
    start_proto(frontends, backends, opts)
  elif action == "stop-proto":
    stop_proto(frontends, backends, opts)
  elif action == "start-hdfs":
    start_hdfs(frontends, backends, opts)
  elif action == "stop-hdfs":
    stop_hdfs(frontends, backends, opts)
  elif action == "collect-logs":
    collect_logs(frontends, backends, opts)
  elif action == "destroy":
    destroy_cluster(frontends, backends, opts)
  elif action == "login-fe":
    login_frontend(frontends, backends, opts)
  elif action == "login-be":
    login_backend(frontends, backends, opts)
  elif action == "create-database":
    create_database(frontends, opts)
  elif action == "create-tpch-tables":
    create_tpch_tables(frontends, backends, opts)
  elif action == "start-shark-tpch":
    start_shark_tpch(frontends, backends, opts)
  else:
    print "Unknown action: %s" % action
    sys.exit(1)

if __name__ == "__main__":
  main()


================================================
FILE: deploy/ec2/fairness.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import boto
import os
import subprocess
import sys
import time

import ec2_exp

def run_cmd(cmd):
    subprocess.check_call(cmd, shell=True)

def main(argv):
    launch_instances = False
    if len(argv) >= 1 and argv[0] == "True":
        launch_instances = True

    utilizations = [1.0]
    sample_ratios = [2.0]
    sample_ratio_constrained = 1

    # Amount of time it takes each task to run in isolation
    task_duration_ms = 100
    tasks_per_job = 3
    private_ssh_key = "patkey.pem"
    sparrow_branch = "master"
    num_backends = 100
    num_frontends = 10
    cores_per_backend = 4
    # Length of each trial, in seconds.
    trial_length = 700
    num_preferred_nodes = 0
    nm_task_scheduler = "round_robin"
    cluster_name = "fairness"

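    # Per-frontend job arrival rate (jobs/s) that fully utilizes the cluster:
    # task-milliseconds of capacity per second (backends * cores * 1000) divided
    # by the task-milliseconds each job consumes (duration * tasks per job),
    # spread evenly across the frontends.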
    full_utilization_rate_s = (float(num_backends * cores_per_backend * 1000) /
                               (task_duration_ms * tasks_per_job * num_frontends))

    # Warmup information
    warmup_s = 120
    post_warmup_s = 30
    warmup_arrival_rate_s = 0.4 * full_utilization_rate_s

    if launch_instances:
        print "********Launching instances..."
        run_cmd("./ec2-exp.sh launch %s -f %s -b %s -i %s" %
                (cluster_name, num_frontends, num_backends, private_ssh_key))
        time.sleep(10)

    for sample_ratio in sample_ratios:
        for utilization in utilizations:
            arrival_rate_s = utilization * full_utilization_rate_s

            # This is a little bit of a hacky way to pass args to the ec2 script.
            (opts, args) = ec2_exp.parse_args(False)
            opts.identity_file = private_ssh_key
            opts.arrival_rate = arrival_rate_s
            opts.branch = sparrow_branch
            opts.sample_ratio  = sample_ratio
            opts.sample_ratio_constrained = sample_ratio_constrained
            opts.tasks_per_job = tasks_per_job
            opts.num_preferred_nodes = num_preferred_nodes
            opts.frontend_type = "FairnessTestingFrontend"

            conn = boto.connect_ec2()
            frontends, backends = ec2_exp.find_existing_cluster(conn, opts, cluster_name)

            print ("********Launching experiment at utilization %s with sample ratio %s..." %
                   (utilization, sample_ratio))

            print ("********Deploying with arrival rate %s and warmup arrival rate %s"
                   % (arrival_rate_s, warmup_arrival_rate_s))
            #ec2_exp.deploy_cluster(frontends, backends, opts, warmup_arrival_rate_s, warmup_s,
            #                       post_warmup_s, nm_task_scheduler)
            ec2_exp.start_sparrow(frontends, backends, opts)

            print "*******Sleeping after starting Sparrow"
            time.sleep(10)
            print "********Starting prototype frontends and backends"
            ec2_exp.start_proto(frontends, backends, opts)
            time.sleep(trial_length)

            log_dirname = "/Users/keo/Documents/opportunistic-scheduling/sparrow/deploy/ec2/fairness_%s_%s" % (utilization, sample_ratio)
            while os.path.exists(log_dirname):
                log_dirname = "%s_a" % log_dirname
            os.mkdir(log_dirname)

            ec2_exp.execute_command(frontends, backends, opts, "./find_bugs.sh")

            print "********Stopping prototypes and Sparrow"
            ec2_exp.stop_proto(frontends, backends, opts)
            ec2_exp.stop_sparrow(frontends, backends, opts)

            print "********Collecting logs and placing in %s" % log_dirname
            opts.log_dir = log_dirname
            ec2_exp.collect_logs(frontends, backends, opts)
            run_cmd("gunzip %s/*.gz" % log_dirname)

            print "********Parsing logs"
            run_cmd(("cd ../../src/main/python/ && ./parse_logs.sh log_dir=%s "
                     "output_dir=%s/results start_sec=240 end_sec=540 && cd -") %
                    (log_dirname, log_dirname))

if __name__ == "__main__":
    main(sys.argv[1:])



================================================
FILE: deploy/ec2/fairness.sh
================================================
#!/bin/sh

#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./fairness.py $@


================================================
FILE: deploy/ec2/isolation.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import boto
import os
import subprocess
import sys
import time

import ec2_exp

def run_cmd(cmd):
    subprocess.check_call(cmd, shell=True)

def main(argv):
    launch_instances = False
    if len(argv) >= 1 and argv[0] == "True":
        launch_instances = True

    utilization_user_pairs = [(0.25, "high:1:0"),
                              (0.5, "high:1:0,low:1:0"),
                              (0.75, "high:1:0,low:2:0"),
                              (1.0, "high:1:0,low:3:0"),
                              (1.25, "high:1:0,low:4:0"),
                              (1.5, "high:1:0,low:5:0"),
                              (1.75, "high:1:0,low:6:0"),
                              (2.0, "high:1:0,low:7:0")]
    sample_ratios = [2.0]
    sample_ratio_constrained = 1

    # Amount of time it takes each task to run in isolation
    task_duration_ms = 100
    tasks_per_job = 1
    private_ssh_key = "patkey.pem"
    sparrow_branch = "debugging"
    num_backends = 5
    num_frontends = 1
    cores_per_backend = 4
    # Length of each trial, in seconds.
    trial_length = 500
    num_preferred_nodes = 0
    nm_task_scheduler = "priority"
    cluster_name = "isolation"

    full_utilization_rate_s = (float(num_backends * cores_per_backend * 1000) /
                               (task_duration_ms * tasks_per_job * num_frontends))

    # Warmup information
    warmup_s = 120
    post_warmup_s = 30
    warmup_arrival_rate_s = 0.4 * full_utilization_rate_s

    if launch_instances:
        print "********Launching instances..."
        run_cmd(("./ec2-exp.sh launch %s --ami ami-a658c0cf " +
                 "--instance-type cr1.8xlarge --spot-price %s -f %s -b %s -i %s") %
                (cluster_name, 0.5, num_frontends, num_backends, private_ssh_key))
        time.sleep(10)

    for sample_ratio in sample_ratios:
        for utilization, users in utilization_user_pairs:
            arrival_rate_s = utilization * full_utilization_rate_s

            # This is a little bit of a hacky way to pass args to the ec2 script.
            (opts, args) = ec2_exp.parse_args(False)
            opts.identity_file = private_ssh_key
            opts.arrival_rate = arrival_rate_s
            opts.branch = sparrow_branch
            opts.sample_ratio  = sample_ratio
            opts.sample_ratio_constrained = sample_ratio_constrained
            opts.tasks_per_job = tasks_per_job
            opts.num_preferred_nodes = num_preferred_nodes
            opts.cpus = cores_per_backend

            conn = boto.connect_ec2()
            frontends, backends = ec2_exp.find_existing_cluster(conn, opts, cluster_name)

            print ("********Launching experiment at utilization %s with sample ratio %s..." %
                   (utilization, sample_ratio))

            print ("********Deploying with arrival rate %s and warmup arrival rate %s"
                   % (arrival_rate_s, warmup_arrival_rate_s))
            ec2_exp.deploy_cluster(frontends, backends, opts, warmup_arrival_rate_s, warmup_s,
                                   post_warmup_s, nm_task_scheduler, users)
            ec2_exp.start_sparrow(frontends, backends, opts)

            print "*******Sleeping after starting Sparrow"
            time.sleep(10)
            print "********Starting prototype frontends and backends"
            ec2_exp.start_proto(frontends, backends, opts)
            time.sleep(trial_length)

            log_dirname = "/disk1/sparrow/isolation_%s_%s" % (utilization, sample_ratio)
            while os.path.exists(log_dirname):
                log_dirname = "%s_a" % log_dirname
            os.mkdir(log_dirname)

            ec2_exp.execute_command(frontends, backends, opts, "./find_bugs.sh")

            print "********Stopping prototypes and Sparrow"
            ec2_exp.stop_proto(frontends, backends, opts)
            ec2_exp.stop_sparrow(frontends, backends, opts)

            print "********Collecting logs and placing in %s" % log_dirname
            opts.log_dir = log_dirname
            ec2_exp.collect_logs(frontends, backends, opts)
            run_cmd("gunzip %s/*.gz" % log_dirname)

            print "********Parsing logs"
            run_cmd(("cd /tmp/sparrow/src/main/python/ && ./parse_logs.sh log_dir=%s "
                     "output_dir=%s/results start_sec=350 end_sec=450 && cd -") %
                    (log_dirname, log_dirname))

if __name__ == "__main__":
    main(sys.argv[1:])



================================================
FILE: deploy/ec2/isolation.sh
================================================
#!/bin/sh
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./isolation.py $@


================================================
FILE: deploy/ec2/osdi.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import subprocess
import time

def run_cmd(cmd):
  subprocess.check_call(cmd, shell=True)

# 50-node: q1, 3.5, par 10, sf .5 = 60% utilization
# 50-node: q6, 4, par 10, sf .5 = 60% utilization
# 50-node: q3, 2, par 10, sf .5 = 50% utilization
# 100-node: q6 (think I ran at 4ps)
# 100-node: q3

trial_length = 300
ratios = [(2, 2)]
rates = [1]
tpch_query = 1
par_level = 20

for ratio in ratios:
  for rate in rates:
    run_cmd("./ec2-exp.sh stop-sparrow -i ~/.ssh/eastkey.pem")
    run_cmd("./ec2-exp.sh stop-spark -i ~/.ssh/eastkey.pem")
    run_cmd("./ec2-exp.sh deploy -g better-policies -s sparrow -k eastkey -i ~/.ssh/eastkey.pem -p %s -q %s" % (ratio[0], ratio[1]))
    run_cmd("./ec2-exp.sh start-sparrow -i ~/.ssh/eastkey.pem")
    time.sleep(30)
    max_queries = int(trial_length * rate)
    run_cmd("./ec2-exp.sh start-spark -i ~/.ssh/eastkey.pem -m "
            "sparrow -v %s -j %s -o %s -r %s" % (rate, max_queries, 
                                                 tpch_query, par_level))
    dirname = "%s_%s_%s" % (ratio[0], ratio[1], rate)
    if not os.path.exists(dirname):
      os.mkdir(dirname)
    time.sleep(trial_length + 130) 
    run_cmd("./ec2-exp.sh collect-logs -i ~/.ssh/eastkey.pem --log-dir=%s/" % 
    dirname)
    run_cmd("cd %s && gunzip *.gz && cd -" % dirname)




================================================
FILE: deploy/ec2/prepare_tpch_experiments.py
================================================
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import sys
import time

def run_cmd(cmd):
  subprocess.check_call(cmd, shell=True)


def main(argv):
  if len(argv) != 3 and len(argv) != 1:
    print "Usage: python prepare_tpch_experiments.py [opt: num_frontends num_backends]"
    print "Specifying num_frontends and num_backends will cause a cluster to be launched"
    return

  cluster_name = "tpch"
  key_loc = "patkey.pem"

  if len(argv) == 3:
      num_frontends = argv[1]
      num_backends = argv[2]

      print "Launching %s frontends and %s backends" % (num_frontends, num_backends)
      # add --spot-price=foo for spot pricing
      launch_cmd = ("./ec2-exp.sh -t m2.4xlarge --spot-price=2.00 -a ami-533a733a -i %s launch %s -f %s -b %s" %
          (key_loc, cluster_name, num_frontends, num_backends))
      #launch_cmd = ("./ec2-exp.sh -t cr1.8xlarge --spot-price=2.00 -a ami-894801e0 -i %s launch %s -f %s -b %s" %
      #    (key_loc, cluster_name, num_frontends, num_backends))
      run_cmd(launch_cmd)

      print "Sleeping for 1 minute after launching machines"
      time.sleep(60)

  print "***********Attempting to stop HDFS"
  stop_hdfs_cmd = "./ec2-exp.sh -i %s stop-hdfs %s" % (key_loc, cluster_name)
  run_cmd(stop_hdfs_cmd)
 
  print "***********Deploying Code and Config Files"
  backend_mem="10g"
  deploy_cmd = "./ec2-exp.sh -i %s deploy %s --spark-backend-mem %s" % (key_loc, cluster_name, backend_mem)
  run_cmd(deploy_cmd)

  print "***********Starting HDFS"
  hdfs_cmd = "./ec2-exp.sh -i %s start-hdfs %s" % (key_loc, cluster_name)
  run_cmd(hdfs_cmd)

  scale_factor = 2
  print "***********Creating database"
  create_db_cmd = ("./ec2-exp.sh -i %s create-database %s --scale-factor %s" %
                   (key_loc, cluster_name, scale_factor))
  # This returns a non-zero exit code if some of the directories already exist,
  # which is not an error we care about.
  subprocess.call(create_db_cmd, shell=True)

  print "***********Starting Sparrow (needed for creating denormalized Hive tables)"
  restart_cmd = "./ec2-exp.sh -i %s restart-spark-shark %s" % (key_loc, cluster_name)
  run_cmd(restart_cmd)

  print "***********Creating TPCH Tables in Shark"
  create_tables_cmd = "./ec2-exp.sh -i %s create-tpch-tables %s " % (key_loc, cluster_name)
  run_cmd(create_tables_cmd)

if __name__ == "__main__":
  main(sys.argv)


================================================
FILE: deploy/ec2/prepare_tpch_experiments.sh
================================================
#!/bin/sh

#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./prepare_tpch_experiments.py $@


================================================
FILE: deploy/ec2/probe-ratio-het.sh
================================================
#!/bin/sh
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./probe_ratio_het.py $@


================================================
FILE: deploy/ec2/probe-ratio.sh
================================================
#!/bin/sh
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

cd "`dirname $0`"
PYTHONPATH="$PYTHONPATH:../third_party/boto-2.1.1" python ./probe_ratio.py $@


================================================
FILE: deploy/ec2/probe_ratio.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import boto
import os
import subprocess
import sys
import time

import ec2_exp

def run_cmd(cmd):
    subprocess.check_call(cmd, shell=True)

def main(argv):
    launch_instances = False
    if len(argv) >= 1 and argv[0] == "True":
        launch_instances = True

    utilizations = [0.8, 0.9]
    sample_ratios = [1, 1.1, 1.2, 1.5, 2.0, 3.0]
    sample_ratio_constrained = 2

    # Amount of time it takes each task to run in isolation
    task_duration_ms = 100
    tasks_per_job = 10
    private_ssh_key = "patkey.pem"
    sparrow_branch = "master"
    nm_task_scheduler = "fifo"
    num_backends = 100
    num_frontends = 10
    cores_per_backend = 8
    # Length of each trial, in seconds.
    trial_length = 400
    num_preferred_nodes = 0
    num_users = 1
    cluster = "probe"

    full_utilization_rate_s = (float(num_backends * cores_per_backend * 1000) /
                               (task_duration_ms * tasks_per_job * num_frontends))

    # Warmup information
    warmup_s = 30
    post_warmup_s = 30
    warmup_arrival_rate_s = 0.4 * full_utilization_rate_s

    if launch_instances:
        print "********Launching instances..."
        run_cmd("./ec2-exp.sh launch %s -f %s -b %s -i %s" % # --spot-price %s" %
                (cluster, num_frontends, num_backends, private_ssh_key))
        time.sleep(10)

    for sample_ratio in sample_ratios:
        for utilization in utilizations:
            arrival_rate_s = utilization * full_utilization_rate_s

            # This is a little bit of a hacky way to pass args to the ec2 script.
            (opts, args) = ec2_exp.parse_args(False)
            opts.identity_file = private_ssh_key
            opts.arrival_rate = arrival_rate_s
            opts.branch = sparrow_branch
            opts.sample_ratio  = sample_ratio
            opts.sample_ratio_constrained = sample_ratio_constrained
            opts.tasks_per_job = tasks_per_job
            opts.num_preferred_nodes = num_preferred_nodes
            opts.cpus = cores_per_backend

            conn = boto.connect_ec2()
            frontends, backends = ec2_exp.find_existing_cluster(conn, opts, cluster)

            print ("********Launching experiment at utilization %s with sample ratio %s..." %
                   (utilization, sample_ratio))

            print ("********Deploying with arrival rate %s and warmup arrival rate %s"
                   % (arrival_rate_s, warmup_arrival_rate_s))
            ec2_exp.deploy_cluster(frontends, backends, opts, warmup_arrival_rate_s, warmup_s,
                                   post_warmup_s, nm_task_scheduler)
            ec2_exp.start_sparrow(frontends, backends, opts)

            print "*******Sleeping after starting Sparrow"
            time.sleep(10)
            print "********Starting prototype frontends and backends"
            ec2_exp.start_proto(frontends, backends, opts)
            time.sleep(trial_length)

            log_dirname = "/home/ec2-user/sparrow/deploy/ec2/probe_ratio_%s_%s" % (utilization, sample_ratio)
            while os.path.exists(log_dirname):
                log_dirname = "%s_a" % log_dirname
            os.mkdir(log_dirname)

            ec2_exp.execute_command(frontends, backends, opts, "./find_bugs.sh")

            print "********Stopping prototypes and Sparrow"
            ec2_exp.stop_proto(frontends, backends, opts)
            ec2_exp.stop_sparrow(frontends, backends, opts)

            print "********Collecting logs and placing in %s" % log_dirname
            opts.log_dir = log_dirname
            ec2_exp.collect_logs(frontends, backends, opts)
            #run_cmd("gunzip %s/*.gz" % log_dirname)

            #print "********Parsing logs"
            #run_cmd(("cd ../../src/main/python/ && ./parse_logs.sh log_dir=%s "
            #         "output_dir=%s/results start_sec=350 end_sec=450 && cd -") %
            #        (log_dirname, log_dirname))

if __name__ == "__main__":
    main(sys.argv[1:])



================================================
FILE: deploy/ec2/probe_ratio_het.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import boto
import os
import subprocess
import sys
import time

import ec2_exp

def run_cmd(cmd):
    subprocess.check_call(cmd, shell=True)

def main(argv):
    launch_instances = False
    if len(argv) >= 1 and argv[0] == "True":
        launch_instances = True

    utilizations = [0.8, 0.9]
    sample_ratios = [1.1, 1.2, 1.5, 2.0, 3.0]
    sample_ratio_constrained = 2

    # Amount of time it takes each task to run in isolation
    task_duration_ms = 100
    tasks_per_job = 10
    private_ssh_key = "patkey.pem"
    sparrow_branch = "master"
    nm_task_scheduler = "fifo"
    num_backends = 100
    num_frontends = 10
    cores_per_backend = 8
    # Length of each trial, in seconds.
    trial_length = 400
    num_preferred_nodes = 0
    num_users = 1
    cluster = "probe"

    full_utilization_rate_s = (float(num_backends * cores_per_backend * 1000) /
                               (task_duration_ms * tasks_per_job * num_frontends))

    # Warmup information
    warmup_s = 120
    post_warmup_s = 30
    warmup_arrival_rate_s = 0.4 * full_utilization_rate_s

    if launch_instances:
        print "********Launching instances..."
        run_cmd("./ec2-exp.sh launch %s -f %s -b %s -i %s" % # --spot-price %s" %
                (cluster, num_frontends, num_backends, private_ssh_key))
        time.sleep(10)

    for sample_ratio in sample_ratios:
        for utilization in utilizations:
            arrival_rate_s = utilization * full_utilization_rate_s

            # This is a little bit of a hacky way to pass args to the ec2 script.
            (opts, args) = ec2_exp.parse_args(False)
            opts.identity_file = private_ssh_key
            opts.arrival_rate = arrival_rate_s
            opts.branch = sparrow_branch
            opts.sample_ratio  = sample_ratio
            opts.sample_ratio_constrained = sample_ratio_constrained
            opts.tasks_per_job = tasks_per_job
            opts.num_preferred_nodes = num_preferred_nodes
            opts.cpus = cores_per_backend
            opts.benchmark_iterations = 100

            conn = boto.connect_ec2()
            frontends, backends = ec2_exp.find_existing_cluster(conn, opts, cluster)

            print ("********Launching experiment at utilization %s with sample ratio %s..." %
                   (utilization, sample_ratio))

            print ("********Deploying with arrival rate %s and warmup arrival rate %s"
                   % (arrival_rate_s, warmup_arrival_rate_s))
            ec2_exp.deploy_cluster(frontends, backends, opts, warmup_arrival_rate_s, warmup_s,
                                   post_warmup_s, nm_task_scheduler)
            # Redeploy on half of the frontends with a longer duration for each task and a correspondingly
            # lower arrival rate.
            opts.arrival_rate = arrival_rate_s / 100.
            opts.benchmark_iterations = 10000
            ec2_exp.redeploy_sparrow(frontends[:5], backends, opts, warmup_arrival_rate_s / 100., warmup_s, post_warmup_s, nm_task_scheduler) 
            ec2_exp.start_sparrow(frontends, backends, opts)

            print "*******Sleeping after starting Sparrow"
            time.sleep(10)
            print "********Starting prototype frontends and backends"
            ec2_exp.start_proto(frontends, backends, opts)
            time.sleep(trial_length)

            log_dirname = "/home/ec2-user/sparrow/deploy/ec2/probe_ratio_%s_%s" % (utilization, sample_ratio)
            while os.path.exists(log_dirname):
                log_dirname = "%s_a" % log_dirname
            os.mkdir(log_dirname)

            ec2_exp.execute_command(frontends, backends, opts, "./find_bugs.sh")

            print "********Stopping prototypes and Sparrow"
            ec2_exp.stop_proto(frontends, backends, opts)
            ec2_exp.stop_sparrow(frontends, backends, opts)

            print "********Collecting logs and placing in %s" % log_dirname
            opts.log_dir = log_dirname
            ec2_exp.collect_logs(frontends, backends, opts)
            #run_cmd("gunzip %s/*.gz" % log_dirname)

            #print "********Parsing logs"
            #run_cmd(("cd ../../src/main/python/ && ./parse_logs.sh log_dir=%s "
            #         "output_dir=%s/results start_sec=350 end_sec=450 && cd -") %
            #        (log_dirname, log_dirname))

if __name__ == "__main__":
    main(sys.argv[1:])



================================================
FILE: deploy/ec2/spark_v_mesos.py
================================================
#
# Copyright 2013 The Regents of The University California
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#   http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import subprocess
import time

def run_cmd(cmd):
  print cmd
  subprocess.check_call(cmd, shell=True)

trial_length = 400
probe_ratio = (2, 2)
query_nums = [7000]
query_par = 10
scheds = ["sparrow"]

# STOP ALL THE THINGS
run_cmd("./ec2-exp.sh stop-spark -i ~/.ssh/eastkey.pem")
run_cmd("./ec2-exp.sh stop-sparrow -i ~/.ssh/eastkey.pem")
run_cmd("./ec2-exp.sh stop-mesos -i ~/.ssh/eastkey.pem")

for task_length in query_nums:
  rate = float(1000) / task_length # WAS 2000! but went from 5->10 nodes
  for sched in scheds:
    run_cmd("./ec2-exp.sh deploy -g better-policies -s sparrow -k eastkey -i ~/.ssh/eastkey.pem -p %s -q %s" % (probe_ratio[0], probe_ratio[1]))
    run_cmd("./ec2-exp.sh start-%s -i ~/.ssh/eastkey.pem" % sched)
#    time.sleep(30)
    max_queries = int(trial_length * rate)
    run_cmd("./ec2-exp.sh start-spark -i ~/.ssh/eastkey.pem "
            "-v %s -j %s -o %s -r %s -m %s" % (rate, max_queries, 
                                                 task_length, query_par, sched))
    dirname = "race/%s_%s" % (sched, task_length)
    if not os.path.exists(dirname):
      os.mkdir(dirname)
    time.sleep(trial_length + 20) 
    run_cmd("./ec2-exp.sh collect-logs -i ~/.ssh/eastkey.pem --log-dir=%s/" % 
    dirname)
    run_cmd("cd %s && gunzip *.gz && cd -" % dirname)
    run_cmd("./ec2-exp.sh stop-spark -i ~/.ssh/eastkey.pem")
    run_cmd("./ec2-exp.sh stop-%s -i ~/.ssh/eastkey.pem" % sched)




================================================
FILE: deploy/ec2/template/README
================================================
All files in this folder are copied to each remote Sparrow node, with any
variables filled in by the EC2 install script.
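
For illustration only, a minimal sketch of how such {{variable}} placeholders
could be filled in (the fill_template helper below is hypothetical and is not
part of the deploy scripts):

    # Hypothetical sketch: fill {{var}} placeholders in a template string.
    def fill_template(text, values):
        for key, value in values.items():
            text = text.replace("{{%s}}" % key, str(value))
        return text

    # e.g. fill_template(open("frontend.conf").read(), {"warmup_s": 30})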


================================================
FILE: deploy/ec2/template/backends.txt
================================================
{{backend_list}}


================================================
FILE: deploy/ec2/template/build_sparrow.sh
================================================
#!/bin/bash
# Build Sparrow locally
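# Clones Sparrow, a Sparrow-aware Spark fork, and Shark into /tmp, builds each
# one (skipping steps whose outputs already exist), and copies the results into
# the install directories below.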

SPARROW_INSTALL_DIR=~/sparrow/
SPARK_INSTALL_DIR=~/spark/
SHARK_INSTALL_DIR=~/shark/

chmod 755 ~/*.sh

if [ ! -d "$SPARROW_INSTALL_DIR" ]; then
  mkdir $SPARROW_INSTALL_DIR
fi;

if [ ! -d "$SPARK_INSTALL_DIR" ]; then
  mkdir $SPARK_INSTALL_DIR
fi;

if [ ! -d "$SHARK_INSTALL_DIR" ]; then
  mkdir $SHARK_INSTALL_DIR
fi;

cd /tmp/

if [ ! -d "sparrow" ]; then
  git clone git://github.com/radlab/sparrow.git -b {{git_branch}}
fi;


if [ ! -d "spark" ]; then
  git clone git://github.com/kayousterhout/spark.git -b {{spark_git_branch}}
fi;

if [ ! -d "shark" ]; then
  git clone git://github.com/kayousterhout/shark.git -b sparrow
fi;

cd sparrow
if [ ! -e "/tmp/sparrow/target/sparrow-1.0-SNAPSHOT.jar" ]; then
  mvn package -Dmaven.test.skip=true
fi
cp /tmp/sparrow/target/sparrow-1.0-SNAPSHOT.jar $SPARROW_INSTALL_DIR

# Copy the Sparrow jar to a place where Spark includes it in the build.
if [ ! -d "/tmp/spark/core/lib" ]; then
  mkdir /tmp/spark/core/lib
fi
cp /tmp/sparrow/target/sparrow-1.0-SNAPSHOT.jar /tmp/spark/core/lib/

cd /tmp/spark
if [ ! -e "/tmp/spark/core/target/scala-2.9.1" ]; then
  sbt/sbt compile
  sbt/sbt publish-local
fi

# Manually download httpcore, which SBT seems to have issues with
wget http://repo1.maven.org/maven2/org/apache/httpcomponents/httpcore/4.1.2/httpcore-4.1.2.jar
HTTPCORE_DIR=/root/.m2/repository/org/apache/httpcomponents/httpcore/4.1.2
mkdir -p $HTTPCORE_DIR
mv httpcore-4.1.2.jar $HTTPCORE_DIR

cd /tmp/shark

# Copy shark-env.sh into the conf/ directory, because it's needed for building.
if [ ! -d "conf" ]; then
  mkdir conf
fi
cp ~/shark-env.sh conf/

if [ ! -e "/tmp/shark/target/scala-2.9.1/classes/shark/SharkEnv.class" ]; then
sbt/sbt products
fi
# This jar could be in sparrow spark branch instead.
cp /tmp/spark/core/lib/sparrow* /tmp/shark/lib

cp -r /tmp/spark/* $SPARK_INSTALL_DIR
cp -r /tmp/shark/* $SHARK_INSTALL_DIR


================================================
FILE: deploy/ec2/template/clean_logs.sh
================================================
#!/bin/bash
rm -f /tmp/*.log
rm -f /tmp/*.log.gz
rm -f ~/*.log
rm -f ~/*.log.gz
rm /tmp/spark-local-* -rf


================================================
FILE: deploy/ec2/template/configure_node.sh
================================================
#!/bin/bash
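# Per-node setup: sync the clock, mount the local disk at /disk1, create the
# HDFS/Sparrow/Spark working directories, install the copied config files, and
# point the TPC-H Hive queries at this node.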

/etc/init.d/ntpd stop
ntpdate ntp.ubuntu.com

rm -rf /tmp/spark-local*

if [ -d "/mnt" ]; then
  umount /mnt/
  rmdir /mnt/
fi;

if [ ! -d "/disk1" ]; then
  mkdir /disk1
  mount /dev/xvdb /disk1 -t ext3
fi;

mkdir -p /disk1/hdfs/name
chown hdfs.hdfs /disk1/hdfs/name

mkdir -p /disk1/sparrow
mkdir -p /disk1/spark-tmp
mkdir -p /disk1/hdfs/data
mkdir -p /disk1/tmp/
mkdir -p /disk1/tmp/spark/
chown hdfs.hdfs /disk1/hdfs/data

# Remove the annoying EC2-internal entry from the hosts file
cat /etc/hosts | grep -v internal > tmp && mv tmp /etc/hosts

cp ~/hdfs-site.xml /opt/hadoop/conf/
cp ~/hadoop-env.sh /opt/hadoop/conf/
cp ~/backends.txt /opt/hadoop/conf/slaves
cp ~/hive-site.xml /opt/hive/conf/
cp ~/shark-env.sh /root/shark/conf/shark-env.sh
cp ~/spark-env.sh /root/spark/conf/spark-env.sh

# Create references to per-frontend hive tables
cd /opt/tpch_hive/
bash -c "sed -i 's/\/tpch/\/`hostname -i`/g' *.hive"
cd -

# Reference correct directory in hive queries
bash -c "sed -i 's/HOST_IP/`hostname -i`/g' ~/tpch/*"



================================================
FILE: deploy/ec2/template/create_database.sh
================================================
#!/bin/bash
# Generate databases

SCALE=$1
FRONTENDS=`cat frontends.txt`

if [ "$SCALE" = "" ];
then
  echo "Scale factor required"
  exit -1
fi

if [ -d "/disk1/tpch" ];
then
  rm -rf /disk1/tpch
fi

mkdir /disk1/tpch
cp /opt/tpch/dbgen/* /disk1/tpch

echo "Generating database"
cd /disk1/tpch
./dbgen -f -s $SCALE


echo "Copying database"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -rmr "hdfs://{{name_node}}:8020/tpch/*"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -rmr "hdfs://{{name_node}}:8020/tpch/"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/tpch/"

# Make and populate /tpch/[tbl] directory for each table
for t in *.tbl; do
  name=`echo $t | sed "s/.tbl//g"`
  sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/tpch/$name/"
  sudo -u hdfs /opt/hadoop/bin/hadoop dfs -Ddfs.block.size=33554432 -copyFromLocal $t hdfs://{{name_node}}:8020/tpch/$name/
done

echo "Making Hive User Directory"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/user/"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/user/hive/"

echo "Making Temp Dir"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/tmp/"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -chmod -R 777 "hdfs://{{name_node}}:8020/tmp/"

echo "Making Hive warehouse dir for denorm table"
sudo -u hdfs /opt/hadoop/bin/hadoop dfs -mkdir "hdfs://{{name_node}}:8020/tpch/denorm-warehouse/"



================================================
FILE: deploy/ec2/template/create_tpch_tables_primary.sh
================================================
#!/bin/bash
# Create shark tables for tpc-h benchmark

LOG=/disk1/sparrow/createTPCH.log

SHARK=/root/shark/bin/shark-withinfo
QUERY_DIR=/root/tpch

cd /disk1/sparrow/
echo "Making base tables"
$SHARK -f $QUERY_DIR/make_base_tables.hql > $LOG 2>&1

echo "Making demormalized table"
$SHARK -f $QUERY_DIR/make_denorm_table_primary.hql > $LOG 2>&1



================================================
FILE: deploy/ec2/template/create_tpch_tables_secondary.sh
================================================
#!/bin/bash
# Create shark tables for tpc-h benchmark

LOG=/disk1/sparrow/createTPCH.log

SHARK=/root/shark/bin/shark-withinfo
QUERY_DIR=/root/tpch

cd /disk1/sparrow/
echo "Making base tables"
$SHARK -f $QUERY_DIR/make_base_tables.hql > $LOG 2>&1

echo "Making demormalized table"
$SHARK -f $QUERY_DIR/make_denorm_table_secondary.hql > $LOG 2>&1



================================================
FILE: deploy/ec2/template/deploy_sparrow.sh
================================================
#!/bin/bash
# Deploy built version of Sparrow on frontends and backends.

FRONTENDS=`cat frontends.txt`
BACKENDS=`cat backends.txt`
SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"

for fe in $FRONTENDS; do
  rsync -e "ssh $SSH_OPTS" --delete -az ~/ `dig +short $fe`:~/ &
done
wait

for fe in $FRONTENDS; do
  ssh $SSH_OPTS `dig +short $fe` "/root/configure_node.sh" &
done

for be in $BACKENDS; do
  rsync -e "ssh $SSH_OPTS" --delete -az ~/ `dig +short $be`:~/ &
done
wait

for be in $BACKENDS; do
  ssh $SSH_OPTS `dig +short $be` "/root/configure_node.sh" &
done
wait

if [ ! -d "/disk1/hdfs/name/current/" ]; then
  echo "Formatting Namenode"
  echo "Y" | sudo -u hdfs /opt/hadoop/bin/hadoop namenode -format
else
  echo "Namenode formatted"
fi


================================================
FILE: deploy/ec2/template/find_bugs.sh
================================================
#!/bin/bash
host=`ec2metadata  |grep public-hostname | cut -d " " -f 2`
echo $host
cat /disk1/sparrow/* |egrep "ERROR|Exception" | wc  


================================================
FILE: deploy/ec2/template/find_bugs_filtered.sh
================================================
#!/bin/bash
#Filters out some known issues
host=`ec2metadata  |grep public-hostname | cut -d " " -f 2`
echo $host
cat /disk1/sparrow/* |egrep "ERROR|Exception" | grep -v ConfigNodeMonitorState | wc  


================================================
FILE: deploy/ec2/template/find_bugs_verbose.sh
================================================
#!/bin/bash
#Filters out some known issues
host=`ec2metadata  |grep public-hostname | cut -d " " -f 2`
echo $host
cat /disk1/sparrow/* |egrep "ERROR|Exception" | grep -v ConfigNodeMonitorState 


================================================
FILE: deploy/ec2/template/find_cache_partitions.sh
================================================
#!/bin/bash
host=`ec2metadata  |grep public-hostname | cut -d " " -f 2`
echo $host
cat /disk1/sparrow/* |egrep "ensureFree"
exit 0


================================================
FILE: deploy/ec2/template/frontend.conf
================================================
job_arrival_rate_s = {{arrival_lambda}}
tasks_per_job = {{tasks_per_job}}
benchmark.id = {{benchmark_id}}
benchmark.iterations = {{benchmark_iterations}}
num_preferred_nodes = {{num_preferred_nodes}}
backends = {{backend_comma_joined_list}}
warmup_job_arrival_rate_s = {{warmup_job_arrival_rate_s}}
warmup_s = {{warmup_s}}
post_warmup_s = {{post_warmup_s}}
users = {{users}}


================================================
FILE: deploy/ec2/template/frontends.txt
================================================
{{frontend_list}}


================================================
FILE: deploy/ec2/template/hadoop-env.sh
================================================
# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.  Required.
export JAVA_HOME=/usr/lib/jvm/java-6-sun/jre
#export JAVA_HOME=/usr/lib/jvm/jre-1.6.0-openjdk.x86_64

# Extra Java CLASSPATH elements.  Optional.
export HADOOP_CLASSPATH="/opt/hive/conf/:/opt/hive/build/dist/lib"

# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000

# Extra Java runtime options.  Empty by default.
# export HADOOP_OPTS=-server

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
# export HADOOP_TASKTRACKER_OPTS=
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS

# Extra ssh options.  Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"

# Where log files are stored.  $HADOOP_HOME/logs by default.
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs

# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves

# host:path where hadoop code should be rsync'd from.  Unset by default.
# export HADOOP_MASTER=master:/home/$USER/src/hadoop

# Seconds to sleep between slave commands.  Unset by default.  This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1

# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids

# A string representing this instance of hadoop. $USER by default.
# export HADOOP_IDENT_STRING=$USER

# The scheduling priority for daemon processes.  See 'man nice'.
# export HADOOP_NICENESS=10


================================================
FILE: deploy/ec2/template/hdfs-site.xml
================================================
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>

  <property>
    <name>dfs.name.dir</name>
    <value>/disk1/hdfs/name</value>
  </property>

  <property>
    <name>dfs.data.dir</name>
    <value>/disk1/hdfs/data</value>
  </property>

  <property>
    <name>fs.default.name</name>
    <value>hdfs://{{name_node}}:8020/</value>
  </property>

  <property>
  <name>dfs.blocksize</name>
  <value>33554432</value>
  </property>
</configuration>


================================================
FILE: deploy/ec2/template/hive-site.xml
================================================
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->

<configuration>

<!-- Just need to set this to something other than local -->
<property>
<name>mapreduce.framework.name</name>
<value>foo</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>foo</value>
</property>

<!-- Hive Configuration can either be stored in this file or in the hadoop configuration files  -->
<!-- that are implied by Hadoop setup variables.                                                -->
<!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive    -->
<!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
<!-- resource).                                                                                 -->

<!-- Hive Execution Parameters -->
<property>
  <name>mapred.reduce.tasks</name>
  <value>-1</value>
    <description>The default number of reduce tasks per job.  Typically set
  to a prime close to the number of available hosts.  Ignored when
  mapred.job.tracker is "local". Hadoop set this to 1 by default, whereas hive uses -1 as its default value.
  By setting this property to -1, Hive will automatically figure out what should be the number of reducers.
  </description>
</property>

<property>
  <name>mapred.max.split.size</name>
  <value>256000000</value>
  <description>Largest valid size in bytes for a file split.
  This should be set to a small multiple of the block size in bytes</description>
</property>

<property>
  <name>mapred.min.split.size</name>
  <value>1</value>
  <description>Smallest valid size in bytes for a file split</description>
</property>

<property>
  <name>mapred.min.split.size.per.rack</name>
  <value>1</value>
  <description>The minimum number of bytes of data required to create a rack-local partition</description>
</property>

<property>
  <name>mapred.min.split.size.per.node</name>
  <value>1</value>
  <description>The minimum number of bytes of data required to create a node-local partition</description>
</property>

<property>
  <name>hive.exec.reducers.bytes.per.reducer</name>
  <value>1000000000</value>
  <description>size per reducer.The default is 1G, i.e if the input size is 10G, it will use 10 reducers.</description>
</property>

<property>
  <name>hive.exec.reducers.max</name>
  <value>999</value>
  <description>max number of reducers will be used. If the one
	specified in the configuration parameter mapred.reduce.tasks is
	negative, hive will use this one as the max number of reducers when
	automatically determine number of reducers.</description>
</property>

<property>
  <name>hive.cli.print.header</name>
  <value>false</value>
  <description>Whether to print the names of the columns in query output.</description>
</property>

<property>
  <name>hive.exec.scratchdir</name>
  <value>/tmp/hive-${user.name}</value>
  <description>Scratch space for Hive jobs</description>
</property>

<property>
  <name>hive.test.mode</name>
  <value>false</value>
  <description>whether hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename</description>
</property>

<property>
  <name>hive.test.mode.prefix</name>
  <value>test_</value>
  <description>if hive is running in test mode, prefixes the output table by this string</description>
</property>

<!-- If the input table is not bucketed, the denominator of the tablesample is determinied by the parameter below   -->
<!-- For example, the following query:                                                                              -->
<!--   INSERT OVERWRITE TABLE dest                                                                                  -->
<!--   SELECT col1 from src                                                                                         -->
<!-- would be converted to                                                                                          -->
<!--   INSERT OVERWRITE TABLE test_dest                                                                             -->
<!--   SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))                                             -->
<property>
  <name>hive.test.mode.samplefreq</name>
  <value>32</value>
  <description>if hive is running in test mode and table is not bucketed, sampling frequency</description>
</property>

<property>
  <name>hive.test.mode.nosamplelist</name>
  <value></value>
  <description>if hive is running in test mode, dont sample the above comma seperated list of tables</description>
</property>

<property>
  <name>hive.metastore.local</name>
  <value>true</value>
  <description>controls whether to connect to remove metastore server or open a new metastore server in Hive Client JVM</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>

<property>
  <name>javax.jdo.PersistenceManagerFactoryClass</name>
  <value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
  <description>class implementing the jdo persistence</description>
</property>

<property>
  <name>javax.jdo.option.DetachAllOnCommit</name>
  <value>true</value>
  <description>detaches all objects from session so that they can be used after transaction is committed</description>
</property>

<property>
  <name>javax.jdo.option.NonTransactionalRead</name>
  <value>true</value>
  <description>reads outside of transactions</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>APP</value>
  <description>username to use against metastore database</description>
</property>

<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>mine</value>
  <description>password to use against metastore database</description>
</property>

<property>
  <name>datanucleus.connectionPoolingType</name>
  <value>DBCP</value>
  <description>Uses a DBCP connection pool for JDBC metastore</description>
</property>

<property>
  <name>datanucleus.validateTables</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
</property>

<property>
  <name>datanucleus.validateColumns</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
</property>

<property>
  <name>datanucleus.validateConstraints</name>
  <value>false</value>
  <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
</property>

<property>
  <name>datanucleus.storeManagerType</name>
  <value>rdbms</value>
  <description>metadata store type</description>
</property>

<property>
  <name>datanucleus.autoCreateSchema</name>
  <value>true</value>
  <description>creates necessary schema on startup if one doesn't exist. Set this to false after creating it once.</description>
</property>

<property>
  <name>datanucleus.autoStartMechanismMode</name>
  <value>checked</value>
  <description>throw exception if metadata tables are incorrect</description>
</property>

<property>
  <name>datanucleus.transactionIsolation</name>
  <value>read-committed</value>
  <description>Default transaction isolation level for identity generation. </description>
</property>

<property>
  <name>datanucleus.cache.level2</name>
  <value>false</value>
  <description>Use a level 2 cache. Turn this off if metadata is changed independently of hive metastore server</description>
</property>

<property>
  <name>datanucleus.cache.level2.type</name>
  <value>SOFT</value>
  <description>SOFT=soft reference based cache, WEAK=weak reference based cache.</description>
</property>

<property>
  <name>datanucleus.identifierFactory</name>
  <value>datanucleus</value>
  <description>Name of the identifier factory to use when generating table/column names etc. 'datanucleus' is used for backward compatibility</description>
</property>

<property>
  <name>datanucleus.plugin.pluginRegistryBundleCheck</name>
  <value>LOG</value>
  <description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
</property>

<property>
  <name>hive.metastore.warehouse.dir</name>
  <value>/tpch/denorm-warehouse</value>
  <description>location of default database for the warehouse</description>
</property>

<property>
  <name>hive.metastore.connect.retries</name>
  <value>5</value>
  <description>Number of retries while opening a connection to metastore</description>
</property>

<property>
  <name>hive.metastore.client.connect.retry.delay</name>
  <value>1</value>
  <description>Number of seconds for the client to wait between consecutive connection attempts</description>
</property>

<property>
  <name>hive.metastore.client.socket.timeout</name>
  <value>20</value>
  <description>MetaStore Client socket timeout in seconds</description>
</property>

<property>
  <name>hive.metastore.rawstore.impl</name>
  <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
  <description>Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface. This class is used for storage and retrieval of raw metadata objects such as tables and databases</description>
</property>

<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
</property>

<property>
  <name>hive.fileformat.check</name>
  <value>true</value>
  <description>Whether to check file format or not when loading data files</description>
</property>

<property>
  <name>hive.map.aggr</name>
  <value>true</value>
  <description>Whether to use map-side aggregation in Hive Group By queries</description>
</property>

<property>
  <name>hive.groupby.skewindata</name>
  <value>false</value>
  <description>Whether there is skew in data to optimize group by queries</description>
</property>

<property>
  <name>hive.groupby.mapaggr.checkinterval</name>
  <value>100000</value>
  <description>Number of rows after which the size of the grouping keys/aggregation classes is checked</description>
</property>

<property>
  <name>hive.mapred.local.mem</name>
  <value>0</value>
  <description>For local mode, memory of the mappers/reducers</description>
</property>

<property>
  <name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name>
  <value>0.3</value>
  <description>Portion of total memory to be used by the map-side group aggregation hash table when this group by is followed by a map join</description>
</property>

<property>
  <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
  <value>0.9</value>
  <description>The max memory to be used by the map-side group aggregation hash table; if memory usage is higher than this number, data is forcibly flushed</description>
</property>

<property>
  <name>hive.map.aggr.hash.percentmemory</name>
  <value>0.5</value>
  <description>Portion of total memory to be used by the map-side group aggregation hash table</description>
</property>

<property>
  <name>hive.map.aggr.hash.min.reduction</name>
  <value>0.5</value>
  <description>Hash aggregation will be turned off if the ratio between hash
  table size and input rows is bigger than this number. Set to 1 to make sure
  hash aggregation is never turned off.</description>
</property>

<property>
  <name>hive.optimize.cp</name>
  <value>true</value>
  <description>Whether to enable column pruner</description>
</property>

<property>
  <name>hive.optimize.ppd</name>
  <value>true</value>
  <description>Whether to enable predicate pushdown</description>
</property>

<property>
  <name>hive.optimize.ppd.storage</name>
  <value>true</value>
  <description>Whether to push predicates down into storage handlers.  Ignored when hive.optimize.ppd is false.</description>
</property>

<property>
  <name>hive.optimize.pruner</name>
  <value>true</value>
  <description>Whether to enable the new partition pruner which depends on predicate pushdown. If this is disabled,
  the old partition pruner which is based on AST will be enabled.</description>
</property>

<property>
  <name>hive.optimize.groupby</name>
  <value>true</value>
  <description>Whether to enable the bucketed group by from bucketed partitions/tables.</description>
</property>

<property>
  <name>hive.join.emit.interval</name>
  <value>1000</value>
  <description>How many rows in the right-most join operand Hive should buffer before emitting the join result. </description>
</property>

<property>
  <name>hive.join.cache.size</name>
  <value>25000</value>
  <description>How many rows in the joining tables (except the streaming table) should be cached in memory. </description>
</property>

<property>
  <name>hive.mapjoin.bucket.cache.size</name>
  <value>100</value>
  <description>How many values for each key in the map-joined table should be cached in memory. </description>
</property>

<property>
  <name>hive.mapjoin.maxsize</name>
  <value>100000</value>
  <description>Maximum # of rows of the small table that can be handled by map-side join. If the size is reached and hive.task.progress is set, a fatal error counter is set and the job will be killed.</description>
</property>

<property>
  <name>hive.mapjoin.cache.numrows</name>
  <value>25000</value>
  <description>How many rows should be cached by jdbm for map join. </description>
</property>

<property>
  <name>hive.optimize.skewjoin</name>
  <value>false</value>
  <description>Whether to enable skew join optimization. </description>
</property>

<property>
  <name>hive.skewjoin.key</name>
  <value>100000</value>
  <description>Determines whether we have a skew key in a join. If we see more
	than the specified number of rows with the same key in the join operator,
	we treat the key as a skew join key. </description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.map.tasks</name>
  <value>10000</value>
  <description> Determines the number of map tasks used in the follow-up map join job
	for a skew join. It should be used together with hive.skewjoin.mapjoin.min.split
	to perform fine-grained control.</description>
</property>

<property>
  <name>hive.skewjoin.mapjoin.min.split</name>
  <value>33554432</value>
  <description> Determines the maximum number of map tasks used in the follow-up map join job
	for a skew join by specifying the minimum split size. It should be used together with
	hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.</description>
</property>

<property>
  <name>hive.mapred.mode</name>
  <value>nonstrict</value>
  <description>The mode in which the hive operations are being performed. In strict mode, some risky queries are not allowed to run</description>
</property>

<property>
  <name>hive.exec.script.maxerrsize</name>
  <value>100000</value>
  <description>Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling log partitions to capacity </description>
</property>

<property>
  <name>hive.exec.script.allow.partial.consumption</name>
  <value>false</value>
  <description> When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input.
  </description>
</property>

<property>
  <name>hive.script.operator.id.env.var</name>
  <value>HIVE_SCRIPT_OPERATOR_ID</value>
  <description> Name of the environment variable that holds the unique script operator ID in the user's transform function (the custom mapper/reducer that the user has specified in the query)
  </description>
</property>

<property>
  <name>hive.exec.compress.output</name>
  <value>false</value>
  <description> This controls whether the final outputs of a query (to a local/hdfs file or a hive table) is compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* </description>
</property>

<property>
  <name>hive.exec.compress.intermediate</name>
  <value>false</value>
  <description> This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* </description>
</property>

<property>
  <name>hive.exec.parallel</name>
  <value>false</value>
  <description>Whether to execute jobs in parallel</description>
</property>

<property>
  <name>hive.exec.parallel.thread.number</name>
  <value>8</value>
  <description>How many jobs at most can be executed in parallel</description>
</property>

<property>
  <name>hive.task.progress</name>
  <value>false</value>
  <description>Whether Hive should periodically update task progress counters during execution.  Enabling this allows task progress to be monitored more closely in the job tracker, but may impose a performance penalty.  This flag is automatically set to true for jobs with hive.exec.dynamic.partition set to true.</description>
</property>

<property>
  <name>hive.hwi.war.file</name>
  <value>lib/hive-hwi-0.7.1-cdh3u3.war</value>
  <description>This sets the path to the HWI war file, relative to ${HIVE_HOME}. </description>
</property>

<property>
  <name>hive.hwi.listen.host</name>
  <value>0.0.0.0</value>
  <description>This is the host address the Hive Web Interface will listen on</description>
</property>

<property>
  <name>hive.hwi.listen.port</name>
  <value>9999</value>
  <description>This is the port the Hive Web Interface will listen on</description>
</property>

<property>
  <name>hive.exec.pre.hooks</name>
  <value></value>
  <description>Comma-separated list of pre-execution hooks to be invoked for each statement.  A pre-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.</description>
</property>

<property>
  <name>hive.exec.post.hooks</name>
  <value></value>
  <description>Comma-separated list of post-execution hooks to be invoked for each statement.  A post-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.</description>
</property>

<property>
  <name>hive.merge.mapfiles</name>
  <value>true</value>
  <description>Merge small files at the end of a map-only job</description>
</property>

<property>
  <name>hive.merge.mapredfiles</name>
  <value>false</value>
  <description>Merge small files at the end of a map-reduce job</description>
</property>

<property>
  <name>hive.mergejob.maponly</name>
  <value>true</value>
  <description>Try to generate a map-only job for merging files if CombineHiveInputFormat is supported.</description>
</property>

<property>
  <name>hive.heartbeat.interval</name>
  <value>1000</value>
  <description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
</property>

<property>
  <name>hive.merge.size.per.task</name>
  <value>256000000</value>
  <description>Size of merged files at the end of the job</description>
</property>

<property>
  <name>hive.merge.smallfiles.avgsize</name>
  <value>16000000</value>
  <description>When the average output file size of a job is less than this number, Hive will start an additional map-reduce job to merge the output files into bigger files.  This is only done for map-only jobs if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.</description>
</property>


<property>
  <name>hive.mapjoin.hashtable.initialCapacity</name>
  <value>100000</value>
  <description>In the process of a map join, the key/value pairs will be held in the hashtable. This value is the initialCapacity of the in-memory hashtable</description>
</property>

<property>
  <name>hive.mapjoin.hashtable.loadfactor</name>
  <value>0.75</value>
  <description>In the process of a map join, the key/value pairs will be held in the hashtable. This value is the load factor for the in-memory hashtable</description>
</property>

<property>
  <name>hive.mapjoin.smalltable.filesize</name>
  <value>25000000</value>
  <description>The threshold for the input file size of the small tables; if the file size is smaller than this threshold, it will try to convert the common join into map join</description>
</property>

<property>
<name>mapred.job.tracker</name>
<value>foobar</value>
</property>

<property>
  <name>hive.mapjoin.localtask.max.memory.usage</name>
  <value>0.90</value>
  <description>How much memory the local task can use to hold the key/value pairs in the in-memory hash table; if the local task's memory usage exceeds this number, the local task aborts itself, meaning the data of the small table is too large to be held in memory.</description>
</property>

<property>
  <name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
  <value>0.55</value>
  <description>How much memory the local task can use to hold the key/value pairs in the in-memory hash table when this map join is followed by a group by; if the local task's memory usage exceeds this number, the local task aborts itself, meaning the data of the small table is too large to be held in memory.</description>
</property>

<property>
  <name>hive.mapjoin.check.memory.rows</name>
  <value>100000</value>
  <description>After how many processed rows the memory usage should be checked</description>
</property>

<property>
  <name>hive.auto.convert.join</name>
  <value>false</value>
  <description>Whether Hive enables the optimization of converting a common join into a map join based on the input file size</description>
</property>


<property>
  <name>hive.script.auto.progress</name>
  <value>false</value>
  <description>Whether Hive Transform/Map/Reduce clauses should automatically send progress information to the TaskTracker to avoid the task getting killed because of inactivity.  Hive sends progress information when the script is outputting to stderr.  This option removes the need to periodically produce stderr messages, but users should be cautious because it may prevent the TaskTracker from killing tasks whose scripts are stuck in infinite loops.  </description>
</property>

<property>
  <name>hive.script.serde</name>
  <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
  <description>The default serde for transmitting input data to and reading output data from the user scripts. </description>
</property>

<property>
  <name>hive.script.recordreader</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
  <description>The default record reader for reading data from the user scripts. </description>
</property>

<property>
  <name>hive.script.recordwriter</name>
  <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
  <description>The default record writer for writing data to the user scripts. </description>
</property>

<property>
  <name>hive.input.format</name>
  <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
  <description>The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.</description>
</property>

<property>
  <name>hive.udtf.auto.progress</name>
  <value>false</value>
  <description>Whether Hive should automatically send progress information to the TaskTracker when using UDTFs, to prevent the task getting killed because of inactivity.  Users should be cautious because this may prevent the TaskTracker from killing tasks with infinite loops.  </description>
</property>

<property>
  <name>hive.mapred.reduce.tasks.speculative.execution</name>
  <value>true</value>
  <description>Whether speculative execution for reducers should be turned on. </description>
</property>

<property>
  <name>hive.exec.counters.pull.interval</name>
  <value>1000</value>
  <description>The interval at which to poll the JobTracker for the counters of the running job. The smaller it is, the more load there will be on the JobTracker; the higher it is, the less granular the retrieved counter values will be.</description>
</property>

<property>
  <name>hive.enforce.bucketing</name>
  <value>false</value>
  <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. </description>
</property>

<property>
  <name>hive.enforce.sorting</name>
  <value>false</value>
  <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced. </description>
</property>

<property>
  <name>hive.metastore.ds.connection.url.hook</name>
  <value></value>
  <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used </description>
</property>

<property>
  <name>hive.metastore.ds.retry.attempts</name>
  <value>1</value>
  <description>The number of times to retry a metastore call if there was a connection error</description>
</property>

<property>
   <name>hive.metastore.ds.retry.interval</name>
   <value>1000</value>
   <description>The number of milliseconds between metastore retry attempts</description>
</property>

<property>
  <name>hive.metastore.server.min.threads</name>
  <value>200</value>
  <description>Minimum number of worker threads in the Thrift server's pool.</description>
</property>

<property>
  <name>hive.metastore.server.max.threads</name>
  <value>100000</value>
  <description>Maximum number of worker threads in the Thrift server's pool.</description>
</property>

<property>
  <name>hive.metastore.server.tcp.keepalive</name>
  <value>true</value>
  <description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
</property>

<property>
  <name>hive.metastore.sasl.enabled</name>
  <value>false</value>
  <description>If true, the metastore thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
</property>

<property>
  <name>hive.metastore.kerberos.keytab.file</name>
  <value></value>
  <description>The path to the Kerberos Keytab file containing the metastore thrift server's service principal.</description>
</property>

<property>
  <name>hive.metastore.kerberos.principal</name>
  <value>hive-metastore/_HOST@EXAMPLE.COM</value>
  <description>The service principal for the metastore thrift server. The special string _HOST will be replaced automatically with the correct host name.</description>
</property>

<property>
  <name>hive.metastore.cache.pinobjtypes</name>
  <value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
  <description>List of comma separated metastore object types that should be pinned in the cache</description>
</property>

<property>
  <name>hive.optimize.reducededuplication</name>
  <value>true</value>
  <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. This should always be set to true. Since it is a new feature, it has been made configurable.</description>
</property>

<property>
  <name>hive.exec.dynamic.partition</name>
  <value>false</value>
  <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
</property>

<property>
  <name>hive.exec.dynamic.partition.mode</name>
  <value>strict</value>
  <description>In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions.</description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions</name>
  <value>1000</value>
  <description>Maximum number of dynamic partitions allowed to be created in total.</description>
</property>

<property>
  <name>hive.exec.max.dynamic.partitions.pernode</name>
  <value>100</value>
  <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
</property>

<property>
  <name>hive.exec.max.created.files</name>
  <value>100000</value>
  <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
</property>

<property>
  <name>hive.exec.default.partition.name</name>
  <value>__HIVE_DEFAULT_PARTITION__</value>
  <description>The default partition name in case the dynamic partition column value is null/empty string or any other value that cannot be escaped. This value must not contain any special character used in HDFS URIs (e.g., ':', '%', '/' etc). The user has to be aware that the dynamic partition value should not contain this value, to avoid confusion.</description>
</property>

<property>
  <name>hive.stats.dbclass</name>
  <value>jdbc:derby</value>
  <description>The default database that stores temporary hive statistics.</description>
</property>

<property>
  <name>hive.stats.autogather</name>
  <value>true</value>
  <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
</property>

<property>
  <name>hive.stats.jdbcdriver</name>
  <value>org.apache.derby.jdbc.EmbeddedDriver</value>
  <description>The JDBC driver for the database that stores temporary hive statistics.</description>
</property>

<property>
  <name>hive.stats.dbconnectionstring</name>
  <value>jdbc:derby:;databaseName=TempStatsStore;create=true</value>
  <description>The default connection string for the database that stores temporary hive statistics.</description>
</property>

<property>
  <name>hive.stats.default.publisher</name>
  <value></value>
  <description>The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is not JDBC or HBase.</description>
</property>

<property>
  <name>hive.stats.default.aggregator</name>
  <value></value>
  <description>The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is not JDBC or HBase.</description>
</property>

<property>
  <name>hive.stats.jdbc.timeout</name>
  <value>30</value>
  <description>Timeout value (number of seconds) used by JDBC connection and statements.</description>
</property>

<property>
  <name>hive.stats.jdbc.atomic</name>
  <value>false</value>
  <description>If this is set to true then the metastore stats will be updated only if all types of stats (# of rows, # of files, # of bytes etc.) are available. Otherwise metastore stats are updated in a best effort fashion with whatever are available.</description>
</property>

<property>
  <name>hive.support.concurrency</name>
  <value>false</value>
  <description>Whether hive supports concurrency or not. A zookeeper instance must be up and running for the default hive lock manager to support read-write locks.</description>
</property>

<property>
  <name>hive.concurrency.manager</name>
  <value>org.apache.hadoop.hive.ql.lockmgr.ZooKeeperLockMgr</value>
  <description>The concurrency manager for hive.</description>
</property>

<property>
  <name>hive.lock.numretries</name>
  <value>100</value>
  <description>The number of times you want to try to get all the locks</description>
</property>

<property>
  <name>hive.lock.sleep.between.retries</name>
  <value>60</value>
  <description>The sleep time (in seconds) between various retries</description>
</property>

<property>
  <name>hive.zookeeper.quorum</name>
  <value></value>
  <description>The list of zookeeper servers to talk to. This is only needed for read/write locks.</description>
</property>

<property>
  <name>hive.zookeeper.client.port</name>
  <value>2181</value>
  <description>The port of zookeeper servers to talk to. This is only needed for read/write locks.</description>
</property>

<property>
  <name>hive.zookeeper.session.timeout</name>
  <value></value>
  <description>Zookeeper client's session timeout. The client is disconnected, and as a result, all locks released, if a heartbeat is not sent in the timeout.</description>
</property>

<property>
  <name>hive.zookeeper.namespace</name>
  <value>hive_zookeeper_namespace</value>
  <description>The parent node under which all zookeeper nodes are created.</description>
</property>

<property>
  <name>hive.zookeeper.clean.extra.nodes</name>
  <value>false</value>
  <description>Clean extra nodes at the end of the session.</description>
</property>

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
  <description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20</description>
</property>

<property>
  <name>hive.archive.enabled</name>
  <value>false</value>
  <description>Whether archiving operations are permitted</description>
</property>

<property>
  <name>hive.archive.har.parentdir.settable</name>
  <value>false</value>
  <description>In new Hadoop versions, the parent directory must be set while
  creating a HAR. Because this functionality is hard to detect with just version
  numbers, this conf var needs to be set manually.</description>
</property>

<property>
  <name>hive.fetch.output.serde</name>
  <value>org.apache.hadoop.hive.serde2.DelimitedJSONSerDe</value>
  <description>The serde used by FetchTask to serialize the fetch output.</description>
</property>

<property>
  <name>hive.exec.mode.local.auto</name>
  <value>false</value>
  <description> Let hive determine whether to run in local mode automatically </description>
</property>

<property>
  <name>hive.exec.drop.ignorenonexistent</name>
  <value>true</value>
  <description>
    Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view
  </description>
</property>

<property>
  <name>hive.exec.show.job.failure.debug.info</name>
  <value>false</value>
  <description>
  	If a job fails, whether to provide a link in the CLI to the task with the
  	most failures, along with debugging hints if applicable.
  </description>
</property>

<property>
  <name>hive.auto.progress.timeout</name>
  <value>0</value>
  <description>
    How long to run autoprogressor for the script/UDTF operators (in seconds).
    Set to 0 for forever.
  </description>
</property>

<!-- HBase Storage Handler Parameters -->

<property>
  <name>hive.hbase.wal.enabled</name>
  <value>true</value>
  <description>Whether writes to HBase should be forced to the write-ahead log.  Disabling this improves HBase write performance at the risk of lost writes in case of a crash.</description>
</property>

<property>
  <name>hive.table.parameters.default</name>
  <value></value>
  <description>Default property values for newly created tables</description>
</property>

<property>
  <name>hive.variable.substitute</name>
  <value>true</value>
  <description>This enables substitution using syntax like ${var} ${system:var} and ${env:var}.</description>
</property>


<property>
  <name>hive.security.authorization.enabled</name>
  <value>false</value>
  <description>enable or disable the hive client authorization</description>
</property>

<property>
  <name>hive.security.authorization.manager</name>
  <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
  <description>the hive client authorization manager class name.
  The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. 
  </description>
</property>

<property>
  <name>hive.security.authenticator.manager</name>
  <value>org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator</value>
  <description>hive client authenticator manager class name. 
  The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.</description>
</property>

<property>
  <name>hive.security.authorization.createtable.user.grants</name>
  <value></value>
  <description>the privileges automatically granted to some users whenever a table gets created. 
   An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY, 
   and grant create privilege to userZ whenever a new table is created.</description>
</property>

<property>
  <name>hive.security.authorization.createtable.group.grants</name>
  <value></value>
  <description>the privileges automatically granted to some groups whenever a table gets created. 
   An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY, 
   and grant create privilege to groupZ whenever a new table is created.</description>
</property>

<property>
  <name>hive.security.authorization.createtable.role.grants</name>
  <value></value>
  <description>the privileges automatically granted to some roles whenever a table gets created. 
   An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY, 
   and grant create privilege to roleZ whenever a new table is created.</description>
</property>

<property>
  <name>hive.security.authorization.createtable.owner.grants</name>
  <value></value>
  <description>the privileges automatically granted to the owner whenever a table gets created. 
   An example like "select,drop" will grant select and drop privilege to the owner of the table</description>
</property>

<property>
  <name>hive.error.on.empty.partition</name>
  <value>false</value>
  <description>Whether to throw an exception if a dynamic partition insert generates empty results.</description>
</property>

<property>
  <name>hive.index.compact.file.ignore.hdfs</name>
  <value>false</value>
  <description>If true, the HDFS location stored in the index file will be ignored at runtime.
  If the data has been moved or the name of the cluster has changed, the index data should still be usable.</description>
</property>

<property>
  <name>hive.rework.mapredwork</name>
  <value>false</value>
  <description>Whether to rework the mapred work or not.
  This was first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time.</description>
</property>

<property>
  <name>hive.output.file.extension</name>
  <value></value>
  <description>String used as a file extension for output files. If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise.</description>
</property>

<property>    
  <name>fs.default.name</name>
  <value>hdfs://{{name_node}}:8020/</value>
</property>

</configuration>


================================================
FILE: deploy/ec2/template/prepare_logs.sh
================================================
#!/bin/bash
cp /disk1/sparrow/shark_* /root/
touch foo.log && gzip -f *.log


================================================
FILE: deploy/ec2/template/shark-env.sh
================================================
#!/usr/bin/env bash

# Copyright (C) 2012 The Regents of The University of California.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# (Required) Amount of memory used per slave node. This should be in the same
# format as the JVM's -Xmx option, e.g. 300m or 1g.
export SPARK_MEM=15g

# (Required) Set the master program's memory
export SHARK_MASTER_MEM=1g

# (Required) Point to your Scala installation.
export SCALA_HOME="/opt/scala-2.9.3"
export SCALA_VERSION=2.9.3
# Use this for non-HVM amis:
export JAVA_HOME="/usr/lib/jvm/java-6-sun/jre"
# Use this for HVM amis:
#export JAVA_HOME="/usr/lib/jvm/jre-1.6.0-openjdk.x86_64"

# (Required) Point to the patched Hive binary distribution
export HIVE_DEV_HOME="/opt/hive"
export HIVE_HOME="$HIVE_DEV_HOME/build/dist"

# (Optional) Specify the location of Hive's configuration directory. By default,
# it points to $HIVE_HOME/conf
export HIVE_CONF_DIR="$HIVE_DEV_HOME/conf"

# For running Shark in distributed mode, set the following:
export HADOOP_HOME="/opt/hadoop/"
export SPARK_HOME="/disk1/tmp/spark/"

my_ip=`hostname -i`
before_me=`cat /root/sparrow_schedulers.txt | grep -B 1000 $my_ip | grep -v $my_ip | tr "\n" ","`
after_me=`cat /root/sparrow_schedulers.txt | grep -A 1000 $my_ip | grep -v $my_ip | tr "\n" ","`
export MASTER="sparrow@$my_ip:20503,"
# Temp fix: need to add this back to the master thing for fault tolerance $before_me$after_me"
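# Example (hypothetical addresses): if sparrow_schedulers.txt lists 10.0.0.1,
# 10.0.0.2 and 10.0.0.3 and this host is 10.0.0.2, then before_me="10.0.0.1,"
# and after_me="10.0.0.3,"; with the temp fix above, MASTER is simply
# "sparrow@10.0.0.2:20503,".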

# Only required if using Mesos:
#export MESOS_NATIVE_LIBRARY=/usr/local/lib/libmesos.so 

# (Optional) Extra classpath
#export SPARK_LIBRARY_PATH=""

# Java options
# On EC2, change the local.dir to /mnt/tmp
SPARK_JAVA_OPTS="-Dspark.local.dir=/tmp "
SPARK_JAVA_OPTS+="-Dspark.serializer=spark.KryoSerializer -Dspark.kryoserializer.buffer.mb=10 "
SPARK_JAVA_OPTS+="-verbose:gc -XX:-PrintGCDetails -XX:+PrintGCTimeStamps -XX:+UseConcMarkSweepGC"
SPARK_JAVA_OPTS+=" -Dsparrow.app.name=spark_`hostname -i`"
# Spark options that are usually sent to the executor using environment variables packaged
# from the Spark master, but that we need to set manually when running with Sparrow.
SPARK_JAVA_OPTS+=" -Dspark.broadcast.port=33624 -Dspark.driver.port=60500"
SPARK_JAVA_OPTS+=" -Dspark.io.compression.codec=spark.io.LZFCompressionCodec"
export SPARK_JAVA_OPTS


================================================
FILE: deploy/ec2/template/spark-env.sh
================================================
#!/usr/bin/env bash

# Set Spark environment variables for your site in this file. Some useful
# variables to set are:
# - MESOS_HOME, to point to your Mesos installation
# - SCALA_HOME, to point to your Scala installation
SCALA_HOME=/opt/scala-2.9.3
SPARK_CLASSPATH="/root/spark/core/lib/sparrow-1.0-SNAPSHOT.jar"

# Add Shark jars
SHARK_HOME="/root/shark"
SPARK_CLASSPATH+=:$SHARK_HOME/target/scala-$SCALA_VERSION/classes
for jar in `find $SHARK_HOME/lib -name '*jar'`; do
  SPARK_CLASSPATH+=:$jar
done
for jar in `find $SHARK_HOME/lib_managed/jars -name '*jar'`; do
  SPARK_CLASSPATH+=:$jar
done
for jar in `find $SHARK_HOME/lib_managed/bundles -name '*jar'`; do
  SPARK_CLASSPATH+=:$jar
done

# Add Hive jars.
export HIVE_HOME="/opt/hive/build/dist"
for jar in `find $HIVE_HOME/lib -name '*jar'`; do
  # Ignore the logging library since it has already been included with the Spark jar.
  if [[ "$jar" != *slf4j* ]]; then
    SPARK_CLASSPATH+=:$jar
  fi
done

# Add location of Hive configuration file.
SPARK_CLASSPATH+=:/opt/hive/conf/

SPARK_JAVA_OPTS="-Dspark.io.compression.codec=spark.io.LZFCompressionCodec -Dspark.local.dir=/disk1/spark-tmp"
# - SPARK_MEM, to change the amount of memory used per node (this should
#   be in the same format as the JVM's -Xmx option, e.g. 300m or 1g).
# - SPARK_LIBRARY_PATH, to add extra search paths for native libraries.




================================================
FILE: deploy/ec2/template/spark-run.sh
================================================
#!/usr/bin/env bash

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

SCALA_VERSION=2.9.3

# Figure out where the Scala framework is installed
FWDIR="$(cd `dirname $0`; pwd)"

# Export this as SPARK_HOME
export SPARK_HOME="$FWDIR"

# Load environment variables from conf/spark-env.sh, if it exists
if [ -e $FWDIR/conf/spark-env.sh ] ; then
  . $FWDIR/conf/spark-env.sh
fi

if [ -z "$1" ]; then
  echo "Usage: run <spark-class> [<args>]" >&2
  exit 1
fi

# If this is a standalone cluster daemon, reset SPARK_JAVA_OPTS and SPARK_MEM to reasonable
# values for that; it doesn't need a lot
if [ "$1" = "spark.deploy.master.Master" -o "$1" = "spark.deploy.worker.Worker" ]; then
  SPARK_MEM=${SPARK_DAEMON_MEMORY:-512m}
  SPARK_DAEMON_JAVA_OPTS="$SPARK_DAEMON_JAVA_OPTS -Dspark.akka.logLifecycleEvents=true"
  # Do not overwrite SPARK_JAVA_OPTS environment variable in this script
  OUR_JAVA_OPTS="$SPARK_DAEMON_JAVA_OPTS"   # Empty by default
else
  OUR_JAVA_OPTS="$SPARK_JAVA_OPTS"
fi


# Add java opts for master, worker, executor. The opts may be null
case "$1" in
  'spark.deploy.master.Master')
    OUR_JAVA_OPTS="$OUR_JAVA_OPTS $SPARK_MASTER_OPTS"
    ;;
  'spark.deploy.worker.Worker')
    OUR_JAVA_OPTS="$OUR_JAVA_OPTS $SPARK_WORKER_OPTS"
    ;;
  'spark.executor.StandaloneExecutorBackend')
    OUR_JAVA_OPTS="$OUR_JAVA_OPTS $SPARK_EXECUTOR_OPTS"
    ;;
  'spark.executor.MesosExecutorBackend')
    OUR_JAVA_OPTS="$OUR_JAVA_OPTS $SPARK_EXECUTOR_OPTS"
    ;;
  'spark.repl.Main')
    OUR_JAVA_OPTS="$OUR_JAVA_OPTS $SPARK_REPL_OPTS"
    ;;
esac

# Figure out whether to run our class with java or with the scala launcher.
# In most cases, we'd prefer to execute our process with java because scala
# creates a shell script as the parent of its Java process, which makes it
# hard to kill the child with stuff like Process.destroy(). However, for
# the Spark shell, the wrapper is necessary to properly reset the terminal
# when we exit, so we allow it to set a variable to launch with scala.
# We still fall back on java for the shell if this is a "release" created
# from make-distribution.sh since it's possible scala is not installed
# but we have everything we need to run the shell.
if [[ "$SPARK_LAUNCH_WITH_SCALA" == "1" && ! -f "$FWDIR/RELEASE" ]]; then
  if [ "$SCALA_HOME" ]; then
    RUNNER="${SCALA_HOME}/bin/scala"
  else
    if [ `command -v scala` ]; then
      RUNNER="scala"
    else
      echo "SCALA_HOME is not set and scala is not in PATH" >&2
      exit 1
    fi
  fi
else
  if [ -n "${JAVA_HOME}" ]; then
    RUNNER="${JAVA_HOME}/bin/java"
  else
    if [ `command -v java` ]; then
      RUNNER="java"
    else
      echo "JAVA_HOME is not set" >&2
      exit 1
    fi
  fi
  if [[ ! -f "$FWDIR/RELEASE" && -z "$SCALA_LIBRARY_PATH" ]]; then
    if [ -z "$SCALA_HOME" ]; then
      echo "SCALA_HOME is not set" >&2
      exit 1
    fi
    SCALA_LIBRARY_PATH="$SCALA_HOME/lib"
  fi
fi

# Figure out how much memory to use per executor and set it as an environment
# variable so that our process sees it and can report it to Mesos
if [ -z "$SPARK_MEM" ] ; then
  SPARK_MEM="512m"
fi
export SPARK_MEM

# Set JAVA_OPTS to be able to load native libraries and to set heap size
JAVA_OPTS="$OUR_JAVA_OPTS"
JAVA_OPTS="$JAVA_OPTS -Djava.library.path=$SPARK_LIBRARY_PATH"
JAVA_OPTS="$JAVA_OPTS -Xms$SPARK_MEM -Xmx$SPARK_MEM"
# Load extra JAVA_OPTS from conf/java-opts, if it exists
if [ -e $FWDIR/conf/java-opts ] ; then
  JAVA_OPTS="$JAVA_OPTS `cat $FWDIR/conf/java-opts`"
fi
export JAVA_OPTS
# Attention: when changing the way the JAVA_OPTS are assembled, the change must be reflected in ExecutorRunner.scala!

if [ ! -f "$FWDIR/RELEASE" ]; then
  CORE_DIR="$FWDIR/core"
  EXAMPLES_DIR="$FWDIR/examples"
  REPL_DIR="$FWDIR/repl"

  # Exit if the user hasn't compiled Spark
  if [ ! -e "$CORE_DIR/target" ]; then
    echo "Failed to find Spark classes in $CORE_DIR/target" >&2
    echo "You need to compile Spark before running this program" >&2
    exit 1
  fi

  if [[ "$@" = *repl* && ! -e "$REPL_DIR/target" ]]; then
    echo "Failed to find Spark classes in $REPL_DIR/target" >&2
    echo "You need to compile Spark repl module before running this program" >&2
    exit 1
  fi

  # Figure out the JAR file that our examples were packaged into. This includes a bit of a hack
  # to avoid the -sources and -doc packages that are built by publish-local.
  if [ -e "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar ]; then
    # Use the JAR from the SBT build
    export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar`
  fi
  if [ -e "$EXAMPLES_DIR/target/spark-examples"*[0-9T].jar ]; then
    # Use the JAR from the Maven build
    export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR/target/spark-examples"*[0-9T].jar`
  fi
fi

# Compute classpath using external script
CLASSPATH=`$FWDIR/bin/compute-classpath.sh`
export CLASSPATH

if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
  EXTRA_ARGS=""     # Java options will be passed to scala as JAVA_OPTS
else
  # The JVM doesn't read JAVA_OPTS by default so we need to pass it in
  EXTRA_ARGS="$JAVA_OPTS"
fi

command="$RUNNER -cp \"$CLASSPATH\" $EXTRA_ARGS $@"
if [ "$SPARK_PRINT_LAUNCH_COMMAND" == "1" ]; then
  echo "Spark Command: $command"
  echo "========================================"
  echo
fi

exec "$RUNNER" -cp "$CLASSPATH" $EXTRA_ARGS "$@"


================================================
FILE: deploy/ec2/template/sparrow.conf
================================================
deployment.mode = configbased
static.node_monitors = {{static_backends}}
system.cpus = {{cpus}}
static.app.name = spark
scheduler.thrift.threads = 200
log_level = DEBUG
agent.thrift.threads = 200
internal_agent.thrift.threads = 200
sample.ratio = {{sample_ratio}}
sample.ratio.constrained = {{sample_ratio_constrained}}
node_monitor.task_scheduler = {{node_monitor_task_scheduler}}
cancellation = True
spread_evenly_task_set_size={{num_partitions}}
per_task = True


================================================
FILE: deploy/ec2/template/sparrow_schedulers.txt
================================================
{{sparrow_schedulers}}


================================================
FILE: deploy/ec2/template/start_mesos_master.sh
================================================
#!/bin/bash
# Start mesos master

# Make sure software firewall is stopped (ec2 firewall subsumes)
/etc/init.d/iptables stop > /dev/null 2>&1

APPCHK=$(ps aux | grep -v grep | grep -c mesos-master)

if [ ! $APPCHK = '0' ]; then
  echo "Mesos master already running, cannot start it."
  exit 1;
fi

LOG=/disk1/sparrow/mesosMaster.log
nohup /opt/mesos/bin/mesos-master > $LOG 2>&1 &
PID=$!
echo "Logging to $LOG"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Mesos masger failed to start"
  exit 1;
else
  echo "Mesos master started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_mesos_slave.sh
================================================
#!/bin/bash
# Start mesos slave

# Make sure software firewall is stopped (ec2 firewall subsumes)
/etc/init.d/iptables stop > /dev/null 2>&1

APPCHK=$(ps aux | grep -v grep | grep -c mesos-slave)

export MESOS_HOME="/opt/mesos"

CPUS=`grep processor /proc/cpuinfo | wc -l`
#CPUS=8
MEM_KB=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
MEM=$[(MEM_KB - 1024 * 1024) / 1024] # Leaves 1 GB free
RES_OPTS="--resources=cpus:$CPUS;mem:$MEM"
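# Worked example (hypothetical machine): with 8 CPUs and MemTotal of 16777216 kB
# (16 GB), MEM = (16777216 - 1048576) / 1024 = 15360 MB, so the slave is started
# with --resources=cpus:8;mem:15360.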

if [ ! $APPCHK = '0' ]; then
  echo "Mesos slave already running, cannot start it."
  exit 1;
fi
MASTER=`cat frontends.txt | head -n 1`
LOG=/disk1/sparrow/mesosSlave.log
HOSTNAME=`ec2metadata  | grep local-hostname  | cut -d " " -f 2`
export SPARK_HOSTNAME=$HOSTNAME

nohup /opt/mesos/bin/mesos-slave $RES_OPTS --master=mesos://master@$MASTER:5050 > $LOG 2>&1 &
PID=$!
echo "Logging to $LOG"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Mesos slave failed to start"
  exit 1;
else
  echo "Mesos slave started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_proto_backend.sh
================================================
#!/bin/bash
# Start Prototype backend

LOG=/disk1/sparrow/protoBackend

APPCHK=$(ps aux | grep -v grep | grep -c ProtoBackend)

if [ ! $APPCHK = '0' ]; then
  echo "Backend already running, cannot start it."
  exit 1;
fi

nohup java -cp ./sparrow/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.examples.ProtoBackend  > $LOG 2>&1 &
PID=$!
echo "Logging to $LOG"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Proto backend failed to start"
  exit 1;
else
  echo "Proto backend started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_proto_frontend.sh
================================================
#!/bin/bash
# Start Prototype frontend

LOG=/disk1/sparrow/protoFrontend.log

APPCHK=$(ps aux | grep -v grep | grep -c {{frontend_type}})

if [ ! $APPCHK = '0' ]; then
  echo "Frontend already running, cannot start it."
  exit 1;
fi

nohup java -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCTimeStamps -Xmx2046m -XX:+PrintGCDetails  -cp ./sparrow/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.examples.{{frontend_type}} -c frontend.conf > $LOG 2>&1 &
PID=$!
echo "Logging to $LOG"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Proto frontend failed to start"
  exit 1;
else
  echo "Proto frontend started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_shark_tpch.sh
================================================
#!/bin/bash
# Start shark tpch workload
ulimit -n 16384

APPCHK=$(ps aux | grep -v grep |grep -v start| grep java | grep -c spark)

ip=`ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
log="/disk1/sparrow/shark_$ip.log"

public_hostname=`ec2metadata  | grep public-hostname  | cut -d " " -f 2`
fe_num=`cat frontends.txt | grep -n $public_hostname | cut -d ":" -f 1`

cd /disk1/sparrow/
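# The randomized sleep below pauses for roughly 10-90 ms so that frontends
# started at the same time don't launch their workloads in lockstep.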
sleep .0$[ ( $RANDOM % 10 ) + 1 ]s # Helps avoid synchronization
/root/shark/run shark.sparrow.SparrowTPCHRunner /root/tpch/tpch_workload_$fe_num {{inter_query_delay}} > $log 2>&1 &

PID=$!
echo "Logging to $log"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Shark TPCH failed to start"
  exit 1;
else
  echo "Shark TPCH started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_spark_backend.sh
================================================
#!/bin/bash
# Start Spark backend
ulimit -n 16384
FRONTENDS=`cat frontends.txt`


APPCHK=$(ps aux | grep -v grep | grep spark |grep -c java)

if [ ! $APPCHK = '0' ]; then
  echo "Spark already running, cannot start it."
  exit 1;
fi

port=8300
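# Start one Spark executor backend per frontend; each executor registers with
# Sparrow under its own app name (spark_<frontend ip>) and its own port,
# starting at 8300.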
for fe in $FRONTENDS; do
  ip=`dig +short $fe`
  id=spark_$ip
  log=/disk1/sparrow/$id
  chmod 755 spark-run.sh
  HOSTNAME=`ec2metadata  | grep local-hostname  | cut -d " " -f 2`
  export SPARK_HOSTNAME=$HOSTNAME
  export SPARK_MEM={{spark_backend_mem}}
  name=`host $ip | cut -d " " -f 5 | cut -d "." -f 1-3`

  /root/spark/run -Dspark.scheduler=sparrow -Dspark.master.port=7077 -Dspark.hostname=$HOSTNAME -Dspark.serializer=spark.KryoSerializer -Dspark.driver.host=$name -Dspark.driver.port=60500 -Dsparrow.app.name=$id -Dsparrow.app.port=$port -Dspark.httpBroadcast.uri=http://$ip:33624 spark.scheduler.sparrow.SparrowExecutorBackend > $log 2>&1 &
  ((port++))
  PID=$!
  echo "Logging to $log"
  sleep .5
  if ! kill -0 $PID > /dev/null 2>&1; then
    echo "Spark executor failed to start"
    exit 1;
  else
    echo "Spark executor started with pid $PID"
  fi
done


================================================
FILE: deploy/ec2/template/start_spark_frontend.sh
================================================
#!/bin/bash
# Start Prototype frontend
ulimit -n 16384

APPCHK=$(ps aux | grep -v grep |grep -v start| grep java | grep -c spark)
SCHED=$1
RATE=$2
MAX=$3
QNUM=$4
PAR_LEVEL=$5
MESOS_MASTER=`cat frontends.txt | head -n 1`

/etc/init.d/iptables stop > /dev/null 2>&1

if [ "$SCHED" = "" ];
then
  echo "Scheduler required"
  exit -1
fi

if [ ! $APPCHK = '0' ]; then
  echo "Frontend already running, cannot start it."
  exit 1;
fi

ip=`ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
log="/disk1/sparrow/spark_$ip.log"

chmod 755 spark-run.sh
HOSTNAME=`ec2metadata  | grep local-hostname  | cut -d " " -f 2`

if [ "$SCHED" = "sparrow" ];
then
  export SPARK_HOSTNAME=$HOSTNAME
  nohup ./spark-run.sh -Dspark.master.host=$ip -Dspark.master.port=7077 -Dsparrow.app.name=spark_$ip -Dspark.scheduler=sparrow spark.SparkTPCHRunner sparrow@localhost:20503 hdfs://{{name_node}}:8020/`hostname -i`/ $QNUM true $RATE $MAX /root/spark_tpch_$ip.log $PAR_LEVEL > $log 2>&1 &
else
  SPARK_MEM="1300m"
  export SPARK_MEM
  nohup ./spark-run.sh -Dspark.master.host=$ip -Dspark.master.port=7077 -Dspark.locality.wait=500000 spark.SparkTPCHRunner master@$MESOS_MASTER:5050 hdfs://{{name_node}}:8020/`hostname -i`/ $QNUM true $RATE $MAX /root/spark_tpch_$ip.log $PAR_LEVEL > $log 2>&1 &
fi

PID=$!
echo "Logging to $log"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Proto frontend failed to start"
  exit 1;
else
  echo "Proto frontend started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_sparrow.sh
================================================
#!/bin/bash
# Start Sparrow locally
ulimit -n 16384

LOG=/disk1/sparrow/sparrowDaemon.log
IP=`ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`

ip_there=`cat sparrow.conf |grep hostname`
if [ "X$ip_there" == "X" ]; then
  echo "hostname = $IP" >> sparrow.conf
fi

# Make sure software firewall is stopped (ec2 firewall subsumes)
/etc/init.d/iptables stop > /dev/null 2>&1

APPCHK=$(ps aux | grep -v grep | grep -c SparrowDaemon)

if [ ! $APPCHK = '0' ]; then
  echo "Sparrow already running, cannot start it."
  exit 1;
fi

# -XX:MaxGCPauseMillis=3 
# removed nice -n -20
nohup java -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCTimeStamps -Xmx2046m -XX:+PrintGCDetails -cp ./sparrow/sparrow-1.0-SNAPSHOT.jar edu.berkeley.sparrow.daemon.SparrowDaemon -c sparrow.conf > $LOG 2>&1 &
PID=$!
echo "Logging to $LOG"
sleep 1
if ! kill -0 $PID > /dev/null 2>&1; then
  echo "Sparrow Daemon failed to start"
  exit 1;
else
  echo "Sparrow Daemon started with pid $PID"
  exit 0;
fi


================================================
FILE: deploy/ec2/template/start_throughput_exp_spark.sh
================================================
#!/bin/bash
# Starts an experiment to test scheduler throughput with spark.
# Starts the master and slaves, and then the throughput
# tester.
my_ip=`hostname -i`
log="/disk1/sparrow/spark_throughput_$my_ip.log"

# Spark needs the list of backends in order to start them all.
cp ~/backends.txt /root/spark/conf/slaves
/root/spark/bin/start-all.sh

# Give the master and workers a moment to get started.
echo "Done starting master and workers; sleeping before starting tester"
sleep 5s
echo "Starting throughput tester"

/root/spark/run spark.scheduler.sparrow.ThroughputTester "spark://`hostname`:7077" {{total_cores}} 60000 10000,1000,500,100 > $log 2>&1 &



================================================
FILE: deploy/ec2/template/start_throughput_exp_sparrow.sh
================================================
#!/bin/bash
# Starts an experiment to test scheduler throughput.
my_ip=`hostname -i`
log="/disk1/sparrow/spark_$my_ip.log"
# for sparrow master: "sparrow@$my_ip:20503"

/root/spark/run -Dspark.serializer=spark.KryoSerializer -Dsparrow.app.name=spark_`hostname -i` -Dspark.broadcast.port=33624 -Dspark.driver.port=60500 spark.scheduler.sparrow.ThroughputTester "sparrow@$my_ip:20503" {{total_cores}} 60000 1001,1002,1375,250,100,50 > $log 2>&1 &


================================================
FILE: deploy/ec2/template/stop_mesos_master.sh
================================================
#!/bin/bash
# Stop mesos master

APPCHK=$(ps aux | grep -v grep | grep -c mesos-master)

if [ $APPCHK = '0' ]; then
  echo "Mesos master is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep mesos-master| grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped mesos master process."
exit 0;


================================================
FILE: deploy/ec2/template/stop_mesos_slave.sh
================================================
#!/bin/bash
# Stop mesos slave

APPCHK=$(ps aux | grep -v grep | grep -c mesos-slave)

if [ $APPCHK = '0' ]; then
  echo "Mesos slave is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep mesos-slave| grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped mesos slave process."
exit 0;


================================================
FILE: deploy/ec2/template/stop_proto_backend.sh
================================================
#!/bin/bash
# Stop proto backend locally

APPCHK=$(ps aux | grep -v grep | grep -c ProtoBackend)

if [ $APPCHK = '0' ]; then
  echo "ProtoBackend is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep ProtoBackend |grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped ProtoBackend process"
exit 0;



================================================
FILE: deploy/ec2/template/stop_proto_frontend.sh
================================================
#!/bin/bash
# Stop proto frontend locally

APPCHK=$(ps aux | grep -v grep | grep -c {{frontend_type}})

if [ $APPCHK = '0' ]; then
  echo "{{frontend_type}} is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep {{frontend_type}} |grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped {{frontend_type}} process"
exit 0;


================================================
FILE: deploy/ec2/template/stop_spark_backend.sh
================================================
#!/bin/bash
# Stop spark backend locally

APPCHK=$(ps aux | grep -v grep | grep -v stop | grep -c spark)

if [ $APPCHK = '0' ]; then
  echo "Spark is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep spark |grep -v grep | grep -v stop | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped spark process"
exit 0;



================================================
FILE: deploy/ec2/template/stop_spark_frontend.sh
================================================
#!/bin/bash
# Stop spark frontend locally

APPCHK=$(ps aux | grep -v grep | grep -v stop | grep -c spark)

if [ $APPCHK = '0' ]; then
  echo "Spark is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep spark | grep -v stop | grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped spark process"
exit 0;


================================================
FILE: deploy/ec2/template/stop_sparrow.sh
================================================
#!/bin/bash
# Stop sparrow locally

APPCHK=$(ps aux | grep -v grep | grep -c SparrowDaemon)

if [ $APPCHK = '0' ]; then
  echo "Sparrow is not running. Doing nothing."
  exit 0;
fi
ps -ef |grep SparrowDaemon |grep -v grep | awk '{ print $2; }' | xargs -I {} kill -9 {}
echo "Stopped Sparrow process"
exit 0;
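
# Editor's note (not part of the repository): every stop_*.sh above follows the
# same pattern: count matching processes with ps/grep, exit early if nothing is
# running, otherwise pull the PIDs out with awk and kill -9 them. A minimal
# sketch of the same idea using pgrep/pkill, with a hypothetical helper name
# (untested; unlike the spark scripts above it does not exclude the stop script
# itself from matching):
#
#   stop_by_pattern() {
#     local pattern="$1" name="$2"
#     # pgrep -f matches against the full command line, like grep over ps -ef.
#     if ! pgrep -f "$pattern" > /dev/null; then
#       echo "$name is not running. Doing nothing."
#       return 0
#     fi
#     pkill -9 -f "$pattern"
#     echo "Stopped $name process."
#   }
#
#   # Usage: stop_by_pattern SparrowDaemon Sparrow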


================================================
FILE: deploy/ec2/template/tpch/make_base_tables.hql
================================================
drop table if exists lineitem;
drop table if exists orders;
drop table if exists denorm;
drop table if exists part;
drop table if exists supplier;
drop table if exists nation;
drop table if exists partsupp;
drop table if exists customer;
drop table if exists region;

create external table region (R_REGIONKEY INT, R_NAME STRING, R_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/region';
create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';


================================================
FILE: deploy/ec2/template/tpch/make_denorm_cached.hql
================================================
set mapred.reduce.tasks={{num_partitions}};
set hive.map.aggr=false;
create table denorm_cached as
select r_regionkey, l_linenumber, r_name, n_nationkey, n_name, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, l_orderkey, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_returnflag, l_shipdate, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_tax, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_shippriority, o_comment, c_nationkey, c_custkey, c_name, c_mktsegment
from denorm
group by r_regionkey, l_linenumber, r_name, n_nationkey, n_name, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, l_orderkey, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_returnflag, l_shipdate, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_tax, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_shippriority, o_comment, c_nationkey, c_custkey, c_name, c_mktsegment;
set hive.map.aggr=true;
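
-- Editor's note (not part of the repository): grouping by every selected
-- column is equivalent to a DISTINCT over the full row; its purpose here
-- appears to be forcing a reduce stage so that, with hive.map.aggr=false,
-- the cached table is written in exactly mapred.reduce.tasks
-- ({{num_partitions}}) partitions. A minimal sketch of the same trick on a
-- hypothetical two-column table:
--
--   set mapred.reduce.tasks=8;
--   set hive.map.aggr=false;
--   create table demo_cached as
--   select col_a, col_b from demo_source
--   group by col_a, col_b;          -- forces a shuffle into 8 partitions
--   set hive.map.aggr=true;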


================================================
FILE: deploy/ec2/template/tpch/make_denorm_table_primary.hql
================================================
SET mapred.reduce.tasks={{reduce_tasks}};
drop table if exists denorm;
create table denorm
row format delimited fields terminated by '|'
STORED AS TEXTFILE
AS
select r_regionkey, l_linenumber, r_name, n_nationkey, n_name, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, l_orderkey, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_returnflag, l_shipdate, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_tax, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_shippriority, o_comment, c_nationkey, c_custkey, c_name, c_mktsegment 
  from 
  (select * from 
    (select * from
      (select * from
	(select * from
	  (select * from 
	    (select r_regionkey, r_name, n_nationkey, n_name from region r
	      join nation n on n.n_regionkey = r.r_regionkey) nr
	    join supplier s on s.s_nationkey = nr.n_nationkey) nrs
	  join partsupp ps on nrs.s_suppkey = ps.ps_suppkey) nrsps
	join part p on p.p_partkey = nrsps.ps_partkey) nrspsp
      join lineitem l on l.l_partkey = nrspsp.ps_partkey and
			 l.l_suppkey = nrspsp.ps_suppkey) nrspspl
    join orders o on o.o_orderkey = nrspspl.l_orderkey) nrspsplo
  join customer c on c.c_custkey = nrspsplo.o_custkey;
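
-- Editor's note (not part of the repository): the nested subqueries above walk
-- outward from region through seven joins over eight tables. The same join
-- chain written flat, as an untested sketch with the column list abbreviated
-- to *:
--
--   select *
--   from region r
--     join nation   n  on n.n_regionkey = r.r_regionkey
--     join supplier s  on s.s_nationkey = n.n_nationkey
--     join partsupp ps on ps.ps_suppkey = s.s_suppkey
--     join part     p  on p.p_partkey   = ps.ps_partkey
--     join lineitem l  on l.l_partkey   = ps.ps_partkey
--                     and l.l_suppkey   = ps.ps_suppkey
--     join orders   o  on o.o_orderkey  = l.l_orderkey
--     join customer c  on c.c_custkey   = o.o_custkey;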


================================================
FILE: deploy/ec2/template/tpch/make_denorm_table_secondary.hql
================================================
create external table denorm (r_regionkey int, l_linenumber int, r_name string, n_nationkey int, n_name string, s_suppkey int, s_name string, s_address string, s_nationkey int, s_phone string, s_acctbal double, s_comment string, ps_partkey int, ps_suppkey int, ps_availqty int, ps_supplycost double, p_partkey int, p_name string, p_mfgr string, p_brand string, p_type string, p_size int, p_container string, l_orderkey int, l_partkey int, l_suppkey int, l_quantity double, l_extendedprice double, l_discount double, l_returnflag string, l_shipdate string, l_linestatus string, l_commitdate string, l_receiptdate string, l_shipinstruct string, l_shipmode string, l_tax double, o_orderkey int, o_custkey int, o_orderstatus string, o_totalprice double, o_orderdate string, o_orderpriority string, o_shippriority int, o_comment string, c_nationkey int, c_custkey int, c_name string, c_mktsegment string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/denorm-warehouse/denorm/';
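
-- Editor's note (assumption, not part of the repository): this looks like the
-- counterpart to make_denorm_table_primary.hql; rather than recomputing the
-- join, a secondary cluster maps an external table over the denormalized data
-- that the primary cluster already wrote under
-- '/tpch/denorm-warehouse/denorm/' in HDFS.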


================================================
FILE: deploy/ec2/template/tpch/tpch_workload_1
================================================
set mapred.reduce.tasks={{num_partitions}};
set hive.map.aggr=false;
drop table if exists denorm_cached;
create table denorm_cached as
select --SPREAD_EVENLY--
  r_regionkey, l_linenumber, r_name, n_nationkey, n_name, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, l_orderkey, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_returnflag, l_shipdate, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_tax, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_shippriority, o_comment, c_nationkey, c_custkey, c_name, c_mktsegment
from denorm
group by r_regionkey, l_linenumber, r_name, n_nationkey, n_name, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ps_partkey, ps_suppkey, ps_availqty, ps_supplycost, p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, l_orderkey, l_partkey, l_suppkey, l_quantity, l_extendedprice, l_discount, l_returnflag, l_shipdate, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_tax, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_shippriority, o_comment, c_nationkey, c_custkey, c_name, c_mktsegment;
set hive.map.aggr=true;
select --SPREAD_EVENLY--
  count(*) from denorm_cached;
select --SPREAD_EVENLY--
  count(*) from denorm_cached;
set mapred.reduce.tasks={{reduce_tasks}};
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --3-- 
  l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
from 
  denorm_cached
where 
  o_orderdate < '1995-03-15' and l_shipdate > '1995-03-15' and c_mktsegment = 'BUILDING'
group by l_orderkey, o_orderdate, o_shippriority 
order by revenue desc, o_orderdate 
limit 10;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
select --6--
sum(l_extendedprice*l_discount) as revenue
from 
  denorm_cached
where 
  l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount >= 0.05 and l_discount <= 0.07
  and l_quantity < 24;
select --12--
  l_shipmode,
  sum(case
    when o_orderpriority ='1-URGENT'
         or o_orderpriority ='2-HIGH'
    then 1
    else 0
end
  ) as high_line_count,
  sum(case
    when o_orderpriority <> '1-URGENT'
         and o_orderpriority <> '2-HIGH'
    then 1
    else 0
end
  ) as low_line_count

from denorm_cached
where 
  l_receiptdate < '1995-01-01' and
  (l_shipmode = 'MAIL' or l_shipmode = 'SHIP') and
  l_shipdate < l_commitdate and l_receiptdate >= '1994-01-01' and
  l_commitdate < l_receiptdate
group by l_shipmode
order by l_shipmode;
select --4--
  count(1), o_orderpriority from 
  (select distinct l_orderkey, o_orderpriority from denorm_cached where
    o_orderdate >= '1993-07-01' and 
    o_orderdate < '1993-10-01' and
    l_commitdate < l_receiptdate) a
  group by o_orderpriority;
s
│   │   │                   ├── PongService.java
│   │   │                   ├── SchedulerService.java
│   │   │                   ├── SchedulerStateStoreService.java
│   │   │                   ├── StateStoreService.java
│   │   │                   ├── TCancelTaskReservationsRequest.java
│   │   │                   ├── TEnqueueTaskReservationsRequest.java
│   │   │                   ├── TFullTaskId.java
│   │   │                   ├── THostPort.java
│   │   │                   ├── TNodeState.java
│   │   │                   ├── TPlacementPreference.java
│   │   │                   ├── TResourceVector.java
│   │   │                   ├── TSchedulingRequest.java
│   │   │                   ├── TTaskLaunchSpec.java
│   │   │                   ├── TTaskSpec.java
│   │   │                   └── TUserGroupInfo.java
│   │   ├── java/
│   │   │   └── edu/
│   │   │       └── berkeley/
│   │   │           └── sparrow/
│   │   │               ├── api/
│   │   │               │   ├── SparrowBackendClient.java
│   │   │               │   └── SparrowFrontendClient.java
│   │   │               ├── daemon/
│   │   │               │   ├── SparrowConf.java
│   │   │               │   ├── SparrowDaemon.java
│   │   │               │   ├── StandaloneStateStore.java
│   │   │               │   ├── nodemonitor/
│   │   │               │   │   ├── ConfigNodeMonitorState.java
│   │   │               │   │   ├── FifoTaskScheduler.java
│   │   │               │   │   ├── NoQueueTaskScheduler.java
│   │   │               │   │   ├── NodeMonitor.java
│   │   │               │   │   ├── NodeMonitorState.java
│   │   │               │   │   ├── NodeMonitorThrift.java
│   │   │               │   │   ├── PriorityTaskScheduler.java
│   │   │               │   │   ├── RoundRobinTaskScheduler.java
│   │   │               │   │   ├── StandaloneNodeMonitorState.java
│   │   │               │   │   ├── TaskLauncherService.java
│   │   │               │   │   └── TaskScheduler.java
│   │   │               │   ├── scheduler/
│   │   │               │   │   ├── CancellationService.java
│   │   │               │   │   ├── ConfigSchedulerState.java
│   │   │               │   │   ├── ConstrainedTaskPlacer.java
│   │   │               │   │   ├── Scheduler.java
│   │   │               │   │   ├── SchedulerState.java
│   │   │               │   │   ├── SchedulerThrift.java
│   │   │               │   │   ├── StandaloneSchedulerState.java
│   │   │               │   │   ├── TaskPlacer.java
│   │   │               │   │   └── UnconstrainedTaskPlacer.java
│   │   │               │   └── util/
│   │   │               │       ├── ConfigUtil.java
│   │   │               │       ├── Logging.java
│   │   │               │       ├── Network.java
│   │   │               │       ├── Resolution.java
│   │   │               │       ├── Resources.java
│   │   │               │       ├── Serialization.java
│   │   │               │       ├── TClients.java
│   │   │               │       ├── TServers.java
│   │   │               │       └── ThriftClientPool.java
│   │   │               └── examples/
│   │   │                   ├── BackendBenchmarkProfiler.java
│   │   │                   ├── FairnessTestingFrontend.java
│   │   │                   ├── HeterogeneousFrontend.java
│   │   │                   ├── PingClient.java
│   │   │                   ├── PongServer.java
│   │   │                   ├── ProtoBackend.java
│   │   │                   ├── ProtoFrontend.java
│   │   │                   ├── ProtoFrontendAsync.java
│   │   │                   ├── SimpleBackend.java
│   │   │                   ├── SimpleFrontend.java
│   │   │                   ├── ThriftPongClient.java
│   │   │                   ├── ThriftPongServer.java
│   │   │                   ├── ThroughputTestingFrontend.java
│   │   │                   └── readme.markdown
│   │   ├── python/
│   │   │   ├── README
│   │   │   ├── get_response_time.py
│   │   │   ├── get_utilization.py
│   │   │   ├── parse_logs.py
│   │   │   ├── parse_logs.sh
│   │   │   ├── parse_per_task_logs.py
│   │   │   ├── parse_per_task_logs.sh
│   │   │   ├── parse_tpch_logs.py
│   │   │   ├── parse_tpch_logs.sh
│   │   │   ├── service_per_node.py
│   │   │   └── third_party/
│   │   │       └── stats.py
│   │   └── thrift/
│   │       ├── build.sh
│   │       ├── service.thrift
│   │       └── types.thrift
│   └── test/
│       └── java/
│           └── edu/
│               └── berkeley/
│                   └── sparrow/
│                       └── daemon/
│                           ├── nodemonitor/
│                           │   └── TestTaskScheduler.java
│                           ├── scheduler/
│                           │   ├── TestConstrainedTaskPlacer.java
│                           │   └── TestUnconstrainedTaskPlacer.java
│                           └── util/
│                               └── TestThriftClientPool.java
└── upload.py
SYMBOL INDEX (6283 symbols across 284 files)

FILE: deploy/ec2/ec2_exp.py
  function parse_args (line 29) | def parse_args(force_action=True):
  function get_or_make_group (line 114) | def get_or_make_group(conn, name):
  function scp (line 124) | def scp(host, opts, local_file, dest_file):
  function scp_from (line 130) | def scp_from(host, opts, dest_file, local_file):
  function rsync_from_all (line 135) | def rsync_from_all(hosts, opts, dest_pattern, local_dir, errors=0):
  function parallel_commands (line 146) | def parallel_commands(commands, tolerable_failures):
  function ssh (line 167) | def ssh(host, opts, command):
  function ssh_all (line 173) | def ssh_all(hosts, opts, command):
  function launch_cluster (line 182) | def launch_cluster(conn, opts, cluster_name):
  function wait_for_instances (line 278) | def wait_for_instances(instances):
  function is_active (line 290) | def is_active(instance):
  function find_existing_cluster (line 294) | def find_existing_cluster(conn, opts, cluster_name):
  function generate_deploy_files (line 330) | def generate_deploy_files(frontends, backends, opts, warmup_job_arrival_...
  function redeploy_sparrow (line 389) | def redeploy_sparrow(machines, frontends, backends, opts, warmup_job_arr...
  function deploy_cluster (line 405) | def deploy_cluster(frontends, backends, opts, warmup_job_arrival_s=0, wa...
  function start_sparrow_throughput (line 433) | def start_sparrow_throughput(frontends, backends, opts):
  function start_sparrow (line 438) | def start_sparrow(frontends, backends, opts):
  function stop_sparrow (line 448) | def stop_sparrow(frontends, backends, opts):
  function start_mesos (line 457) | def start_mesos(frontends, backends, opts):
  function stop_mesos (line 464) | def stop_mesos(frontends, backends, opts):
  function start_spark_shark (line 473) | def start_spark_shark(frontends, backends, opts):
  function stop_spark (line 481) | def stop_spark(frontends, backends, opts):
  function start_hdfs (line 490) | def start_hdfs(frontends, backends, opts):
  function stop_hdfs (line 495) | def stop_hdfs(frontends, backends,opts):
  function start_proto (line 501) | def start_proto(frontends, backends, opts):
  function stop_proto (line 510) | def stop_proto(frontends, backends, opts):
  function create_database (line 519) | def create_database(frontends, opts):
  function create_tpch_tables (line 525) | def create_tpch_tables(frontends, backends, opts):
  function start_shark_tpch (line 534) | def start_shark_tpch(frontends, backends, opts):
  function collect_logs (line 540) | def collect_logs(frontends, backends, opts):
  function destroy_cluster (line 562) | def destroy_cluster(frontends, backends, opts):
  function execute_command (line 576) | def execute_command(frontends, backends, opts, cmd):
  function login_frontend (line 581) | def login_frontend(frontends, backends, opts):
  function login_backend (line 588) | def login_backend(frontends, backends, opts):
  function main (line 594) | def main():
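
A minimal usage sketch (not part of the repository) showing how the ec2_exp.py helpers listed above might be composed. `frontends`, `backends`, and `opts` stand for the host lists and the parsed option object that ec2_exp.py's own main() builds; their exact contents are assumptions here, and the real entry point remains main().

  # sketch.py -- hypothetical driver around deploy/ec2/ec2_exp.py (assumed flow)
  from ec2_exp import deploy_cluster, start_sparrow, collect_logs, stop_sparrow

  def run_sparrow_experiment(frontends, backends, opts):
      # Push configuration to every node, then start the Sparrow daemons.
      deploy_cluster(frontends, backends, opts)
      start_sparrow(frontends, backends, opts)
      # ... run the workload of interest here ...
      # Pull logs back for the analysis scripts under src/main/python/,
      # then shut the schedulers and node monitors down.
      collect_logs(frontends, backends, opts)
      stop_sparrow(frontends, backends, opts)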

FILE: deploy/ec2/fairness.py
  function run_cmd (line 25) | def run_cmd(cmd):
  function main (line 28) | def main(argv):

FILE: deploy/ec2/isolation.py
  function run_cmd (line 25) | def run_cmd(cmd):
  function main (line 28) | def main(argv):

FILE: deploy/ec2/osdi.py
  function run_cmd (line 21) | def run_cmd(cmd):

FILE: deploy/ec2/prepare_tpch_experiments.py
  function run_cmd (line 21) | def run_cmd(cmd):
  function main (line 25) | def main(argv):

FILE: deploy/ec2/probe_ratio.py
  function run_cmd (line 25) | def run_cmd(cmd):
  function main (line 28) | def main(argv):

FILE: deploy/ec2/probe_ratio_het.py
  function run_cmd (line 25) | def run_cmd(cmd):
  function main (line 28) | def main(argv):

FILE: deploy/ec2/spark_v_mesos.py
  function run_cmd (line 21) | def run_cmd(cmd):

FILE: deploy/ec2/tpch_experiments.py
  function run_cmd (line 34) | def run_cmd(cmd):

FILE: deploy/third_party/boto-2.1.1/boto/__init__.py
  function init_logging (line 39) | def init_logging():
  class NullHandler (line 46) | class NullHandler(logging.Handler):
    method emit (line 47) | def emit(self, record):
  function set_file_logger (line 55) | def set_file_logger(name, filepath, level=logging.INFO, format_string=No...
  function set_stream_logger (line 68) | def set_stream_logger(name, level=logging.DEBUG, format_string=None):
  function connect_sqs (line 81) | def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_s3 (line 95) | def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwa...
  function connect_gs (line 109) | def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
  function connect_ec2 (line 123) | def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_elb (line 137) | def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_autoscale (line 151) | def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None...
  function connect_cloudwatch (line 165) | def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=Non...
  function connect_sdb (line 179) | def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_fps (line 193) | def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_mturk (line 207) | def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **...
  function connect_cloudfront (line 221) | def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=Non...
  function connect_vpc (line 235) | def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_rds (line 249) | def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_emr (line 263) | def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_sns (line 277) | def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_iam (line 292) | def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_route53 (line 306) | def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, ...
  function connect_euca (line 320) | def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_ke...
  function connect_walrus (line 358) | def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_...
  function connect_ses (line 396) | def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_sts (line 410) | def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kw...
  function connect_ia (line 424) | def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
  function check_extensions (line 456) | def check_extensions(module_name, module_path):
  function _get_aws_conn (line 475) | def _get_aws_conn(service):
  function lookup (line 484) | def lookup(service, name):
  function storage_uri (line 493) | def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
  function storage_uri_for_key (line 571) | def storage_uri_for_key(key):
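
A brief, hedged example (mine, not from the repository) of the top-level connect_* helpers listed above; in boto 2.x the key arguments may be omitted, in which case credentials are read from the environment or the boto config file. The key strings below are placeholders.

  import boto

  ec2 = boto.connect_ec2('ACCESS_KEY', 'SECRET_KEY')   # returns an EC2Connection
  print([zone.name for zone in ec2.get_all_zones()])   # get_all_zones is listed under boto/ec2/connection.py
  s3 = boto.connect_s3('ACCESS_KEY', 'SECRET_KEY')     # returns an S3Connection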

FILE: deploy/third_party/boto-2.1.1/boto/auth.py
  class Faker (line 56) | class Faker:
    method __init__ (line 57) | def __init__(self, which):
    method new (line 61) | def new(self, *args, **kwargs):
  class HmacKeys (line 71) | class HmacKeys(object):
    method __init__ (line 74) | def __init__(self, host, config, provider):
    method algorithm (line 85) | def algorithm(self):
    method sign_string (line 91) | def sign_string(self, string_to_sign):
  class HmacAuthV1Handler (line 100) | class HmacAuthV1Handler(AuthHandler, HmacKeys):
    method __init__ (line 105) | def __init__(self, host, config, provider):
    method add_auth (line 110) | def add_auth(self, http_request, **kwargs):
  class HmacAuthV2Handler (line 128) | class HmacAuthV2Handler(AuthHandler, HmacKeys):
    method __init__ (line 134) | def __init__(self, host, config, provider):
    method add_auth (line 139) | def add_auth(self, http_request, **kwargs):
  class HmacAuthV3Handler (line 150) | class HmacAuthV3Handler(AuthHandler, HmacKeys):
    method __init__ (line 155) | def __init__(self, host, config, provider):
    method add_auth (line 159) | def add_auth(self, http_request, **kwargs):
  class QuerySignatureHelper (line 169) | class QuerySignatureHelper(HmacKeys):
    method add_auth (line 175) | def add_auth(self, http_request, **kwargs):
  class QuerySignatureV0AuthHandler (line 197) | class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
    method _calc_signature (line 203) | def _calc_signature(self, params, *args):
  class QuerySignatureV1AuthHandler (line 217) | class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
    method _calc_signature (line 225) | def _calc_signature(self, params, *args):
  class QuerySignatureV2AuthHandler (line 239) | class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
    method _calc_signature (line 246) | def _calc_signature(self, params, verb, path, server_name):
  function get_auth_handler (line 275) | def get_auth_handler(host, config, provider, requested_capability=None):

FILE: deploy/third_party/boto-2.1.1/boto/auth_handler.py
  class NotReadyToAuthenticate (line 28) | class NotReadyToAuthenticate(Exception):
  class AuthHandler (line 31) | class AuthHandler(Plugin):
    method __init__ (line 35) | def __init__(self, host, config, provider):
    method add_auth (line 52) | def add_auth(self, http_request):

FILE: deploy/third_party/boto-2.1.1/boto/cloudformation/connection.py
  class CloudFormationConnection (line 34) | class CloudFormationConnection(AWSQueryConnection):
    method __init__ (line 47) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 59) | def _required_auth_capability(self):
    method encode_bool (line 62) | def encode_bool(self, v):
    method create_stack (line 66) | def create_stack(self, stack_name, template_body=None, template_url=None,
    method delete_stack (line 132) | def delete_stack(self, stack_name_or_id):
    method describe_stack_events (line 144) | def describe_stack_events(self, stack_name_or_id=None, next_token=None):
    method describe_stack_resource (line 153) | def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
    method describe_stack_resources (line 165) | def describe_stack_resources(self, stack_name_or_id=None,
    method describe_stacks (line 178) | def describe_stacks(self, stack_name_or_id=None):
    method get_template (line 184) | def get_template(self, stack_name_or_id):
    method list_stack_resources (line 195) | def list_stack_resources(self, stack_name_or_id, next_token=None):
    method list_stacks (line 202) | def list_stacks(self, stack_status_filters=[], next_token=None):
    method validate_template (line 213) | def validate_template(self, template_body=None, template_url=None):

FILE: deploy/third_party/boto-2.1.1/boto/cloudformation/stack.py
  class Stack (line 5) | class Stack:
    method __init__ (line 6) | def __init__(self, connection=None):
    method startElement (line 20) | def startElement(self, name, attrs, connection):
    method endElement (line 30) | def endElement(self, name, value, connection):
    method delete (line 54) | def delete(self):
    method describe_events (line 57) | def describe_events(self, next_token=None):
    method describe_resource (line 63) | def describe_resource(self, logical_resource_id):
    method describe_resources (line 69) | def describe_resources(self, logical_resource_id=None,
    method list_resources (line 77) | def list_resources(self, next_token=None):
    method update (line 83) | def update(self):
    method get_template (line 91) | def get_template(self):
  class StackSummary (line 94) | class StackSummary:
    method __init__ (line 95) | def __init__(self, connection=None):
    method startElement (line 104) | def startElement(self, name, attrs, connection):
    method endElement (line 107) | def endElement(self, name, value, connection):
  class Parameter (line 125) | class Parameter:
    method __init__ (line 126) | def __init__(self, connection=None):
    method startElement (line 131) | def startElement(self, name, attrs, connection):
    method endElement (line 134) | def endElement(self, name, value, connection):
    method __repr__ (line 142) | def __repr__(self):
  class Output (line 145) | class Output:
    method __init__ (line 146) | def __init__(self, connection=None):
    method startElement (line 152) | def startElement(self, name, attrs, connection):
    method endElement (line 155) | def endElement(self, name, value, connection):
    method __repr__ (line 165) | def __repr__(self):
  class StackResource (line 168) | class StackResource:
    method __init__ (line 169) | def __init__(self, connection=None):
    method startElement (line 181) | def startElement(self, name, attrs, connection):
    method endElement (line 184) | def endElement(self, name, value, connection):
    method __repr__ (line 206) | def __repr__(self):
  class StackResourceSummary (line 210) | class StackResourceSummary:
    method __init__ (line 211) | def __init__(self, connection=None):
    method startElement (line 220) | def startElement(self, name, attrs, connection):
    method endElement (line 223) | def endElement(self, name, value, connection):
    method __repr__ (line 240) | def __repr__(self):
  class StackEvent (line 244) | class StackEvent:
    method __init__ (line 247) | def __init__(self, connection=None):
    method startElement (line 260) | def startElement(self, name, attrs, connection):
    method endElement (line 263) | def endElement(self, name, value, connection):
    method __repr__ (line 287) | def __repr__(self):

FILE: deploy/third_party/boto-2.1.1/boto/cloudformation/template.py
  class Template (line 3) | class Template:
    method __init__ (line 4) | def __init__(self, connection=None):
    method startElement (line 9) | def startElement(self, name, attrs, connection):
    method endElement (line 16) | def endElement(self, name, value, connection):
  class TemplateParameter (line 22) | class TemplateParameter:
    method __init__ (line 23) | def __init__(self, parent):
    method startElement (line 30) | def startElement(self, name, attrs, connection):
    method endElement (line 33) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/__init__.py
  class CloudFrontConnection (line 37) | class CloudFrontConnection(AWSAuthConnection):
    method __init__ (line 42) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method get_etag (line 49) | def get_etag(self, response):
    method _required_auth_capability (line 56) | def _required_auth_capability(self):
    method _get_all_objects (line 61) | def _get_all_objects(self, resource, tags):
    method _get_info (line 74) | def _get_info(self, id, resource, dist_class):
    method _get_config (line 90) | def _get_config(self, id, resource, config_class):
    method _set_config (line 103) | def _set_config(self, distribution_id, etag, config):
    method _create_object (line 117) | def _create_object(self, config, resource, dist_class):
    method _delete_object (line 131) | def _delete_object(self, id, etag, resource):
    method get_all_distributions (line 141) | def get_all_distributions(self):
    method get_distribution_info (line 145) | def get_distribution_info(self, distribution_id):
    method get_distribution_config (line 148) | def get_distribution_config(self, distribution_id):
    method set_distribution_config (line 152) | def set_distribution_config(self, distribution_id, etag, config):
    method create_distribution (line 155) | def create_distribution(self, origin, enabled, caller_reference='',
    method delete_distribution (line 163) | def delete_distribution(self, distribution_id, etag):
    method get_all_streaming_distributions (line 168) | def get_all_streaming_distributions(self):
    method get_streaming_distribution_info (line 172) | def get_streaming_distribution_info(self, distribution_id):
    method get_streaming_distribution_config (line 176) | def get_streaming_distribution_config(self, distribution_id):
    method set_streaming_distribution_config (line 180) | def set_streaming_distribution_config(self, distribution_id, etag, con...
    method create_streaming_distribution (line 183) | def create_streaming_distribution(self, origin, enabled,
    method delete_streaming_distribution (line 194) | def delete_streaming_distribution(self, distribution_id, etag):
    method get_all_origin_access_identity (line 199) | def get_all_origin_access_identity(self):
    method get_origin_access_identity_info (line 204) | def get_origin_access_identity_info(self, access_id):
    method get_origin_access_identity_config (line 208) | def get_origin_access_identity_config(self, access_id):
    method set_origin_access_identity_config (line 213) | def set_origin_access_identity_config(self, access_id,
    method create_origin_access_identity (line 217) | def create_origin_access_identity(self, caller_reference='', comment=''):
    method delete_origin_access_identity (line 223) | def delete_origin_access_identity(self, access_id, etag):
    method create_invalidation_request (line 229) | def create_invalidation_request(self, distribution_id, paths,
    method invalidation_request_status (line 252) | def invalidation_request_status (self, distribution_id, request_id, ca...

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/distribution.py
  class DistributionConfig (line 32) | class DistributionConfig:
    method __init__ (line 34) | def __init__(self, connection=None, origin=None, enabled=False,
    method to_xml (line 105) | def to_xml(self):
    method startElement (line 140) | def startElement(self, name, attrs, connection):
    method endElement (line 156) | def endElement(self, name, value, connection):
  class StreamingDistributionConfig (line 173) | class StreamingDistributionConfig(DistributionConfig):
    method __init__ (line 175) | def __init__(self, connection=None, origin='', enabled=False,
    method to_xml (line 184) | def to_xml(self):
  class DistributionSummary (line 216) | class DistributionSummary:
    method __init__ (line 218) | def __init__(self, connection=None, domain_name='', id='',
    method startElement (line 236) | def startElement(self, name, attrs, connection):
    method endElement (line 248) | def endElement(self, name, value, connection):
    method get_distribution (line 273) | def get_distribution(self):
  class StreamingDistributionSummary (line 276) | class StreamingDistributionSummary(DistributionSummary):
    method get_distribution (line 278) | def get_distribution(self):
  class Distribution (line 281) | class Distribution:
    method __init__ (line 283) | def __init__(self, connection=None, config=None, domain_name='',
    method startElement (line 297) | def startElement(self, name, attrs, connection):
    method endElement (line 307) | def endElement(self, name, value, connection):
    method update (line 321) | def update(self, enabled=None, cnames=None, comment=None):
    method enable (line 362) | def enable(self):
    method disable (line 369) | def disable(self):
    method delete (line 376) | def delete(self):
    method _get_bucket (line 384) | def _get_bucket(self):
    method get_objects (line 403) | def get_objects(self):
    method set_permissions (line 416) | def set_permissions(self, object, replace=False):
    method set_permissions_all (line 448) | def set_permissions_all(self, replace=False):
    method add_object (line 465) | def add_object(self, name, content, headers=None, replace=True):
    method create_signed_url (line 498) | def create_signed_url(self, url, keypair_id,
    method _create_signing_params (line 575) | def _create_signing_params(self, url, keypair_id,
    method _canned_policy (line 608) | def _canned_policy(resource, expires):
    method _custom_policy (line 618) | def _custom_policy(resource, expires=None, valid_after=None, ip_addres...
    method _sign_string (line 637) | def _sign_string(message, private_key_file=None, private_key_string=No...
    method _url_base64_encode (line 668) | def _url_base64_encode(msg):
  class StreamingDistribution (line 679) | class StreamingDistribution(Distribution):
    method __init__ (line 681) | def __init__(self, connection=None, config=None, domain_name='',
    method startElement (line 687) | def startElement(self, name, attrs, connection):
    method update (line 694) | def update(self, enabled=None, cnames=None, comment=None):
    method delete (line 740) | def delete(self):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/exception.py
  class CloudFrontServerError (line 24) | class CloudFrontServerError(BotoServerError):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/identity.py
  class OriginAccessIdentity (line 24) | class OriginAccessIdentity:
    method __init__ (line 26) | def __init__(self, connection=None, config=None, id='',
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
    method update (line 52) | def update(self, comment=None):
    method delete (line 61) | def delete(self):
    method uri (line 64) | def uri(self):
  class OriginAccessIdentityConfig (line 67) | class OriginAccessIdentityConfig:
    method __init__ (line 69) | def __init__(self, connection=None, caller_reference='', comment=''):
    method to_xml (line 77) | def to_xml(self):
    method startElement (line 86) | def startElement(self, name, attrs, connection):
    method endElement (line 89) | def endElement(self, name, value, connection):
  class OriginAccessIdentitySummary (line 97) | class OriginAccessIdentitySummary:
    method __init__ (line 99) | def __init__(self, connection=None, id='',
    method startElement (line 107) | def startElement(self, name, attrs, connection):
    method endElement (line 110) | def endElement(self, name, value, connection):
    method get_origin_access_identity (line 120) | def get_origin_access_identity(self):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/invalidation.py
  class InvalidationBatch (line 25) | class InvalidationBatch(object):
    method __init__ (line 30) | def __init__(self, paths=None, connection=None, distribution=None, cal...
    method add (line 47) | def add(self, path):
    method remove (line 51) | def remove(self, path):
    method __iter__ (line 55) | def __iter__(self):
    method __getitem__ (line 58) | def __getitem__(self, i):
    method __setitem__ (line 61) | def __setitem__(self, k, v):
    method escape (line 64) | def escape(self, p):
    method to_xml (line 70) | def to_xml(self):
    method startElement (line 81) | def startElement(self, name, attrs, connection):
    method endElement (line 86) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/logging.py
  class LoggingInfo (line 22) | class LoggingInfo(object):
    method __init__ (line 24) | def __init__(self, bucket='', prefix=''):
    method startElement (line 28) | def startElement(self, name, attrs, connection):
    method endElement (line 31) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/object.py
  class Object (line 24) | class Object(Key):
    method __init__ (line 26) | def __init__(self, bucket, name=None):
    method __repr__ (line 30) | def __repr__(self):
    method url (line 33) | def url(self, scheme='http'):
  class StreamingObject (line 43) | class StreamingObject(Object):
    method url (line 45) | def url(self, scheme='rtmp'):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/origin.py
  function get_oai_value (line 25) | def get_oai_value(origin_access_identity):
  class S3Origin (line 31) | class S3Origin(object):
    method __init__ (line 38) | def __init__(self, dns_name=None, origin_access_identity=None):
    method __repr__ (line 58) | def __repr__(self):
    method startElement (line 61) | def startElement(self, name, attrs, connection):
    method endElement (line 64) | def endElement(self, name, value, connection):
    method to_xml (line 72) | def to_xml(self):
  class CustomOrigin (line 81) | class CustomOrigin(object):
    method __init__ (line 88) | def __init__(self, dns_name=None, http_port=80, https_port=443,
    method __repr__ (line 118) | def __repr__(self):
    method startElement (line 121) | def startElement(self, name, attrs, connection):
    method endElement (line 124) | def endElement(self, name, value, connection):
    method to_xml (line 142) | def to_xml(self):

FILE: deploy/third_party/boto-2.1.1/boto/cloudfront/signers.py
  class Signer (line 22) | class Signer:
    method __init__ (line 24) | def __init__(self):
    method startElement (line 28) | def startElement(self, name, attrs, connection):
    method endElement (line 31) | def endElement(self, name, value, connection):
  class ActiveTrustedSigners (line 39) | class ActiveTrustedSigners(list):
    method startElement (line 41) | def startElement(self, name, attrs, connection):
    method endElement (line 47) | def endElement(self, name, value, connection):
  class TrustedSigners (line 50) | class TrustedSigners(list):
    method startElement (line 52) | def startElement(self, name, attrs, connection):
    method endElement (line 55) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/connection.py
  class HostConnectionPool (line 94) | class HostConnectionPool(object):
    method __init__ (line 120) | def __init__(self):
    method size (line 123) | def size(self):
    method put (line 131) | def put(self, conn):
    method get (line 138) | def get(self):
    method _conn_ready (line 158) | def _conn_ready(self, conn):
    method clean (line 181) | def clean(self):
    method _pair_stale (line 190) | def _pair_stale(self, pair):
  class ConnectionPool (line 199) | class ConnectionPool(object):
    method __init__ (line 228) | def __init__(self):
    method size (line 236) | def size(self):
    method get_http_connection (line 242) | def get_http_connection(self, host, is_secure):
    method put_http_connection (line 254) | def put_http_connection(self, host, is_secure, conn):
    method clean (line 265) | def clean(self):
  class HTTPRequest (line 285) | class HTTPRequest(object):
    method __init__ (line 287) | def __init__(self, method, protocol, host, port, path, auth_path,
    method __str__ (line 342) | def __str__(self):
    method authorize (line 348) | def authorize(self, connection, **kwargs):
  class AWSAuthConnection (line 364) | class AWSAuthConnection(object):
    method __init__ (line 365) | def __init__(self, host, aws_access_key_id=None, aws_secret_access_key...
    method __repr__ (line 481) | def __repr__(self):
    method _required_auth_capability (line 484) | def _required_auth_capability(self):
    method connection (line 487) | def connection(self):
    method aws_access_key_id (line 491) | def aws_access_key_id(self):
    method aws_secret_access_key (line 497) | def aws_secret_access_key(self):
    method get_path (line 503) | def get_path(self, path='/'):
    method server_name (line 524) | def server_name(self, port=None):
    method handle_proxy (line 544) | def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
    method get_http_connection (line 578) | def get_http_connection(self, host, is_secure):
    method new_http_connection (line 585) | def new_http_connection(self, host, is_secure):
    method put_http_connection (line 619) | def put_http_connection(self, host, is_secure, connection):
    method proxy_ssl (line 622) | def proxy_ssl(self):
    method prefix_proxy_to_path (line 678) | def prefix_proxy_to_path(self, path, host=None):
    method get_proxy_auth_header (line 682) | def get_proxy_auth_header(self):
    method _mexe (line 686) | def _mexe(self, request, sender=None, override_num_retries=None):
    method build_base_http_request (line 766) | def build_base_http_request(self, method, path, auth_path,
    method make_request (line 791) | def make_request(self, method, path, headers=None, data='', host=None,
    method close (line 798) | def close(self):
  class AWSQueryConnection (line 805) | class AWSQueryConnection(AWSAuthConnection):
    method __init__ (line 810) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 821) | def _required_auth_capability(self):
    method get_utf8_value (line 824) | def get_utf8_value(self, value):
    method make_request (line 827) | def make_request(self, action, params=None, path='/', verb='GET'):
    method build_list_params (line 836) | def build_list_params(self, params, items, label):
    method get_list (line 844) | def get_list(self, action, params, markers, path='/',
    method get_object (line 864) | def get_object(self, action, params, cls, path='/',
    method get_status (line 884) | def get_status(self, action, params, path='/', parent=None, verb='GET'):

FILE: deploy/third_party/boto-2.1.1/boto/contrib/m2helpers.py
  function secure_context (line 40) | def secure_context(cafile=None, capath=None):
  function https_connection_factory (line 47) | def https_connection_factory(cafile=None, capath=None):

FILE: deploy/third_party/boto-2.1.1/boto/contrib/ymlmessage.py
  class YAMLMessage (line 31) | class YAMLMessage(Message):
    method __init__ (line 44) | def __init__(self, queue=None, body='', xml_attrs=None):
    method set_body (line 48) | def set_body(self, body):
    method get_body (line 51) | def get_body(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/__init__.py
  function regions (line 28) | def regions(**kw_params):
  function connect_to_region (line 41) | def connect_to_region(region_name, **kw_params):
  function get_region (line 60) | def get_region(region_name, **kw_params):
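
A short sketch (assumed usage, placeholder credentials) of the region helpers listed above, as typically used in boto 2.x:

  import boto.ec2

  # Enumerate the EC2 regions known to this boto release.
  print([r.name for r in boto.ec2.regions()])

  # Connect to a specific region rather than the default endpoint.
  conn = boto.ec2.connect_to_region('us-east-1',
                                    aws_access_key_id='ACCESS_KEY',
                                    aws_secret_access_key='SECRET_KEY')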

FILE: deploy/third_party/boto-2.1.1/boto/ec2/address.py
  class Address (line 28) | class Address(EC2Object):
    method __init__ (line 30) | def __init__(self, connection=None, public_ip=None, instance_id=None):
    method __repr__ (line 39) | def __repr__(self):
    method endElement (line 42) | def endElement(self, name, value, connection):
    method release (line 56) | def release(self):
    method associate (line 61) | def associate(self, instance_id):
    method disassociate (line 64) | def disassociate(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/__init__.py
  function regions (line 49) | def regions():
  function connect_to_region (line 64) | def connect_to_region(region_name, **kw_params):
  class AutoScaleConnection (line 81) | class AutoScaleConnection(AWSQueryConnection):
    method __init__ (line 87) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 109) | def _required_auth_capability(self):
    method build_list_params (line 112) | def build_list_params(self, params, items, label):
    method _update_group (line 141) | def _update_group(self, op, as_group):
    method create_auto_scaling_group (line 171) | def create_auto_scaling_group(self, as_group):
    method delete_auto_scaling_group (line 177) | def delete_auto_scaling_group(self, name, force_delete=False):
    method create_launch_configuration (line 188) | def create_launch_configuration(self, launch_config):
    method create_scaling_policy (line 220) | def create_scaling_policy(self, scaling_policy):
    method delete_launch_configuration (line 237) | def delete_launch_configuration(self, launch_config_name):
    method get_all_groups (line 248) | def get_all_groups(self, names=None, max_records=None, next_token=None):
    method get_all_launch_configurations (line 278) | def get_all_launch_configurations(self, **kwargs):
    method get_all_activities (line 312) | def get_all_activities(self, autoscale_group, activity_ids=None, max_r...
    method delete_scheduled_action (line 342) | def delete_scheduled_action(self, scheduled_action_name,
    method terminate_instance (line 356) | def terminate_instance(self, instance_id, decrement_capacity=True):
    method delete_policy (line 373) | def delete_policy(self, policy_name, autoscale_group=None):
    method get_all_adjustment_types (line 388) | def get_all_adjustment_types(self):
    method get_all_autoscaling_instances (line 391) | def get_all_autoscaling_instances(self, instance_ids=None,
    method get_all_metric_collection_types (line 422) | def get_all_metric_collection_types(self):
    method get_all_policies (line 430) | def get_all_policies(self, as_group=None, policy_names=None,
    method get_all_scaling_process_types (line 462) | def get_all_scaling_process_types(self):
    method suspend_processes (line 469) | def suspend_processes(self, as_group, scaling_processes=None):
    method resume_processes (line 484) | def resume_processes(self, as_group, scaling_processes=None):
    method create_scheduled_group_action (line 501) | def create_scheduled_group_action(self, as_group, name, time, desired_...
    method get_all_scheduled_actions (line 539) | def get_all_scheduled_actions(self, as_group=None, start_time=None, en...
    method disable_metrics_collection (line 552) | def disable_metrics_collection(self, as_group, metrics=None):
    method enable_metrics_collection (line 565) | def enable_metrics_collection(self, as_group, granularity, metrics=None):
    method execute_policy (line 594) | def execute_policy(self, policy_name, as_group=None, honor_cooldown=No...
    method set_instance_health (line 604) | def set_instance_health(self, instance_id, health_status,

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/activity.py
  class Activity (line 25) | class Activity(object):
    method __init__ (line 26) | def __init__(self, connection=None):
    method __repr__ (line 38) | def __repr__(self):
    method startElement (line 44) | def startElement(self, name, attrs, connection):
    method endElement (line 47) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/group.py
  class ProcessType (line 30) | class ProcessType(object):
    method __init__ (line 31) | def __init__(self, connection=None):
    method __repr__ (line 35) | def __repr__(self):
    method startElement (line 38) | def startElement(self, name, attrs, connection):
    method endElement (line 41) | def endElement(self, name, value, connection):
  class SuspendedProcess (line 46) | class SuspendedProcess(object):
    method __init__ (line 47) | def __init__(self, connection=None):
    method __repr__ (line 52) | def __repr__(self):
    method startElement (line 55) | def startElement(self, name, attrs, connection):
    method endElement (line 58) | def endElement(self, name, value, connection):
  class EnabledMetric (line 65) | class EnabledMetric(object):
    method __init__ (line 66) | def __init__(self, connection=None, metric=None, granularity=None):
    method __repr__ (line 71) | def __repr__(self):
    method startElement (line 74) | def startElement(self, name, attrs, connection):
    method endElement (line 77) | def endElement(self, name, value, connection):
  class AutoScalingGroup (line 84) | class AutoScalingGroup(object):
    method __init__ (line 85) | def __init__(self, connection=None, name=None,
    method _get_cooldown (line 167) | def _get_cooldown(self):
    method _set_cooldown (line 169) | def _set_cooldown(self, val):
    method __repr__ (line 173) | def __repr__(self):
    method startElement (line 180) | def startElement(self, name, attrs, connection):
    method endElement (line 197) | def endElement(self, name, value, connection):
    method set_capacity (line 225) | def set_capacity(self, capacity):
    method update (line 236) | def update(self):
    method shutdown_instances (line 240) | def shutdown_instances(self):
    method delete (line 249) | def delete(self, force_delete=False):
    method get_activities (line 255) | def get_activities(self, activity_ids=None, max_records=50):
    method suspend_processes (line 261) | def suspend_processes(self, scaling_processes=None):
    method resume_processes (line 265) | def resume_processes(self, scaling_processes=None):
  class AutoScalingGroupMetric (line 270) | class AutoScalingGroupMetric(object):
    method __init__ (line 271) | def __init__(self, connection=None):
    method __repr__ (line 277) | def __repr__(self):
    method startElement (line 280) | def startElement(self, name, attrs, connection):
    method endElement (line 283) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/instance.py
  class Instance (line 23) | class Instance(object):
    method __init__ (line 24) | def __init__(self, connection=None):
    method __repr__ (line 33) | def __repr__(self):
    method startElement (line 42) | def startElement(self, name, attrs, connection):
    method endElement (line 45) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/launchconfig.py
  class Ebs (line 29) | class Ebs(object):
    method __init__ (line 30) | def __init__(self, connection=None, snapshot_id=None, volume_size=None):
    method __repr__ (line 35) | def __repr__(self):
    method startElement (line 38) | def startElement(self, name, attrs, connection):
    method endElement (line 41) | def endElement(self, name, value, connection):
  class InstanceMonitoring (line 48) | class InstanceMonitoring(object):
    method __init__ (line 49) | def __init__(self, connection=None, enabled='false'):
    method __repr__ (line 53) | def __repr__(self):
    method startElement (line 56) | def startElement(self, name, attrs, connection):
    method endElement (line 59) | def endElement(self, name, value, connection):
  class BlockDeviceMapping (line 65) | class BlockDeviceMapping(object):
    method __init__ (line 66) | def __init__(self, connection=None, device_name=None, virtual_name=None):
    method __repr__ (line 72) | def __repr__(self):
    method startElement (line 75) | def startElement(self, name, attrs, connection):
    method endElement (line 80) | def endElement(self, name, value, connection):
  class LaunchConfiguration (line 87) | class LaunchConfiguration(object):
    method __init__ (line 88) | def __init__(self, connection=None, name=None, image_id=None,
    method __repr__ (line 146) | def __repr__(self):
    method startElement (line 149) | def startElement(self, name, attrs, connection):
    method endElement (line 159) | def endElement(self, name, value, connection):
    method delete (line 186) | def delete(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/policy.py
  class Alarm (line 26) | class Alarm(object):
    method __init__ (line 27) | def __init__(self, connection=None):
    method __repr__ (line 32) | def __repr__(self):
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 38) | def endElement(self, name, value, connection):
  class AdjustmentType (line 47) | class AdjustmentType(object):
    method __init__ (line 48) | def __init__(self, connection=None):
    method __repr__ (line 52) | def __repr__(self):
    method startElement (line 55) | def startElement(self, name, attrs, connection):
    method endElement (line 59) | def endElement(self, name, value, connection):
  class MetricCollectionTypes (line 63) | class MetricCollectionTypes(object):
    class BaseType (line 64) | class BaseType(object):
      method __init__ (line 66) | def __init__(self, connection):
      method __repr__ (line 69) | def __repr__(self):
      method startElement (line 71) | def startElement(self, name, attrs, connection):
      method endElement (line 73) | def endElement(self, name, value, connection):
    class Metric (line 76) | class Metric(BaseType):
    class Granularity (line 78) | class Granularity(BaseType):
    method __init__ (line 81) | def __init__(self, connection=None):
    method __repr__ (line 86) | def __repr__(self):
    method startElement (line 89) | def startElement(self, name, attrs, connection):
    method endElement (line 97) | def endElement(self, name, value, connection):
  class ScalingPolicy (line 101) | class ScalingPolicy(object):
    method __init__ (line 102) | def __init__(self, connection=None, **kwargs):
    method __repr__ (line 129) | def __repr__(self):
    method startElement (line 134) | def startElement(self, name, attrs, connection):
    method endElement (line 139) | def endElement(self, name, value, connection):
    method delete (line 153) | def delete(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/request.py
  class Request (line 22) | class Request(object):
    method __init__ (line 23) | def __init__(self, connection=None):
    method __repr__ (line 27) | def __repr__(self):
    method startElement (line 30) | def startElement(self, name, attrs, connection):
    method endElement (line 33) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/autoscale/scheduled.py
  class ScheduledUpdateGroupAction (line 26) | class ScheduledUpdateGroupAction(object):
    method __init__ (line 27) | def __init__(self, connection=None):
    method __repr__ (line 36) | def __repr__(self):
    method startElement (line 39) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/blockdevicemapping.py
  class BlockDeviceType (line 23) | class BlockDeviceType(object):
    method __init__ (line 25) | def __init__(self,
    method startElement (line 45) | def startElement(self, name, attrs, connection):
    method endElement (line 48) | def endElement(self, name, value, connection):
  class BlockDeviceMapping (line 74) | class BlockDeviceMapping(dict):
    method __init__ (line 76) | def __init__(self, connection=None):
    method startElement (line 82) | def startElement(self, name, attrs, connection):
    method endElement (line 87) | def endElement(self, name, value, connection):
    method build_list_params (line 93) | def build_list_params(self, params, prefix=''):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/bundleinstance.py
  class BundleInstanceTask (line 28) | class BundleInstanceTask(EC2Object):
    method __init__ (line 30) | def __init__(self, connection=None):
    method __repr__ (line 45) | def __repr__(self):
    method startElement (line 48) | def startElement(self, name, attrs, connection):
    method endElement (line 51) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/buyreservation.py
  class BuyReservation (line 31) | class BuyReservation(object):
    method get_region (line 33) | def get_region(self, params):
    method get_instance_type (line 39) | def get_instance_type(self, params):
    method get_quantity (line 45) | def get_quantity(self, params):
    method get_zone (line 50) | def get_zone(self, params):
    method get (line 56) | def get(self, params):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/__init__.py
  function regions (line 158) | def regions():
  function connect_to_region (line 173) | def connect_to_region(region_name, **kw_params):
  class CloudWatchConnection (line 190) | class CloudWatchConnection(AWSQueryConnection):
    method __init__ (line 200) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 222) | def _required_auth_capability(self):
    method build_dimension_param (line 225) | def build_dimension_param(self, dimension, params):
    method build_list_params (line 234) | def build_list_params(self, params, items, label):
    method build_put_params (line 247) | def build_put_params(self, params, name, value=None, timestamp=None,
    method get_metric_statistics (line 288) | def get_metric_statistics(self, period, start_time, end_time, metric_n...
    method list_metrics (line 340) | def list_metrics(self, next_token=None, dimensions=None,
    method put_metric_data (line 384) | def put_metric_data(self, namespace, name, value=None, timestamp=None,
    method describe_alarms (line 433) | def describe_alarms(self, action_prefix=None, alarm_name_prefix=None,
    method describe_alarm_history (line 481) | def describe_alarm_history(self, alarm_name=None,
    method describe_alarms_for_metric (line 533) | def describe_alarms_for_metric(self, metric_name, namespace, period=None,
    method put_metric_alarm (line 578) | def put_metric_alarm(self, alarm):
    method delete_alarms (line 626) | def delete_alarms(self, alarms):
    method set_alarm_state (line 638) | def set_alarm_state(self, alarm_name, state_reason, state_value,
    method enable_alarm_actions (line 667) | def enable_alarm_actions(self, alarm_names):
    method disable_alarm_actions (line 678) | def disable_alarm_actions(self, alarm_names):
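
A hedged sketch of publishing a custom metric through the CloudWatchConnection listed above (standard boto 2.x usage); the namespace and metric name are placeholders, not something the repository itself does.

  import boto

  cw = boto.connect_cloudwatch('ACCESS_KEY', 'SECRET_KEY')
  cw.put_metric_data(namespace='Example/Namespace',  # placeholder namespace
                     name='ExampleMetric',           # placeholder metric name
                     value=1.0,
                     unit='Count')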

FILE: deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/alarm.py
  class MetricAlarm (line 31) | class MetricAlarm(object):
    method __init__ (line 45) | def __init__(self, connection=None, name=None, metric=None,
    method __repr__ (line 143) | def __repr__(self):
    method startElement (line 149) | def startElement(self, name, attrs, connection):
    method endElement (line 162) | def endElement(self, name, value, connection):
    method set_state (line 196) | def set_state(self, value, reason, data=None):
    method update (line 210) | def update(self):
    method enable_actions (line 213) | def enable_actions(self):
    method disable_actions (line 216) | def disable_actions(self):
    method describe_history (line 219) | def describe_history(self, start_date=None, end_date=None, max_records...
    method add_alarm_action (line 226) | def add_alarm_action(self, action_arn=None):
    method add_insufficient_data_action (line 240) | def add_insufficient_data_action(self, action_arn=None):
    method add_ok_action (line 254) | def add_ok_action(self, action_arn=None):
    method delete (line 268) | def delete(self):
  class AlarmHistoryItem (line 271) | class AlarmHistoryItem(object):
    method __init__ (line 272) | def __init__(self, connection=None):
    method __repr__ (line 275) | def __repr__(self):
    method startElement (line 278) | def startElement(self, name, attrs, connection):
    method endElement (line 281) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/datapoint.py
  class Datapoint (line 24) | class Datapoint(dict):
    method __init__ (line 26) | def __init__(self, connection=None):
    method startElement (line 30) | def startElement(self, name, attrs, connection):
    method endElement (line 33) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/listelement.py
  class ListElement (line 22) | class ListElement(list):
    method startElement (line 24) | def startElement(self, name, attrs, connection):
    method endElement (line 27) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/metric.py
  class Dimension (line 25) | class Dimension(dict):
    method startElement (line 27) | def startElement(self, name, attrs, connection):
    method endElement (line 30) | def endElement(self, name, value, connection):
  class Metric (line 41) | class Metric(object):
    method __init__ (line 52) | def __init__(self, connection=None):
    method __repr__ (line 58) | def __repr__(self):
    method startElement (line 61) | def startElement(self, name, attrs, connection):
    method endElement (line 66) | def endElement(self, name, value, connection):
    method query (line 74) | def query(self, start_time, end_time, statistics, unit=None, period=60):
    method create_alarm (line 86) | def create_alarm(self, name, comparison, threshold,
    method describe_alarms (line 102) | def describe_alarms(self, period=None, statistic=None,
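
Usage sketch (editor's addition, not part of the repository): Metric objects are usually obtained from the connection's list_metrics() (defined earlier in boto/ec2/cloudwatch/__init__.py, outside this excerpt) and then queried with the signature above; Datapoint is a dict subclass, so results print directly. The metric-name filter and time window are illustrative.

    import datetime
    from boto.ec2.cloudwatch import CloudWatchConnection

    cw = CloudWatchConnection()
    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(hours=1)

    for metric in cw.list_metrics():
        if metric.name == 'CPUUtilization':
            for dp in metric.query(start, end, ['Average'], period=300):
                print dp
            break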

FILE: deploy/third_party/boto-2.1.1/boto/ec2/connection.py
  class EC2Connection (line 58) | class EC2Connection(AWSQueryConnection):
    method __init__ (line 66) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 92) | def _required_auth_capability(self):
    method get_params (line 95) | def get_params(self):
    method build_filter_params (line 109) | def build_filter_params(self, params, filters):
    method get_all_images (line 127) | def get_all_images(self, image_ids=None, owners=None,
    method get_all_kernels (line 167) | def get_all_kernels(self, kernel_ids=None, owners=None):
    method get_all_ramdisks (line 191) | def get_all_ramdisks(self, ramdisk_ids=None, owners=None):
    method get_image (line 215) | def get_image(self, image_id):
    method register_image (line 230) | def register_image(self, name=None, description=None, image_location=N...
    method deregister_image (line 287) | def deregister_image(self, image_id, delete_snapshot=False):
    method create_image (line 316) | def create_image(self, instance_id, name,
    method get_image_attribute (line 353) | def get_image_attribute(self, image_id, attribute='launchPermission'):
    method modify_image_attribute (line 376) | def modify_image_attribute(self, image_id, attribute='launchPermission',
    method reset_image_attribute (line 414) | def reset_image_attribute(self, image_id, attribute='launchPermission'):
    method get_all_instances (line 433) | def get_all_instances(self, instance_ids=None, filters=None):
    method run_instances (line 466) | def run_instances(self, image_id, min_count=1, max_count=1,
    method terminate_instances (line 628) | def terminate_instances(self, instance_ids=None):
    method stop_instances (line 644) | def stop_instances(self, instance_ids=None, force=False):
    method start_instances (line 665) | def start_instances(self, instance_ids=None):
    method get_console_output (line 681) | def get_console_output(self, instance_id):
    method reboot_instances (line 696) | def reboot_instances(self, instance_ids=None):
    method confirm_product_instance (line 708) | def confirm_product_instance(self, product_code, instance_id):
    method get_instance_attribute (line 717) | def get_instance_attribute(self, instance_id, attribute):
    method modify_instance_attribute (line 743) | def modify_instance_attribute(self, instance_id, attribute, value):
    method reset_instance_attribute (line 780) | def reset_instance_attribute(self, instance_id, attribute):
    method get_all_spot_instance_requests (line 800) | def get_all_spot_instance_requests(self, request_ids=None,
    method get_spot_price_history (line 835) | def get_spot_price_history(self, start_time=None, end_time=None,
    method request_spot_instances (line 878) | def request_spot_instances(self, price, image_id, count=1, type='one-t...
    method cancel_spot_instance_requests (line 1016) | def cancel_spot_instance_requests(self, request_ids):
    method get_spot_datafeed_subscription (line 1032) | def get_spot_datafeed_subscription(self):
    method create_spot_datafeed_subscription (line 1043) | def create_spot_datafeed_subscription(self, bucket, prefix):
    method delete_spot_datafeed_subscription (line 1066) | def delete_spot_datafeed_subscription(self):
    method get_all_zones (line 1079) | def get_all_zones(self, zones=None, filters=None):
    method get_all_addresses (line 1111) | def get_all_addresses(self, addresses=None, filters=None, allocation_i...
    method allocate_address (line 1147) | def allocate_address(self, domain=None):
    method associate_address (line 1161) | def associate_address(self, instance_id, public_ip=None, allocation_id...
    method disassociate_address (line 1188) | def disassociate_address(self, public_ip=None, association_id=None):
    method release_address (line 1210) | def release_address(self, public_ip=None, allocation_id=None):
    method get_all_volumes (line 1234) | def get_all_volumes(self, volume_ids=None, filters=None):
    method create_volume (line 1264) | def create_volume(self, size, zone, snapshot=None):
    method delete_volume (line 1288) | def delete_volume(self, volume_id):
    method attach_volume (line 1301) | def attach_volume(self, volume_id, instance_id, device):
    method detach_volume (line 1324) | def detach_volume(self, volume_id, instance_id=None,
    method get_all_snapshots (line 1363) | def get_all_snapshots(self, snapshot_ids=None,
    method create_snapshot (line 1411) | def create_snapshot(self, volume_id, description=None):
    method delete_snapshot (line 1436) | def delete_snapshot(self, snapshot_id):
    method trim_snapshots (line 1440) | def trim_snapshots(self, hourly_backups = 8, daily_backups = 7,
    method get_snapshot_attribute (line 1588) | def get_snapshot_attribute(self, snapshot_id,
    method modify_snapshot_attribute (line 1611) | def modify_snapshot_attribute(self, snapshot_id,
    method reset_snapshot_attribute (line 1645) | def reset_snapshot_attribute(self, snapshot_id,
    method get_all_key_pairs (line 1665) | def get_all_key_pairs(self, keynames=None, filters=None):
    method get_key_pair (line 1694) | def get_key_pair(self, keyname):
    method create_key_pair (line 1709) | def create_key_pair(self, key_name):
    method delete_key_pair (line 1726) | def delete_key_pair(self, key_name):
    method import_key_pair (line 1736) | def import_key_pair(self, key_name, public_key_material):
    method get_all_security_groups (line 1775) | def get_all_security_groups(self, groupnames=None, group_ids=None, fil...
    method create_security_group (line 1812) | def create_security_group(self, name, description, vpc_id=None):
    method delete_security_group (line 1845) | def delete_security_group(self, name=None, group_id=None):
    method authorize_security_group_deprecated (line 1868) | def authorize_security_group_deprecated(self, group_name,
    method authorize_security_group (line 1922) | def authorize_security_group(self, group_name=None,
    method authorize_security_group_egress (line 2008) | def authorize_security_group_egress(group_id,
    method revoke_security_group_deprecated (line 2038) | def revoke_security_group_deprecated(self, group_name,
    method revoke_security_group (line 2110) | def revoke_security_group(self, group_name, src_security_group_name=None,
    method get_all_regions (line 2179) | def get_all_regions(self, region_names=None, filters=None):
    method get_all_reserved_instances_offerings (line 2214) | def get_all_reserved_instances_offerings(self, reserved_instances_id=N...
    method get_all_reserved_instances (line 2267) | def get_all_reserved_instances(self, reserved_instances_id=None,
    method purchase_reserved_instance_offering (line 2299) | def purchase_reserved_instance_offering(self,
    method monitor_instances (line 2329) | def monitor_instances(self, instance_ids):
    method monitor_instance (line 2344) | def monitor_instance(self, instance_id):
    method unmonitor_instances (line 2357) | def unmonitor_instances(self, instance_ids):
    method unmonitor_instance (line 2372) | def unmonitor_instance(self, instance_id):
    method bundle_instance (line 2389) | def bundle_instance(self, instance_id,
    method get_all_bundle_tasks (line 2423) | def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
    method cancel_bundle_task (line 2452) | def cancel_bundle_task(self, bundle_id):
    method get_password_data (line 2464) | def get_password_data(self, instance_id):
    method get_all_placement_groups (line 2481) | def get_all_placement_groups(self, groupnames=None, filters=None):
    method create_placement_group (line 2511) | def create_placement_group(self, name, strategy='cluster'):
    method delete_placement_group (line 2531) | def delete_placement_group(self, name):
    method build_tag_param_list (line 2543) | def build_tag_param_list(self, params, tags):
    method get_all_tags (line 2554) | def get_all_tags(self, filters=None):
    method create_tags (line 2577) | def create_tags(self, resource_ids, tags):
    method delete_tags (line 2596) | def delete_tags(self, resource_ids, tags):
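
Usage sketch (editor's addition, not part of the repository): a typical launch/tag/terminate cycle through EC2Connection. The AMI id, key pair and security group names are placeholders; key_name, security_groups and instance_type are keyword arguments of run_instances that are truncated in the index above and reflect the boto 2.x signature.

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()   # credentials from env vars or ~/.boto

    reservation = conn.run_instances('ami-12345678',        # placeholder AMI
                                     min_count=1, max_count=1,
                                     key_name='sparrow',
                                     security_groups=['sparrow'],
                                     instance_type='m1.large')
    instance = reservation.instances[0]

    # Tag the instance so deploy scripts can find it later.
    conn.create_tags([instance.id], {'Name': 'sparrow-backend'})

    # get_all_instances returns Reservation objects; flatten to instances.
    instances = [i for r in conn.get_all_instances() for i in r.instances]

    conn.terminate_instances([instance.id])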

FILE: deploy/third_party/boto-2.1.1/boto/ec2/ec2object.py
  class EC2Object (line 28) | class EC2Object(object):
    method __init__ (line 30) | def __init__(self, connection=None):
    method startElement (line 37) | def startElement(self, name, attrs, connection):
    method endElement (line 40) | def endElement(self, name, value, connection):
  class TaggedEC2Object (line 44) | class TaggedEC2Object(EC2Object):
    method __init__ (line 55) | def __init__(self, connection=None):
    method startElement (line 59) | def startElement(self, name, attrs, connection):
    method add_tag (line 65) | def add_tag(self, key, value=''):
    method remove_tag (line 84) | def remove_tag(self, key, value=None):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/__init__.py
  function regions (line 41) | def regions():
  function connect_to_region (line 56) | def connect_to_region(region_name, **kw_params):
  class ELBConnection (line 72) | class ELBConnection(AWSQueryConnection):
    method __init__ (line 79) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 100) | def _required_auth_capability(self):
    method build_list_params (line 103) | def build_list_params(self, params, items, label):
    method get_all_load_balancers (line 109) | def get_all_load_balancers(self, load_balancer_names=None):
    method create_load_balancer (line 126) | def create_load_balancer(self, name, zones, listeners):
    method create_load_balancer_listeners (line 165) | def create_load_balancer_listeners(self, name, listeners):
    method delete_load_balancer (line 195) | def delete_load_balancer(self, name):
    method delete_load_balancer_listeners (line 205) | def delete_load_balancer_listeners(self, name, ports):
    method enable_availability_zones (line 222) | def enable_availability_zones(self, load_balancer_name, zones_to_add):
    method disable_availability_zones (line 245) | def disable_availability_zones(self, load_balancer_name, zones_to_remo...
    method register_instances (line 269) | def register_instances(self, load_balancer_name, instances):
    method deregister_instances (line 289) | def deregister_instances(self, load_balancer_name, instances):
    method describe_instance_health (line 309) | def describe_instance_health(self, load_balancer_name, instances=None):
    method configure_health_check (line 332) | def configure_health_check(self, name, health_check):
    method set_lb_listener_SSL_certificate (line 354) | def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
    method create_app_cookie_stickiness_policy (line 368) | def create_app_cookie_stickiness_policy(self, name, lb_name, policy_na...
    method create_lb_cookie_stickiness_policy (line 391) | def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
    method delete_lb_policy (line 418) | def delete_lb_policy(self, lb_name, policy_name):
    method set_lb_policies_of_listener (line 429) | def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
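
Usage sketch (editor's addition, not part of the repository): creating a load balancer and registering instances with ELBConnection. Listener tuples follow the boto 2.x (load-balancer port, instance port, protocol) convention; the balancer name, zone and instance id are placeholders.

    from boto.ec2.elb import ELBConnection

    elb = ELBConnection()
    elb.create_load_balancer('sparrow-frontends',
                             zones=['us-east-1a'],
                             listeners=[(80, 8080, 'HTTP')])

    elb.register_instances('sparrow-frontends', ['i-12345678'])
    for state in elb.describe_instance_health('sparrow-frontends'):
        print state

    elb.delete_load_balancer('sparrow-frontends')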

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/healthcheck.py
  class HealthCheck (line 22) | class HealthCheck(object):
    method __init__ (line 27) | def __init__(self, access_point=None, interval=30, target=None,
    method __repr__ (line 36) | def __repr__(self):
    method startElement (line 39) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
    method update (line 56) | def update(self):
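
Usage sketch (editor's addition, not part of the repository): a HealthCheck is built locally and applied with ELBConnection.configure_health_check (listed in elb/__init__.py above). Only interval and target appear in the truncated constructor; healthy_threshold, unhealthy_threshold and timeout are assumed from the boto 2.x class, and the target path is hypothetical.

    from boto.ec2.elb import ELBConnection
    from boto.ec2.elb.healthcheck import HealthCheck

    elb = ELBConnection()
    hc = HealthCheck(interval=20,
                     target='HTTP:8080/healthz',   # 'PROTOCOL:port/path'
                     healthy_threshold=3,
                     unhealthy_threshold=5,
                     timeout=5)
    elb.configure_health_check('sparrow-frontends', hc)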

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/instancestate.py
  class InstanceState (line 22) | class InstanceState(object):
    method __init__ (line 27) | def __init__(self, load_balancer=None, description=None,
    method __repr__ (line 35) | def __repr__(self):
    method startElement (line 38) | def startElement(self, name, attrs, connection):
    method endElement (line 41) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/listelement.py
  class ListElement (line 22) | class ListElement(list):
    method startElement (line 24) | def startElement(self, name, attrs, connection):
    method endElement (line 27) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/listener.py
  class Listener (line 22) | class Listener(object):
    method __init__ (line 27) | def __init__(self, load_balancer=None, load_balancer_port=0,
    method __repr__ (line 35) | def __repr__(self):
    method startElement (line 42) | def startElement(self, name, attrs, connection):
    method endElement (line 45) | def endElement(self, name, value, connection):
    method get_tuple (line 57) | def get_tuple(self):
    method __getitem__ (line 60) | def __getitem__(self, key):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/loadbalancer.py
  class LoadBalancer (line 30) | class LoadBalancer(object):
    method __init__ (line 35) | def __init__(self, connection=None, name=None, endpoints=None):
    method __repr__ (line 49) | def __repr__(self):
    method startElement (line 52) | def startElement(self, name, attrs, connection):
    method endElement (line 73) | def endElement(self, name, value, connection):
    method enable_zones (line 89) | def enable_zones(self, zones):
    method disable_zones (line 103) | def disable_zones(self, zones):
    method register_instances (line 116) | def register_instances(self, instances):
    method deregister_instances (line 132) | def deregister_instances(self, instances):
    method delete (line 147) | def delete(self):
    method configure_health_check (line 153) | def configure_health_check(self, health_check):
    method get_instance_health (line 156) | def get_instance_health(self, instances=None):
    method create_listeners (line 159) | def create_listeners(self, listeners):
    method create_listener (line 162) | def create_listener(self, inPort, outPort=None, proto="tcp"):
    method delete_listeners (line 167) | def delete_listeners(self, listeners):
    method delete_listener (line 170) | def delete_listener(self, inPort, outPort=None, proto="tcp"):
    method delete_policy (line 175) | def delete_policy(self, policy_name):
    method set_policies_of_listener (line 182) | def set_policies_of_listener(self, lb_port, policies):
    method create_cookie_stickiness_policy (line 185) | def create_cookie_stickiness_policy(self, cookie_expiration_period, po...
    method create_app_cookie_stickiness_policy (line 188) | def create_app_cookie_stickiness_policy(self, name, policy_name):
    method set_listener_SSL_certificate (line 191) | def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/policies.py
  class AppCookieStickinessPolicy (line 25) | class AppCookieStickinessPolicy(object):
    method __init__ (line 26) | def __init__(self, connection=None):
    method __repr__ (line 30) | def __repr__(self):
    method startElement (line 34) | def startElement(self, name, attrs, connection):
    method endElement (line 37) | def endElement(self, name, value, connection):
  class LBCookieStickinessPolicy (line 44) | class LBCookieStickinessPolicy(object):
    method __init__ (line 45) | def __init__(self, connection=None):
    method __repr__ (line 49) | def __repr__(self):
    method startElement (line 53) | def startElement(self, name, attrs, connection):
    method endElement (line 56) | def endElement(self, name, value, connection):
  class Policies (line 63) | class Policies(object):
    method __init__ (line 67) | def __init__(self, connection=None):
    method __repr__ (line 72) | def __repr__(self):
    method startElement (line 77) | def startElement(self, name, attrs, connection):
    method endElement (line 85) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/elb/securitygroup.py
  class SecurityGroup (line 22) | class SecurityGroup(object):
    method __init__ (line 23) | def __init__(self, connection=None):
    method __repr__ (line 27) | def __repr__(self):
    method startElement (line 30) | def startElement(self, name, attrs, connection):
    method endElement (line 33) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/image.py
  class ProductCodes (line 26) | class ProductCodes(list):
    method startElement (line 28) | def startElement(self, name, attrs, connection):
    method endElement (line 31) | def endElement(self, name, value, connection):
  class Image (line 35) | class Image(TaggedEC2Object):
    method __init__ (line 40) | def __init__(self, connection=None):
    method __repr__ (line 64) | def __repr__(self):
    method startElement (line 67) | def startElement(self, name, attrs, connection):
    method endElement (line 79) | def endElement(self, name, value, connection):
    method _update (line 130) | def _update(self, updated):
    method update (line 133) | def update(self, validate=False):
    method run (line 154) | def run(self, min_count=1, max_count=1, key_name=None,
    method deregister (line 250) | def deregister(self, delete_snapshot=False):
    method get_launch_permissions (line 253) | def get_launch_permissions(self):
    method set_launch_permissions (line 258) | def set_launch_permissions(self, user_ids=None, group_names=None):
    method remove_launch_permissions (line 265) | def remove_launch_permissions(self, user_ids=None, group_names=None):
    method reset_launch_attributes (line 272) | def reset_launch_attributes(self):
    method get_kernel (line 276) | def get_kernel(self):
    method get_ramdisk (line 280) | def get_ramdisk(self):
  class ImageAttribute (line 284) | class ImageAttribute:
    method __init__ (line 286) | def __init__(self, parent=None):
    method startElement (line 292) | def startElement(self, name, attrs, connection):
    method endElement (line 299) | def endElement(self, name, value, connection):
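
Usage sketch (editor's addition, not part of the repository): Image.run launches instances directly from an Image object fetched with EC2Connection.get_image. The AMI id and key pair name are placeholders; instance_type is assumed to be among run()'s truncated keyword arguments, mirroring run_instances.

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    image = conn.get_image('ami-12345678')          # placeholder AMI id
    reservation = image.run(min_count=1, max_count=1,
                            key_name='sparrow',
                            instance_type='m1.large')
    print reservation.instances[0].id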

FILE: deploy/third_party/boto-2.1.1/boto/ec2/instance.py
  class Reservation (line 34) | class Reservation(EC2Object):
    method __init__ (line 46) | def __init__(self, connection=None):
    method __repr__ (line 53) | def __repr__(self):
    method startElement (line 56) | def startElement(self, name, attrs, connection):
    method endElement (line 66) | def endElement(self, name, value, connection):
    method stop_all (line 74) | def stop_all(self):
  class Instance (line 78) | class Instance(TaggedEC2Object):
    method __init__ (line 80) | def __init__(self, connection=None):
    method __repr__ (line 118) | def __repr__(self):
    method startElement (line 121) | def startElement(self, name, attrs, connection):
    method endElement (line 140) | def endElement(self, name, value, connection):
    method _update (line 216) | def _update(self, updated):
    method update (line 219) | def update(self, validate=False):
    method terminate (line 241) | def terminate(self):
    method stop (line 249) | def stop(self, force=False):
    method start (line 263) | def start(self):
    method reboot (line 271) | def reboot(self):
    method get_console_output (line 274) | def get_console_output(self):
    method confirm_product (line 283) | def confirm_product(self, product_code):
    method use_ip (line 286) | def use_ip(self, ip_address):
    method monitor (line 291) | def monitor(self):
    method unmonitor (line 294) | def unmonitor(self):
    method get_attribute (line 297) | def get_attribute(self, attribute):
    method modify_attribute (line 315) | def modify_attribute(self, attribute, value):
    method reset_attribute (line 339) | def reset_attribute(self, attribute):
  class Group (line 352) | class Group:
    method __init__ (line 354) | def __init__(self, parent=None):
    method startElement (line 358) | def startElement(self, name, attrs, connection):
    method endElement (line 361) | def endElement(self, name, value, connection):
  class ConsoleOutput (line 369) | class ConsoleOutput:
    method __init__ (line 371) | def __init__(self, parent=None):
    method startElement (line 377) | def startElement(self, name, attrs, connection):
    method endElement (line 380) | def endElement(self, name, value, connection):
  class InstanceAttribute (line 390) | class InstanceAttribute(dict):
    method __init__ (line 397) | def __init__(self, parent=None):
    method startElement (line 403) | def startElement(self, name, attrs, connection):
    method endElement (line 413) | def endElement(self, name, value, connection):
  class StateReason (line 423) | class StateReason(dict):
    method __init__ (line 425) | def __init__(self, parent=None):
    method startElement (line 428) | def startElement(self, name, attrs, connection):
    method endElement (line 431) | def endElement(self, name, value, connection):
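
Usage sketch (editor's addition, not part of the repository): driving a single Instance through its lifecycle. The instance id is a placeholder; update() is assumed to return the refreshed state string and public_dns_name to be one of the fields parsed in endElement, as in stock boto 2.x.

    import time
    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    reservation = conn.get_all_instances(['i-12345678'])[0]
    instance = reservation.instances[0]

    # Poll until the instance is running, then print its public address.
    while instance.update() != 'running':
        time.sleep(5)
    print instance.public_dns_name

    instance.add_tag('role', 'sparrow-backend')   # from TaggedEC2Object
    instance.stop()
    instance.terminate()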

FILE: deploy/third_party/boto-2.1.1/boto/ec2/instanceinfo.py
  class InstanceInfo (line 22) | class InstanceInfo(object):
    method __init__ (line 27) | def __init__(self, connection=None, id=None, state=None):
    method __repr__ (line 32) | def __repr__(self):
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 38) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/keypair.py
  class KeyPair (line 30) | class KeyPair(EC2Object):
    method __init__ (line 32) | def __init__(self, connection=None):
    method __repr__ (line 38) | def __repr__(self):
    method endElement (line 41) | def endElement(self, name, value, connection):
    method delete (line 51) | def delete(self):
    method save (line 60) | def save(self, directory_path):
    method copy_to_region (line 91) | def copy_to_region(self, region):
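
Usage sketch (editor's addition, not part of the repository): key pairs are created through the connection and written to disk with KeyPair.save. The key name and directory are placeholders; save() is expected to write <directory>/<name>.pem per the usual boto 2.x behaviour.

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    kp = conn.create_key_pair('sparrow')   # KeyPair carrying the private key
    kp.save('/tmp')                        # expected to write /tmp/sparrow.pem
    # ...and later:
    conn.delete_key_pair('sparrow')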

FILE: deploy/third_party/boto-2.1.1/boto/ec2/launchspecification.py
  class GroupList (line 31) | class GroupList(list):
    method startElement (line 33) | def startElement(self, name, attrs, connection):
    method endElement (line 36) | def endElement(self, name, value, connection):
  class LaunchSpecification (line 40) | class LaunchSpecification(EC2Object):
    method __init__ (line 42) | def __init__(self, connection=None):
    method __repr__ (line 56) | def __repr__(self):
    method startElement (line 59) | def startElement(self, name, attrs, connection):
    method endElement (line 71) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/placementgroup.py
  class PlacementGroup (line 27) | class PlacementGroup(EC2Object):
    method __init__ (line 29) | def __init__(self, connection=None, name=None, strategy=None, state=No...
    method __repr__ (line 35) | def __repr__(self):
    method endElement (line 38) | def endElement(self, name, value, connection):
    method delete (line 48) | def delete(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/regioninfo.py
  class EC2RegionInfo (line 26) | class EC2RegionInfo(RegionInfo):
    method __init__ (line 31) | def __init__(self, connection=None, name=None, endpoint=None):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/reservedinstance.py
  class ReservedInstancesOffering (line 24) | class ReservedInstancesOffering(EC2Object):
    method __init__ (line 26) | def __init__(self, connection=None, id=None, instance_type=None,
    method __repr__ (line 38) | def __repr__(self):
    method startElement (line 41) | def startElement(self, name, attrs, connection):
    method endElement (line 44) | def endElement(self, name, value, connection):
    method describe (line 62) | def describe(self):
    method purchase (line 71) | def purchase(self, instance_count=1):
  class ReservedInstance (line 74) | class ReservedInstance(ReservedInstancesOffering):
    method __init__ (line 76) | def __init__(self, connection=None, id=None, instance_type=None,
    method __repr__ (line 86) | def __repr__(self):
    method endElement (line 89) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/securitygroup.py
  class SecurityGroup (line 29) | class SecurityGroup(TaggedEC2Object):
    method __init__ (line 31) | def __init__(self, connection=None, owner_id=None,
    method __repr__ (line 42) | def __repr__(self):
    method startElement (line 45) | def startElement(self, name, attrs, connection):
    method endElement (line 56) | def endElement(self, name, value, connection):
    method delete (line 84) | def delete(self):
    method add_rule (line 87) | def add_rule(self, ip_protocol, from_port, to_port,
    method remove_rule (line 101) | def remove_rule(self, ip_protocol, from_port, to_port,
    method authorize (line 125) | def authorize(self, ip_protocol=None, from_port=None, to_port=None,
    method revoke (line 173) | def revoke(self, ip_protocol=None, from_port=None, to_port=None,
    method copy_to_region (line 194) | def copy_to_region(self, region, name=None):
    method instances (line 229) | def instances(self):
  class IPPermissionsList (line 247) | class IPPermissionsList(list):
    method startElement (line 249) | def startElement(self, name, attrs, connection):
    method endElement (line 255) | def endElement(self, name, value, connection):
  class IPPermissions (line 258) | class IPPermissions(object):
    method __init__ (line 260) | def __init__(self, parent=None):
    method __repr__ (line 267) | def __repr__(self):
    method startElement (line 271) | def startElement(self, name, attrs, connection):
    method endElement (line 277) | def endElement(self, name, value, connection):
    method add_grant (line 287) | def add_grant(self, name=None, owner_id=None, cidr_ip=None):
  class GroupOrCIDR (line 295) | class GroupOrCIDR(object):
    method __init__ (line 297) | def __init__(self, parent=None):
    method __repr__ (line 302) | def __repr__(self):
    method startElement (line 308) | def startElement(self, name, attrs, connection):
    method endElement (line 311) | def endElement(self, name, value, connection):
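
Usage sketch (editor's addition, not part of the repository): opening ports on a security group created through the connection. The group name, ports and CIDR blocks are placeholders; cidr_ip is assumed to be one of authorize()'s truncated keyword arguments (the standard boto 2.x name).

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    sg = conn.create_security_group('sparrow', 'Sparrow cluster nodes')
    sg.authorize(ip_protocol='tcp', from_port=22, to_port=22,
                 cidr_ip='0.0.0.0/0')
    sg.authorize(ip_protocol='tcp', from_port=8080, to_port=8080,
                 cidr_ip='10.0.0.0/8')
    # sg.revoke(...) takes the same arguments;
    # conn.delete_security_group('sparrow') removes the group.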

FILE: deploy/third_party/boto-2.1.1/boto/ec2/snapshot.py
  class Snapshot (line 28) | class Snapshot(TaggedEC2Object):
    method __init__ (line 32) | def __init__(self, connection=None):
    method __repr__ (line 43) | def __repr__(self):
    method endElement (line 46) | def endElement(self, name, value, connection):
    method _update (line 67) | def _update(self, updated):
    method update (line 71) | def update(self, validate=False):
    method delete (line 89) | def delete(self):
    method get_permissions (line 92) | def get_permissions(self):
    method share (line 96) | def share(self, user_ids=None, groups=None):
    method unshare (line 103) | def unshare(self, user_ids=None, groups=None):
    method reset_permissions (line 110) | def reset_permissions(self):
  class SnapshotAttribute (line 114) | class SnapshotAttribute:
    method __init__ (line 116) | def __init__(self, parent=None):
    method startElement (line 120) | def startElement(self, name, attrs, connection):
    method endElement (line 123) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/spotdatafeedsubscription.py
  class SpotDatafeedSubscription (line 28) | class SpotDatafeedSubscription(EC2Object):
    method __init__ (line 30) | def __init__(self, connection=None, owner_id=None,
    method __repr__ (line 39) | def __repr__(self):
    method startElement (line 42) | def startElement(self, name, attrs, connection):
    method endElement (line 49) | def endElement(self, name, value, connection):
    method delete (line 61) | def delete(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/spotinstancerequest.py
  class SpotInstanceStateFault (line 30) | class SpotInstanceStateFault(object):
    method __init__ (line 32) | def __init__(self, code=None, message=None):
    method __repr__ (line 36) | def __repr__(self):
    method startElement (line 39) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
  class SpotInstanceRequest (line 49) | class SpotInstanceRequest(TaggedEC2Object):
    method __init__ (line 51) | def __init__(self, connection=None):
    method __repr__ (line 67) | def __repr__(self):
    method startElement (line 70) | def startElement(self, name, attrs, connection):
    method endElement (line 83) | def endElement(self, name, value, connection):
    method cancel (line 109) | def cancel(self):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/spotpricehistory.py
  class SpotPriceHistory (line 28) | class SpotPriceHistory(EC2Object):
    method __init__ (line 30) | def __init__(self, connection=None):
    method __repr__ (line 38) | def __repr__(self):
    method endElement (line 41) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/tag.py
  class TagSet (line 23) | class TagSet(dict):
    method __init__ (line 31) | def __init__(self, connection=None):
    method startElement (line 36) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
  class Tag (line 51) | class Tag(object):
    method __init__ (line 59) | def __init__(self, connection=None, res_id=None, res_type=None,
    method __repr__ (line 67) | def __repr__(self):
    method startElement (line 70) | def startElement(self, name, attrs, connection):
    method endElement (line 73) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ec2/volume.py
  class Volume (line 28) | class Volume(TaggedEC2Object):
    method __init__ (line 30) | def __init__(self, connection=None):
    method __repr__ (line 40) | def __repr__(self):
    method startElement (line 43) | def startElement(self, name, attrs, connection):
    method endElement (line 56) | def endElement(self, name, value, connection):
    method _update (line 73) | def _update(self, updated):
    method update (line 76) | def update(self, validate=False):
    method delete (line 94) | def delete(self):
    method attach (line 103) | def attach(self, instance_id, device):
    method detach (line 120) | def detach(self, force=False):
    method create_snapshot (line 144) | def create_snapshot(self, description=None):
    method volume_state (line 156) | def volume_state(self):
    method attachment_state (line 162) | def attachment_state(self):
    method snapshots (line 171) | def snapshots(self, owner=None, restorable_by=None):
  class AttachmentSet (line 199) | class AttachmentSet(object):
    method __init__ (line 201) | def __init__(self):
    method __repr__ (line 208) | def __repr__(self):
    method startElement (line 211) | def startElement(self, name, attrs, connection):
    method endElement (line 214) | def endElement(self, name, value, connection):
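
Usage sketch (editor's addition, not part of the repository): creating an EBS volume, attaching it, and snapshotting it through the Volume object. The zone, instance id and device name are placeholders; the size argument is in GB.

    from boto.ec2.connection import EC2Connection

    conn = EC2Connection()
    vol = conn.create_volume(50, 'us-east-1a')
    vol.attach('i-12345678', '/dev/sdf')
    snap = vol.create_snapshot('sparrow data snapshot')
    vol.detach()
    vol.delete()   # only succeeds once the detach has completed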

FILE: deploy/third_party/boto-2.1.1/boto/ec2/zone.py
  class Zone (line 27) | class Zone(EC2Object):
    method __init__ (line 29) | def __init__(self, connection=None):
    method __repr__ (line 34) | def __repr__(self):
    method endElement (line 37) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/ecs/__init__.py
  class ECSConnection (line 30) | class ECSConnection(AWSQueryConnection):
    method __init__ (line 41) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 49) | def _required_auth_capability(self):
    method get_response (line 52) | def get_response(self, action, params, page=0, itemSet=None):
    method item_search (line 81) | def item_search(self, search_index, **params):

FILE: deploy/third_party/boto-2.1.1/boto/ecs/item.py
  class ResponseGroup (line 27) | class ResponseGroup(xml.sax.ContentHandler):
    method __init__ (line 32) | def __init__(self, connection=None, nodename=None):
    method __repr__ (line 40) | def __repr__(self):
    method get (line 46) | def get(self, name):
    method set (line 49) | def set(self, name, value):
    method to_xml (line 52) | def to_xml(self):
    method startElement (line 58) | def startElement(self, name, attrs, connection):
    method endElement (line 69) | def endElement(self, name, value, connection):
  class Item (line 88) | class Item(ResponseGroup):
    method __init__ (line 91) | def __init__(self, connection=None):
  class ItemSet (line 95) | class ItemSet(ResponseGroup):
    method __init__ (line 99) | def __init__(self, connection, action, params, page=0):
    method startElement (line 110) | def startElement(self, name, attrs, connection):
    method endElement (line 117) | def endElement(self, name, value, connection):
    method next (line 130) | def next(self):
    method __iter__ (line 146) | def __iter__(self):
    method to_xml (line 149) | def to_xml(self):

FILE: deploy/third_party/boto-2.1.1/boto/emr/bootstrap_action.py
  class BootstrapAction (line 23) | class BootstrapAction(object):
    method __init__ (line 24) | def __init__(self, name, path, bootstrap_action_args):
    method args (line 33) | def args(self):
    method __repr__ (line 41) | def __repr__(self):

FILE: deploy/third_party/boto-2.1.1/boto/emr/connection.py
  class EmrConnection (line 37) | class EmrConnection(AWSQueryConnection):
    method __init__ (line 49) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 63) | def _required_auth_capability(self):
    method describe_jobflow (line 66) | def describe_jobflow(self, jobflow_id):
    method describe_jobflows (line 77) | def describe_jobflows(self, states=None, jobflow_ids=None,
    method terminate_jobflow (line 108) | def terminate_jobflow(self, jobflow_id):
    method terminate_jobflows (line 117) | def terminate_jobflows(self, jobflow_ids):
    method add_jobflow_steps (line 128) | def add_jobflow_steps(self, jobflow_id, steps):
    method add_instance_groups (line 149) | def add_instance_groups(self, jobflow_id, instance_groups):
    method modify_instance_groups (line 166) | def modify_instance_groups(self, instance_group_ids, new_sizes):
    method run_jobflow (line 191) | def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zo...
    method set_termination_protection (line 294) | def set_termination_protection(self, jobflow_id, termination_protectio...
    method _build_bootstrap_action_args (line 312) | def _build_bootstrap_action_args(self, bootstrap_action):
    method _build_step_args (line 327) | def _build_step_args(self, step):
    method _build_bootstrap_action_list (line 343) | def _build_bootstrap_action_list(self, bootstrap_actions):
    method _build_step_list (line 353) | def _build_step_list(self, steps):
    method _build_instance_common_args (line 363) | def _build_instance_common_args(self, ec2_keyname, availability_zone,
    method _build_instance_count_and_type_args (line 382) | def _build_instance_count_and_type_args(self, master_instance_type,
    method _build_instance_group_args (line 396) | def _build_instance_group_args(self, instance_group):
    method _build_instance_group_list_args (line 413) | def _build_instance_group_list_args(self, instance_groups):

FILE: deploy/third_party/boto-2.1.1/boto/emr/emrobject.py
  class EmrObject (line 31) | class EmrObject(object):
    method __init__ (line 34) | def __init__(self, connection=None):
    method startElement (line 37) | def startElement(self, name, attrs, connection):
    method endElement (line 40) | def endElement(self, name, value, connection):
  class RunJobFlowResponse (line 45) | class RunJobFlowResponse(EmrObject):
  class AddInstanceGroupsResponse (line 48) | class AddInstanceGroupsResponse(EmrObject):
  class ModifyInstanceGroupsResponse (line 51) | class ModifyInstanceGroupsResponse(EmrObject):
  class Arg (line 55) | class Arg(EmrObject):
    method __init__ (line 56) | def __init__(self, connection=None):
    method endElement (line 59) | def endElement(self, name, value, connection):
  class BootstrapAction (line 63) | class BootstrapAction(EmrObject):
    method startElement (line 70) | def startElement(self, name, attrs, connection):
  class KeyValue (line 76) | class KeyValue(EmrObject):
  class Step (line 83) | class Step(EmrObject):
    method __init__ (line 96) | def __init__(self, connection=None):
    method startElement (line 100) | def startElement(self, name, attrs, connection):
  class InstanceGroup (line 109) | class InstanceGroup(EmrObject):
  class JobFlow (line 129) | class JobFlow(EmrObject):
    method __init__ (line 157) | def __init__(self, connection=None):
    method startElement (line 163) | def startElement(self, name, attrs, connection):

FILE: deploy/third_party/boto-2.1.1/boto/emr/instance_group.py
  class InstanceGroup (line 22) | class InstanceGroup(object):
    method __init__ (line 23) | def __init__(self, num_instances, role, type, market, name, bidprice=N...
    method __repr__ (line 34) | def __repr__(self):

FILE: deploy/third_party/boto-2.1.1/boto/emr/step.py
  class Step (line 23) | class Step(object):
    method jar (line 27) | def jar(self):
    method args (line 34) | def args(self):
    method main_class (line 41) | def main_class(self):
  class JarStep (line 49) | class JarStep(Step):
    method __init__ (line 53) | def __init__(self, name, jar, main_class=None,
    method jar (line 79) | def jar(self):
    method args (line 82) | def args(self):
    method main_class (line 90) | def main_class(self):
  class StreamingStep (line 94) | class StreamingStep(Step):
    method __init__ (line 98) | def __init__(self, name, mapper, reducer=None, combiner=None,
    method jar (line 145) | def jar(self):
    method main_class (line 148) | def main_class(self):
    method args (line 151) | def args(self):
    method __repr__ (line 188) | def __repr__(self):
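
Usage sketch (editor's addition, not part of the repository): submitting a Hadoop streaming job through EmrConnection. The S3 URIs are placeholders; the input/output keyword arguments of StreamingStep, the steps/num_instances keyword arguments of run_jobflow, and the JobFlow.state attribute are assumed from the boto 2.x EMR API (those signatures are truncated in the index above).

    from boto.emr.connection import EmrConnection
    from boto.emr.step import StreamingStep

    emr = EmrConnection()
    step = StreamingStep(name='wordcount',
                         mapper='s3://mybucket/wordcount/mapper.py',
                         reducer='s3://mybucket/wordcount/reducer.py',
                         input='s3://mybucket/wordcount/input',
                         output='s3://mybucket/wordcount/output')
    jobflow_id = emr.run_jobflow(name='example-flow',
                                 log_uri='s3://mybucket/emr-logs',
                                 steps=[step],
                                 num_instances=3)
    print emr.describe_jobflow(jobflow_id).state
    emr.terminate_jobflow(jobflow_id)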

FILE: deploy/third_party/boto-2.1.1/boto/exception.py
  class BotoClientError (line 33) | class BotoClientError(StandardError):
    method __init__ (line 38) | def __init__(self, reason, *args):
    method __repr__ (line 42) | def __repr__(self):
    method __str__ (line 45) | def __str__(self):
  class SDBPersistenceError (line 48) | class SDBPersistenceError(StandardError):
  class StoragePermissionsError (line 52) | class StoragePermissionsError(BotoClientError):
  class S3PermissionsError (line 58) | class S3PermissionsError(StoragePermissionsError):
  class GSPermissionsError (line 64) | class GSPermissionsError(StoragePermissionsError):
  class BotoServerError (line 70) | class BotoServerError(StandardError):
    method __init__ (line 72) | def __init__(self, status, reason, body=None, *args):
    method __getattr__ (line 96) | def __getattr__(self, name):
    method __repr__ (line 103) | def __repr__(self):
    method __str__ (line 107) | def __str__(self):
    method startElement (line 111) | def startElement(self, name, attrs, connection):
    method endElement (line 114) | def endElement(self, name, value, connection):
    method _cleanupParsedProperties (line 125) | def _cleanupParsedProperties(self):
  class ConsoleOutput (line 131) | class ConsoleOutput:
    method __init__ (line 133) | def __init__(self, parent=None):
    method startElement (line 140) | def startElement(self, name, attrs, connection):
    method endElement (line 143) | def endElement(self, name, value, connection):
  class StorageCreateError (line 151) | class StorageCreateError(BotoServerError):
    method __init__ (line 155) | def __init__(self, status, reason, body=None):
    method endElement (line 159) | def endElement(self, name, value, connection):
  class S3CreateError (line 165) | class S3CreateError(StorageCreateError):
  class GSCreateError (line 171) | class GSCreateError(StorageCreateError):
  class StorageCopyError (line 177) | class StorageCopyError(BotoServerError):
  class S3CopyError (line 183) | class S3CopyError(StorageCopyError):
  class GSCopyError (line 189) | class GSCopyError(StorageCopyError):
  class SQSError (line 195) | class SQSError(BotoServerError):
    method __init__ (line 199) | def __init__(self, status, reason, body=None):
    method startElement (line 204) | def startElement(self, name, attrs, connection):
    method endElement (line 207) | def endElement(self, name, value, connection):
    method _cleanupParsedProperties (line 215) | def _cleanupParsedProperties(self):
  class SQSDecodeError (line 220) | class SQSDecodeError(BotoClientError):
    method __init__ (line 224) | def __init__(self, reason, message):
    method __repr__ (line 228) | def __repr__(self):
    method __str__ (line 231) | def __str__(self):
  class StorageResponseError (line 234) | class StorageResponseError(BotoServerError):
    method __init__ (line 238) | def __init__(self, status, reason, body=None):
    method startElement (line 242) | def startElement(self, name, attrs, connection):
    method endElement (line 245) | def endElement(self, name, value, connection):
    method _cleanupParsedProperties (line 251) | def _cleanupParsedProperties(self):
  class S3ResponseError (line 256) | class S3ResponseError(StorageResponseError):
  class GSResponseError (line 262) | class GSResponseError(StorageResponseError):
  class EC2ResponseError (line 268) | class EC2ResponseError(BotoServerError):
    method __init__ (line 273) | def __init__(self, status, reason, body=None):
    method startElement (line 282) | def startElement(self, name, attrs, connection):
    method endElement (line 289) | def endElement(self, name, value, connection):
    method _cleanupParsedProperties (line 295) | def _cleanupParsedProperties(self):
  class EmrResponseError (line 301) | class EmrResponseError(BotoServerError):
  class _EC2Error (line 307) | class _EC2Error:
    method __init__ (line 309) | def __init__(self, connection=None):
    method startElement (line 314) | def startElement(self, name, attrs, connection):
    method endElement (line 317) | def endElement(self, name, value, connection):
  class SDBResponseError (line 325) | class SDBResponseError(BotoServerError):
  class AWSConnectionError (line 331) | class AWSConnectionError(BotoClientError):
  class StorageDataError (line 337) | class StorageDataError(BotoClientError):
  class S3DataError (line 343) | class S3DataError(StorageDataError):
  class GSDataError (line 349) | class GSDataError(StorageDataError):
  class FPSResponseError (line 355) | class FPSResponseError(BotoServerError):
  class InvalidUriError (line 358) | class InvalidUriError(Exception):
    method __init__ (line 361) | def __init__(self, message):
  class InvalidAclError (line 365) | class InvalidAclError(Exception):
    method __init__ (line 368) | def __init__(self, message):
  class NoAuthHandlerFound (line 372) | class NoAuthHandlerFound(Exception):
  class TooManyAuthHandlerReadyToAuthenticate (line 376) | class TooManyAuthHandlerReadyToAuthenticate(Exception):
  class ResumableTransferDisposition (line 388) | class ResumableTransferDisposition(object):
  class ResumableUploadException (line 411) | class ResumableUploadException(Exception):
    method __init__ (line 418) | def __init__(self, message, disposition):
    method __repr__ (line 423) | def __repr__(self):
  class ResumableDownloadException (line 427) | class ResumableDownloadException(Exception):
    method __init__ (line 434) | def __init__(self, message, disposition):
    method __repr__ (line 439) | def __repr__(self):
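
Usage sketch (editor's addition, not part of the repository): server-side failures surface as BotoServerError subclasses such as EC2ResponseError, whose status/reason/body come from the constructor listed above. The instance id is a deliberately bogus placeholder used to trigger an error.

    from boto.ec2.connection import EC2Connection
    from boto.exception import EC2ResponseError

    conn = EC2Connection()
    try:
        conn.terminate_instances(['i-00000000'])
    except EC2ResponseError, e:
        print 'EC2 request failed: %s %s' % (e.status, e.reason)
        print e.body   # raw XML error document from the service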

FILE: deploy/third_party/boto-2.1.1/boto/file/bucket.py
  class Bucket (line 30) | class Bucket(object):
    method __init__ (line 31) | def __init__(self, name, contained_key):
    method __iter__ (line 37) | def __iter__(self):
    method __str__ (line 40) | def __str__(self):
    method delete_key (line 43) | def delete_key(self, key_name, headers=None,
    method get_all_keys (line 59) | def get_all_keys(self, headers=None, **params):
    method get_key (line 71) | def get_key(self, key_name, headers=None, version_id=None,
    method new_key (line 95) | def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):

FILE: deploy/third_party/boto-2.1.1/boto/file/connection.py
  class FileConnection (line 26) | class FileConnection(object):
    method __init__ (line 28) | def __init__(self, file_storage_uri):
    method get_bucket (line 32) | def get_bucket(self, bucket_name, validate=True, headers=None):

FILE: deploy/third_party/boto-2.1.1/boto/file/key.py
  class Key (line 28) | class Key(object):
    method __init__ (line 35) | def __init__(self, bucket, name, fp=None, key_type=KEY_REGULAR_FILE):
    method __str__ (line 52) | def __str__(self):
    method get_file (line 55) | def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
    method set_contents_from_file (line 79) | def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
    method get_contents_as_string (line 125) | def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
    method is_stream (line 153) | def is_stream(self):

FILE: deploy/third_party/boto-2.1.1/boto/file/simpleresultset.py
  class SimpleResultSet (line 22) | class SimpleResultSet(list):
    method __init__ (line 27) | def __init__(self, input_list):

FILE: deploy/third_party/boto-2.1.1/boto/fps/connection.py
  class FPSConnection (line 36) | class FPSConnection(AWSQueryConnection):
    method __init__ (line 40) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 51) | def _required_auth_capability(self):
    method install_payment_instruction (line 54) | def install_payment_instruction(self, instruction,
    method install_caller_instruction (line 76) | def install_caller_instruction(self, token_type="Unrestricted",
    method install_recipient_instruction (line 102) | def install_recipient_instruction(self, token_type="Unrestricted",
    method make_marketplace_registration_url (line 129) | def make_marketplace_registration_url(self, returnURL, pipelineName,
    method make_url (line 176) | def make_url(self, returnURL, paymentReason, pipelineName,
    method pay (line 220) | def pay(self, transactionAmount, senderTokenId,
    method get_transaction_status (line 278) | def get_transaction_status(self, transactionId):
    method cancel (line 295) | def cancel(self, transactionId, description=None):
    method settle (line 314) | def settle(self, reserveTransactionId, transactionAmount=None):
    method refund (line 333) | def refund(self, callerReference, transactionId, refundAmount=None,
    method get_recipient_verification_status (line 357) | def get_recipient_verification_status(self, recipientTokenId):
    method get_token_by_caller_reference (line 374) | def get_token_by_caller_reference(self, callerReference):
    method get_token_by_caller_token (line 391) | def get_token_by_caller_token(self, tokenId):
    method verify_signature (line 408) | def verify_signature(self, end_point_url, http_parameters):

FILE: deploy/third_party/boto-2.1.1/boto/gs/acl.py
  class ACL (line 51) | class ACL:
    method __init__ (line 53) | def __init__(self, parent=None):
    method acl (line 58) | def acl(self):
    method __repr__ (line 61) | def __repr__(self):
    method add_email_grant (line 75) | def add_email_grant(self, permission, email_address):
    method add_user_grant (line 82) | def add_user_grant(self, permission, user_id):
    method add_group_email_grant (line 86) | def add_group_email_grant(self, permission, email_address):
    method add_group_grant (line 91) | def add_group_grant(self, permission, group_id):
    method startElement (line 95) | def startElement(self, name, attrs, connection):
    method endElement (line 105) | def endElement(self, name, value, connection):
    method to_xml (line 113) | def to_xml(self):
  class Entries (line 125) | class Entries:
    method __init__ (line 127) | def __init__(self, parent=None):
    method __repr__ (line 133) | def __repr__(self):
    method startElement (line 139) | def startElement(self, name, attrs, connection):
    method endElement (line 147) | def endElement(self, name, value, connection):
    method to_xml (line 153) | def to_xml(self):
  class Entry (line 162) | class Entry:
    method __init__ (line 164) | def __init__(self, scope=None, type=None, id=None, name=None,
    method __repr__ (line 171) | def __repr__(self):
    method startElement (line 174) | def startElement(self, name, attrs, connection):
    method endElement (line 186) | def endElement(self, name, value, connection):
    method to_xml (line 197) | def to_xml(self):
  class Scope (line 204) | class Scope:
    method __init__ (line 217) | def __init__(self, parent, type=None, id=None, name=None,
    method __repr__ (line 229) | def __repr__(self):
    method startElement (line 242) | def startElement(self, name, attrs, connection):
    method endElement (line 248) | def endElement(self, name, value, connection):
    method to_xml (line 261) | def to_xml(self):

FILE: deploy/third_party/boto-2.1.1/boto/gs/bucket.py
  class Bucket (line 32) | class Bucket(S3Bucket):
    method __init__ (line 34) | def __init__(self, connection=None, name=None, key_class=GSKey):
    method set_acl (line 37) | def set_acl(self, acl_or_str, key_name='', headers=None, version_id=No...
    method get_acl (line 45) | def get_acl(self, key_name='', headers=None, version_id=None):
    method set_canned_acl (line 58) | def set_canned_acl(self, acl_str, key_name='', headers=None,
    method add_email_grant (line 79) | def add_email_grant(self, permission, email_address,
    method add_user_grant (line 116) | def add_user_grant(self, permission, user_id, recursive=False, headers...
    method add_group_email_grant (line 149) | def add_group_email_grant(self, permission, email_address, recursive=F...
    method list_grants (line 190) | def list_grants(self, headers=None):
    method disable_logging (line 194) | def disable_logging(self, headers=None):
    method enable_logging (line 198) | def enable_logging(self, target_bucket, target_prefix=None, headers=None,

FILE: deploy/third_party/boto-2.1.1/boto/gs/connection.py
  class Location (line 27) | class Location:
  class GSConnection (line 31) | class GSConnection(S3Connection):
    method __init__ (line 36) | def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
    method create_bucket (line 46) | def create_bucket(self, bucket_name, headers=None,

FILE: deploy/third_party/boto-2.1.1/boto/gs/key.py
  class Key (line 25) | class Key(S3Key):
    method add_email_grant (line 27) | def add_email_grant(self, permission, email_address):
    method add_user_grant (line 48) | def add_user_grant(self, permission, user_id):
    method add_group_email_grant (line 69) | def add_group_email_grant(self, permission, email_address, headers=None):
    method add_group_grant (line 90) | def add_group_grant(self, permission, group_id):
    method set_contents_from_file (line 111) | def set_contents_from_file(self, fp, headers=None, replace=True,
    method set_contents_from_filename (line 194) | def set_contents_from_filename(self, filename, headers=None, replace=T...
    method set_contents_from_string (line 249) | def set_contents_from_string(self, s, headers=None, replace=True,
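
Usage sketch (editor's addition, not part of the repository): the Google Storage classes mirror their S3 counterparts. The bucket and object names are placeholders; new_key() is assumed to be inherited from the S3 Bucket class that gs.bucket.Bucket extends, and credentials are read from the boto config.

    from boto.gs.connection import GSConnection

    gs = GSConnection()
    bucket = gs.create_bucket('example-sparrow-logs')
    key = bucket.new_key('run-1/scheduler.log')
    key.set_contents_from_string('hello from sparrow')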

FILE: deploy/third_party/boto-2.1.1/boto/gs/resumable_upload_handler.py
  class ResumableUploadHandler (line 55) | class ResumableUploadHandler(object):
    method __init__ (line 65) | def __init__(self, tracker_file_name=None, num_retries=None):
    method _load_tracker_uri_from_file (line 92) | def _load_tracker_uri_from_file(self):
    method _save_tracker_uri_to_file (line 116) | def _save_tracker_uri_to_file(self):
    method _set_tracker_uri (line 138) | def _set_tracker_uri(self, uri):
    method get_tracker_uri (line 158) | def get_tracker_uri(self):
    method _remove_tracker_file (line 164) | def _remove_tracker_file(self):
    method _build_content_range_header (line 169) | def _build_content_range_header(self, range_spec='*', length_spec='*'):
    method _query_server_state (line 172) | def _query_server_state(self, conn, file_length):
    method _query_server_pos (line 196) | def _query_server_pos(self, conn, file_length):
    method _start_new_resumable_upload (line 242) | def _start_new_resumable_upload(self, key, headers=None):
    method _upload_file_bytes (line 300) | def _upload_file_bytes(self, conn, http_conn, fp, file_length,
    method _attempt_resumable_upload (line 380) | def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
    method _check_final_md5 (line 453) | def _check_final_md5(self, key, etag):
    method send_file (line 476) | def send_file(self, key, fp, headers, cb=None, num_cb=10):

FILE: deploy/third_party/boto-2.1.1/boto/gs/user.py
  class User (line 23) | class User:
    method __init__ (line 24) | def __init__(self, parent=None, id='', name=''):
    method __repr__ (line 31) | def __repr__(self):
    method startElement (line 34) | def startElement(self, name, attrs, connection):
    method endElement (line 37) | def endElement(self, name, value, connection):
    method to_xml (line 45) | def to_xml(self, element_name='Owner'):

FILE: deploy/third_party/boto-2.1.1/boto/handler.py
  class XmlHandler (line 24) | class XmlHandler(xml.sax.ContentHandler):
    method __init__ (line 26) | def __init__(self, root_node, connection):
    method startElement (line 31) | def startElement(self, name, attrs):
    method endElement (line 37) | def endElement(self, name):
    method characters (line 43) | def characters(self, content):

FILE: deploy/third_party/boto-2.1.1/boto/https_connection.py
  class InvalidCertificateException (line 29) | class InvalidCertificateException(httplib.HTTPException):
    method __init__ (line 32) | def __init__(self, host, cert, reason):
    method __str__ (line 44) | def __str__(self):
  function GetValidHostsForCert (line 48) | def GetValidHostsForCert(cert):
  function ValidateCertificateHostname (line 62) | def ValidateCertificateHostname(cert, hostname):
  class CertValidatingHTTPSConnection (line 82) | class CertValidatingHTTPSConnection(httplib.HTTPConnection):
    method __init__ (line 87) | def __init__(self, host, port=None, key_file=None, cert_file=None,
    method connect (line 106) | def connect(self):

FILE: deploy/third_party/boto-2.1.1/boto/iam/connection.py
  class IAMConnection (line 30) | class IAMConnection(AWSQueryConnection):
    method __init__ (line 34) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 46) | def _required_auth_capability(self):
    method get_response (line 49) | def get_response(self, action, params, path='/', parent=None,
    method get_all_groups (line 74) | def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
    method get_group (line 104) | def get_group(self, group_name, marker=None, max_items=None):
    method create_group (line 130) | def create_group(self, group_name, path='/'):
    method delete_group (line 145) | def delete_group(self, group_name):
    method update_group (line 157) | def update_group(self, group_name, new_group_name=None, new_path=None):
    method add_user_to_group (line 180) | def add_user_to_group(self, group_name, user_name):
    method remove_user_from_group (line 195) | def remove_user_from_group(self, group_name, user_name):
    method put_group_policy (line 210) | def put_group_policy(self, group_name, policy_name, policy_json):
    method get_all_group_policies (line 229) | def get_all_group_policies(self, group_name, marker=None, max_items=No...
    method get_group_policy (line 256) | def get_group_policy(self, group_name, policy_name):
    method delete_group_policy (line 271) | def delete_group_policy(self, group_name, policy_name):
    method get_all_users (line 286) | def get_all_users(self, path_prefix='/', marker=None, max_items=None):
    method create_user (line 317) | def create_user(self, user_name, path='/'):
    method delete_user (line 333) | def delete_user(self, user_name):
    method get_user (line 347) | def get_user(self, user_name=None):
    method update_user (line 365) | def update_user(self, user_name, new_user_name=None, new_path=None):
    method get_all_user_policies (line 388) | def get_all_user_policies(self, user_name, marker=None, max_items=None):
    method put_user_policy (line 415) | def put_user_policy(self, user_name, policy_name, policy_json):
    method get_user_policy (line 434) | def get_user_policy(self, user_name, policy_name):
    method delete_user_policy (line 449) | def delete_user_policy(self, user_name, policy_name):
    method get_groups_for_user (line 464) | def get_groups_for_user(self, user_name, marker=None, max_items=None):
    method get_all_access_keys (line 495) | def get_all_access_keys(self, user_name, marker=None, max_items=None):
    method create_access_key (line 522) | def create_access_key(self, user_name=None):
    method update_access_key (line 537) | def update_access_key(self, access_key_id, status, user_name=None):
    method delete_access_key (line 562) | def delete_access_key(self, access_key_id, user_name=None):
    method get_all_signing_certs (line 585) | def get_all_signing_certs(self, marker=None, max_items=None,
    method update_signing_cert (line 619) | def update_signing_cert(self, cert_id, status, user_name=None):
    method upload_signing_cert (line 642) | def upload_signing_cert(self, cert_body, user_name=None):
    method delete_signing_cert (line 663) | def delete_signing_cert(self, cert_id, user_name=None):
    method get_all_server_certs (line 686) | def get_all_server_certs(self, path_prefix='/',
    method update_server_cert (line 720) | def update_server_cert(self, cert_name, new_cert_name=None,
    method upload_server_cert (line 745) | def upload_server_cert(self, cert_name, cert_body, private_key,
    method get_server_certificate (line 784) | def get_server_certificate(self, cert_name):
    method delete_server_cert (line 796) | def delete_server_cert(self, cert_name):
    method get_all_mfa_devices (line 812) | def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
    method enable_mfa_device (line 840) | def enable_mfa_device(self, user_name, serial_number,
    method deactivate_mfa_device (line 867) | def deactivate_mfa_device(self, user_name, serial_number):
    method resync_mfa_device (line 884) | def resync_mfa_device(self, user_name, serial_number,
    method get_login_profiles (line 914) | def get_login_profiles(self, user_name):
    method create_login_profile (line 925) | def create_login_profile(self, user_name, password):
    method delete_login_profile (line 941) | def delete_login_profile(self, user_name):
    method update_login_profile (line 952) | def update_login_profile(self, user_name, password):
    method create_account_alias (line 967) | def create_account_alias(self, alias):
    method delete_account_alias (line 980) | def delete_account_alias(self, alias):
    method get_account_alias (line 993) | def get_account_alias(self):
    method get_signin_url (line 1006) | def get_signin_url(self, service='ec2'):
    method get_account_summary (line 1020) | def get_account_summary(self):
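
Usage sketch (not part of the repository): a minimal pass over the IAMConnection API indexed above. Credentials are assumed to come from the environment or the boto config; the group, user, and policy values are placeholders.

  from boto.iam.connection import IAMConnection

  iam = IAMConnection()
  iam.create_group('developers')
  iam.create_user('alice')
  iam.add_user_to_group('developers', 'alice')
  policy = '{"Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}]}'
  iam.put_group_policy('developers', 's3-access', policy)
  keys = iam.create_access_key('alice')   # parsed response containing the new key pair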

FILE: deploy/third_party/boto-2.1.1/boto/iam/summarymap.py
  class SummaryMap (line 23) | class SummaryMap(dict):
    method __init__ (line 25) | def __init__(self, parent=None):
    method startElement (line 29) | def startElement(self, name, attrs, connection):
    method endElement (line 32) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/jsonresponse.py
  class XmlHandler (line 26) | class XmlHandler(xml.sax.ContentHandler):
    method __init__ (line 28) | def __init__(self, root_node, connection):
    method startElement (line 33) | def startElement(self, name, attrs):
    method endElement (line 42) | def endElement(self, name):
    method characters (line 48) | def characters(self, content):
    method parse (line 51) | def parse(self, s):
  class Element (line 54) | class Element(dict):
    method __init__ (line 56) | def __init__(self, connection=None, element_name=None,
    method __getattr__ (line 72) | def __getattr__(self, key):
    method get_name (line 84) | def get_name(self, name):
    method startElement (line 89) | def startElement(self, name, attrs, connection):
    method endElement (line 107) | def endElement(self, name, value, connection):
  class ListElement (line 117) | class ListElement(list):
    method __init__ (line 119) | def __init__(self, connection=None, element_name=None,
    method get_name (line 129) | def get_name(self, name):
    method startElement (line 134) | def startElement(self, name, attrs, connection):
    method endElement (line 149) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/manage/cmdshell.py
  class SSHClient (line 33) | class SSHClient(object):
    method __init__ (line 35) | def __init__(self, server,
    method connect (line 49) | def connect(self):
    method open_sftp (line 75) | def open_sftp(self):
    method get_file (line 78) | def get_file(self, src, dst):
    method put_file (line 82) | def put_file(self, src, dst):
    method open (line 86) | def open(self, filename, mode='r', bufsize=-1):
    method listdir (line 93) | def listdir(self, path):
    method isdir (line 97) | def isdir(self, path):
    method exists (line 103) | def exists(self, path):
    method shell (line 109) | def shell(self):
    method run (line 116) | def run(self, command):
    method run_pty (line 137) | def run_pty(self, command):
    method close (line 148) | def close(self):
  class LocalClient (line 153) | class LocalClient(object):
    method __init__ (line 155) | def __init__(self, server, host_key_file=None, uname='root'):
    method get_file (line 160) | def get_file(self, src, dst):
    method put_file (line 163) | def put_file(self, src, dst):
    method listdir (line 166) | def listdir(self, path):
    method isdir (line 169) | def isdir(self, path):
    method exists (line 172) | def exists(self, path):
    method shell (line 175) | def shell(self):
    method run (line 178) | def run(self):
    method close (line 192) | def close(self):
  class FakeServer (line 195) | class FakeServer(object):
    method __init__ (line 201) | def __init__(self, instance, ssh_key_file):
  function start (line 207) | def start(server):
  function sshclient_from_instance (line 214) | def sshclient_from_instance(instance, ssh_key_file,
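
Usage sketch (not part of the repository): running a command on an EC2 instance through SSHClient. It assumes a running instance and a matching .pem key; the user_name keyword and the (status, stdout, stderr) shape of run()'s return value are assumptions about this boto version.

  import boto.ec2
  from boto.manage.cmdshell import sshclient_from_instance

  conn = boto.ec2.connect_to_region('us-east-1')
  instance = conn.get_all_instances()[0].instances[0]   # first reservation, first instance
  ssh = sshclient_from_instance(instance, '/path/to/key.pem', user_name='ec2-user')
  status, stdout, stderr = ssh.run('uname -a')
  print(stdout)
  ssh.put_file('local.conf', '/tmp/remote.conf')
  ssh.close()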

FILE: deploy/third_party/boto-2.1.1/boto/manage/propget.py
  function get (line 23) | def get(prop, choices=None):

FILE: deploy/third_party/boto-2.1.1/boto/manage/server.py
  class Bundler (line 43) | class Bundler(object):
    method __init__ (line 45) | def __init__(self, server, uname='root'):
    method copy_x509 (line 51) | def copy_x509(self, key_file, cert_file):
    method bundle_image (line 62) | def bundle_image(self, prefix, size, ssh_key):
    method upload_bundle (line 78) | def upload_bundle(self, bucket, prefix, ssh_key):
    method bundle (line 89) | def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None,
  class CommandLineGetter (line 128) | class CommandLineGetter(object):
    method get_ami_list (line 130) | def get_ami_list(self):
    method get_region (line 138) | def get_region(self, params):
    method get_name (line 148) | def get_name(self, params):
    method get_description (line 153) | def get_description(self, params):
    method get_instance_type (line 158) | def get_instance_type(self, params):
    method get_quantity (line 164) | def get_quantity(self, params):
    method get_zone (line 169) | def get_zone(self, params):
    method get_ami_id (line 175) | def get_ami_id(self, params):
    method get_group (line 190) | def get_group(self, params):
    method get_key (line 203) | def get_key(self, params):
    method get (line 216) | def get(self, cls, params):
  class Server (line 229) | class Server(Model):
    method add_credentials (line 259) | def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key):
    method create (line 270) | def create(cls, config_file=None, logical_volume = None, cfg = None, *...
    method create_from_instance_id (line 351) | def create_from_instance_id(cls, instance_id, name, description=''):
    method create_from_current_instances (line 375) | def create_from_current_instances(cls):
    method __init__ (line 397) | def __init__(self, id=None, **kw):
    method _setup_ec2 (line 406) | def _setup_ec2(self):
    method _status (line 425) | def _status(self):
    method _hostname (line 432) | def _hostname(self):
    method _private_hostname (line 438) | def _private_hostname(self):
    method _instance_type (line 444) | def _instance_type(self):
    method _launch_time (line 450) | def _launch_time(self):
    method _console_output (line 456) | def _console_output(self):
    method _groups (line 462) | def _groups(self):
    method _security_group (line 468) | def _security_group(self):
    method _zone (line 474) | def _zone(self):
    method _key_name (line 480) | def _key_name(self):
    method put (line 486) | def put(self):
    method delete (line 490) | def delete(self):
    method stop (line 496) | def stop(self):
    method terminate (line 502) | def terminate(self):
    method reboot (line 508) | def reboot(self):
    method wait (line 512) | def wait(self):
    method get_ssh_key_file (line 516) | def get_ssh_key_file(self):
    method get_cmdshell (line 528) | def get_cmdshell(self):
    method reset_cmdshell (line 535) | def reset_cmdshell(self):
    method run (line 538) | def run(self, command):
    method get_bundler (line 543) | def get_bundler(self, uname='root'):
    method get_ssh_client (line 547) | def get_ssh_client(self, uname='root', ssh_pwd=None):
    method install (line 552) | def install(self, pkg):

FILE: deploy/third_party/boto-2.1.1/boto/manage/task.py
  function check_hour (line 28) | def check_hour(val):
  class Task (line 34) | class Task(Model):
    method start_all (line 57) | def start_all(cls, queue_name):
    method __init__ (line 61) | def __init__(self, id=None, **kw):
    method check (line 67) | def check(self):
    method _run (line 101) | def _run(self, msg, vtimeout):
    method run (line 125) | def run(self, msg, vtimeout=60):
    method start (line 142) | def start(self, queue_name):
  class TaskPoller (line 151) | class TaskPoller(object):
    method __init__ (line 153) | def __init__(self, queue_name):
    method poll (line 157) | def poll(self, wait=60, vtimeout=60):

FILE: deploy/third_party/boto-2.1.1/boto/manage/volume.py
  class CommandLineGetter (line 35) | class CommandLineGetter(object):
    method get_region (line 37) | def get_region(self, params):
    method get_zone (line 42) | def get_zone(self, params):
    method get_name (line 48) | def get_name(self, params):
    method get_size (line 53) | def get_size(self, params):
    method get_mount_point (line 58) | def get_mount_point(self, params):
    method get_device (line 63) | def get_device(self, params):
    method get (line 68) | def get(self, cls, params):
  class Volume (line 78) | class Volume(Model):
    method create (line 97) | def create(cls, **params):
    method create_from_volume_id (line 117) | def create_from_volume_id(cls, region_name, volume_id, name):
    method create_from_latest_snapshot (line 131) | def create_from_latest_snapshot(self, name, size=None):
    method create_from_snapshot (line 135) | def create_from_snapshot(self, name, snapshot, size=None):
    method get_ec2_connection (line 155) | def get_ec2_connection(self):
    method _volume_state (line 162) | def _volume_state(self):
    method _attachment_state (line 167) | def _attachment_state(self):
    method _size (line 172) | def _size(self):
    method install_xfs (line 179) | def install_xfs(self):
    method get_snapshots (line 183) | def get_snapshots(self):
    method attach (line 200) | def attach(self, server=None):
    method detach (line 210) | def detach(self, force=False):
    method checkfs (line 220) | def checkfs(self, use_cmd=None):
    method wait (line 235) | def wait(self):
    method format (line 245) | def format(self):
    method mount (line 255) | def mount(self):
    method make_ready (line 283) | def make_ready(self, server):
    method freeze (line 292) | def freeze(self):
    method unfreeze (line 296) | def unfreeze(self):
    method snapshot (line 300) | def snapshot(self):
    method get_snapshot_range (line 317) | def get_snapshot_range(self, snaps, start_date=None, end_date=None):
    method trim_snapshots (line 333) | def trim_snapshots(self, delete=False):
    method grow (line 400) | def grow(self, size):
    method copy (line 403) | def copy(self, snapshot):
    method get_snapshot_from_date (line 406) | def get_snapshot_from_date(self, date):
    method delete (line 409) | def delete(self, delete_ebs_volume=False):
    method archive (line 416) | def archive(self):

FILE: deploy/third_party/boto-2.1.1/boto/mashups/interactive.py
  function interactive_shell (line 32) | def interactive_shell(chan):
  function posix_shell (line 39) | def posix_shell(chan):
  function windows_shell (line 71) | def windows_shell(chan):

FILE: deploy/third_party/boto-2.1.1/boto/mashups/iobject.py
  function int_val_fn (line 24) | def int_val_fn(v):
  class IObject (line 31) | class IObject(object):
    method choose_from_list (line 33) | def choose_from_list(self, item_list, search_str='',
    method get_string (line 79) | def get_string(self, prompt, validation_fn=None):
    method get_filename (line 91) | def get_filename(self, prompt):
    method get_int (line 112) | def get_int(self, prompt):

FILE: deploy/third_party/boto-2.1.1/boto/mashups/order.py
  class Item (line 36) | class Item(IObject):
    method __init__ (line 38) | def __init__(self):
    method set_userdata (line 50) | def set_userdata(self, key, value):
    method get_userdata (line 53) | def get_userdata(self, key):
    method set_region (line 56) | def set_region(self, region=None):
    method set_name (line 63) | def set_name(self, name=None):
    method set_instance_type (line 69) | def set_instance_type(self, instance_type=None):
    method set_quantity (line 75) | def set_quantity(self, n=0):
    method set_zone (line 81) | def set_zone(self, zone=None):
    method set_ami (line 88) | def set_ami(self, ami=None):
    method add_group (line 95) | def add_group(self, group=None):
    method set_key (line 102) | def set_key(self, key=None):
    method update_config (line 109) | def update_config(self):
    method set_config (line 121) | def set_config(self, config_path=None):
    method get_userdata_string (line 126) | def get_userdata_string(self):
    method enter (line 131) | def enter(self, **params):
  class Order (line 162) | class Order(IObject):
    method __init__ (line 164) | def __init__(self):
    method add_item (line 168) | def add_item(self, **params):
    method display (line 173) | def display(self):
    method place (line 181) | def place(self, block=True):

FILE: deploy/third_party/boto-2.1.1/boto/mashups/server.py
  class ServerSet (line 36) | class ServerSet(list):
    method __getattr__ (line 38) | def __getattr__(self, name):
    method map (line 54) | def map(self, *args):
  class Server (line 60) | class Server(Model):
    method ec2 (line 63) | def ec2(self):
    method Inventory (line 69) | def Inventory(cls):
    method Register (line 81) | def Register(cls, name, instance_id, description=''):
    method __init__ (line 89) | def __init__(self, id=None, **kw):
    method setReadOnly (line 110) | def setReadOnly(self, value):
    method getInstance (line 113) | def getInstance(self):
    method getAMI (line 127) | def getAMI(self):
    method getStatus (line 133) | def getStatus(self):
    method getHostname (line 141) | def getHostname(self):
    method getPrivateHostname (line 148) | def getPrivateHostname(self):
    method getLaunchTime (line 155) | def getLaunchTime(self):
    method getConsoleOutput (line 162) | def getConsoleOutput(self):
    method getGroups (line 169) | def getGroups(self):
    method getConfig (line 178) | def getConfig(self):
    method setConfig (line 186) | def setConfig(self, config):
    method set_config (line 197) | def set_config(self, config):
    method load_config (line 204) | def load_config(self):
    method stop (line 208) | def stop(self):
    method start (line 212) | def start(self):
    method reboot (line 247) | def reboot(self):
    method get_ssh_client (line 251) | def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_ho...
    method get_file (line 270) | def get_file(self, remotepath, localpath):
    method put_file (line 275) | def put_file(self, localpath, remotepath):
    method listdir (line 280) | def listdir(self, remotepath):
    method shell (line 285) | def shell(self, key_file=None):
    method bundle_image (line 290) | def bundle_image(self, prefix, key_file, cert_file, size):
    method upload_bundle (line 324) | def upload_bundle(self, bucket, prefix):
    method create_image (line 339) | def create_image(self, bucket=None, prefix=None, key_file=None, cert_f...
    method attach_volume (line 357) | def attach_volume(self, volume, device="/dev/sdp"):
    method detach_volume (line 373) | def detach_volume(self, volume):
    method install_package (line 386) | def install_package(self, package_name):

FILE: deploy/third_party/boto-2.1.1/boto/mturk/connection.py
  class MTurkRequestError (line 35) | class MTurkRequestError(EC2ResponseError):
  class MTurkConnection (line 39) | class MTurkConnection(AWSQueryConnection):
    method __init__ (line 43) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 60) | def _required_auth_capability(self):
    method get_account_balance (line 63) | def get_account_balance(self):
    method register_hit_type (line 71) | def register_hit_type(self, title, description, reward, duration,
    method set_email_notification (line 99) | def set_email_notification(self, hit_type, email, event_types=None):
    method set_rest_notification (line 106) | def set_rest_notification(self, hit_type, url, event_types=None):
    method _set_notification (line 113) | def _set_notification(self, hit_type, transport, destination, event_ty...
    method create_hit (line 148) | def create_hit(self, hit_type=None, question=None,
    method change_hit_type_of_hit (line 226) | def change_hit_type_of_hit(self, hit_id, hit_type):
    method get_reviewable_hits (line 240) | def get_reviewable_hits(self, hit_type=None, status='Reviewable',
    method _get_pages (line 261) | def _get_pages(page_size, total_records):
    method get_all_hits (line 270) | def get_all_hits(self):
    method search_hits (line 287) | def search_hits(self, sort_by='CreationTime', sort_direction='Ascending',
    method get_assignments (line 308) | def get_assignments(self, hit_id, status=None,
    method approve_assignment (line 350) | def approve_assignment(self, assignment_id, feedback=None):
    method reject_assignment (line 358) | def reject_assignment(self, assignment_id, feedback=None):
    method get_hit (line 366) | def get_hit(self, hit_id, response_groups=None):
    method set_reviewing (line 376) | def set_reviewing(self, hit_id, revert=None):
    method disable_hit (line 390) | def disable_hit(self, hit_id, response_groups=None):
    method dispose_hit (line 414) | def dispose_hit(self, hit_id):
    method expire_hit (line 427) | def expire_hit(self, hit_id):
    method extend_hit (line 444) | def extend_hit(self, hit_id, assignments_increment=None, expiration_in...
    method get_help (line 471) | def get_help(self, about, help_type='Operation'):
    method grant_bonus (line 482) | def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
    method block_worker (line 499) | def block_worker(self, worker_id, reason):
    method unblock_worker (line 507) | def unblock_worker(self, worker_id, reason):
    method notify_workers (line 515) | def notify_workers(self, worker_ids, subject, message_text):
    method create_qualification_type (line 525) | def create_qualification_type(self,
    method get_qualification_type (line 603) | def get_qualification_type(self, qualification_type_id):
    method get_qualifications_for_qualification_type (line 608) | def get_qualifications_for_qualification_type(self, qualification_type...
    method update_qualification_type (line 613) | def update_qualification_type(self, qualification_type_id,
    method dispose_qualification_type (line 657) | def dispose_qualification_type(self, qualification_type_id):
    method search_qualification_types (line 662) | def search_qualification_types(self, query=None, sort_by='Name',
    method get_qualification_requests (line 677) | def get_qualification_requests(self, qualification_type_id,
    method grant_qualification (line 690) | def grant_qualification(self, qualification_request_id, integer_value=1):
    method revoke_qualification (line 696) | def revoke_qualification(self, subject_id, qualification_type_id,
    method assign_qualification (line 704) | def assign_qualification(self, qualification_type_id, worker_id,
    method get_qualification_score (line 712) | def get_qualification_score(self, qualification_type_id, worker_id):
    method update_qualification_score (line 719) | def update_qualification_score(self, qualification_type_id, worker_id,
    method _process_request (line 727) | def _process_request(self, request_type, params, marker_elems=None):
    method _process_response (line 734) | def _process_response(self, response, marker_elems=None):
    method get_keywords_as_string (line 749) | def get_keywords_as_string(keywords):
    method get_price_as_price (line 767) | def get_price_as_price(reward):
    method duration_as_seconds (line 778) | def duration_as_seconds(duration):
  class BaseAutoResultElement (line 787) | class BaseAutoResultElement:
    method __init__ (line 791) | def __init__(self, connection):
    method startElement (line 794) | def startElement(self, name, attrs, connection):
    method endElement (line 797) | def endElement(self, name, value, connection):
  class HIT (line 800) | class HIT(BaseAutoResultElement):
    method _has_expired (line 809) | def _has_expired(self):
  class Qualification (line 823) | class Qualification(BaseAutoResultElement):
  class QualificationType (line 834) | class QualificationType(BaseAutoResultElement):
  class QualificationRequest (line 845) | class QualificationRequest(BaseAutoResultElement):
  class Assignment (line 859) | class Assignment(BaseAutoResultElement):
    method __init__ (line 868) | def __init__(self, connection):
    method endElement (line 872) | def endElement(self, name, value, connection):
  class QuestionFormAnswer (line 883) | class QuestionFormAnswer(BaseAutoResultElement):
    method __init__ (line 903) | def __init__(self, connection):
    method endElement (line 908) | def endElement(self, name, value, connection):
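
Usage sketch (not part of the repository): approving completed work through MTurkConnection. The sandbox host is an example; credentials come from the environment or boto config, and the HITId / AssignmentId attribute names follow the MTurk response fields parsed by BaseAutoResultElement.

  from boto.mturk.connection import MTurkConnection

  mtc = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com')
  print(mtc.get_account_balance())
  for hit in mtc.get_reviewable_hits():
      for assignment in mtc.get_assignments(hit.HITId):
          mtc.approve_assignment(assignment.AssignmentId, feedback='Thanks!')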

FILE: deploy/third_party/boto-2.1.1/boto/mturk/notification.py
  class NotificationMessage (line 35) | class NotificationMessage:
    method __init__ (line 46) | def __init__(self, d):
    method verify (line 74) | def verify(self, secret_key):
  class Event (line 91) | class Event:
    method __init__ (line 92) | def __init__(self, d):
    method __repr__ (line 102) | def __repr__(self):

FILE: deploy/third_party/boto-2.1.1/boto/mturk/price.py
  class Price (line 22) | class Price:
    method __init__ (line 24) | def __init__(self, amount=0.0, currency_code='USD'):
    method __repr__ (line 29) | def __repr__(self):
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 38) | def endElement(self, name, value, connection):
    method get_as_params (line 46) | def get_as_params(self, label, ord=1):

FILE: deploy/third_party/boto-2.1.1/boto/mturk/qualification.py
  class Qualifications (line 22) | class Qualifications:
    method __init__ (line 24) | def __init__(self, requirements=None):
    method add (line 29) | def add(self, req):
    method get_as_params (line 32) | def get_as_params(self):
  class Requirement (line 42) | class Requirement(object):
    method __init__ (line 47) | def __init__(self, qualification_type_id, comparator, integer_value=No...
    method get_as_params (line 53) | def get_as_params(self):
  class PercentAssignmentsSubmittedRequirement (line 64) | class PercentAssignmentsSubmittedRequirement(Requirement):
    method __init__ (line 69) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class PercentAssignmentsAbandonedRequirement (line 72) | class PercentAssignmentsAbandonedRequirement(Requirement):
    method __init__ (line 77) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class PercentAssignmentsReturnedRequirement (line 80) | class PercentAssignmentsReturnedRequirement(Requirement):
    method __init__ (line 85) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class PercentAssignmentsApprovedRequirement (line 88) | class PercentAssignmentsApprovedRequirement(Requirement):
    method __init__ (line 93) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class PercentAssignmentsRejectedRequirement (line 96) | class PercentAssignmentsRejectedRequirement(Requirement):
    method __init__ (line 101) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class NumberHitsApprovedRequirement (line 104) | class NumberHitsApprovedRequirement(Requirement):
    method __init__ (line 109) | def __init__(self, comparator, integer_value, required_to_preview=False):
  class LocaleRequirement (line 112) | class LocaleRequirement(Requirement):
    method __init__ (line 117) | def __init__(self, comparator, locale, required_to_preview=False):
    method get_as_params (line 121) | def get_as_params(self):
  class AdultRequirement (line 131) | class AdultRequirement(Requirement):
    method __init__ (line 136) | def __init__(self, comparator, integer_value, required_to_preview=False):
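
Usage sketch (not part of the repository): restricting a HIT to US-based workers with a high approval rate; the comparator strings follow the MTurk API.

  from boto.mturk.qualification import (Qualifications,
      PercentAssignmentsApprovedRequirement, LocaleRequirement)

  quals = Qualifications()
  quals.add(PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 95))
  quals.add(LocaleRequirement('EqualTo', 'US'))
  params = quals.get_as_params()   # flattened QualificationRequirement.N.* request parameters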

FILE: deploy/third_party/boto-2.1.1/boto/mturk/question.py
  class Question (line 22) | class Question(object):
    method __init__ (line 25) | def __init__(self, identifier, content, answer_spec,
    method get_as_params (line 31) | def get_as_params(self, label='Question'):
    method get_as_xml (line 34) | def get_as_xml(self):
  class ValidatingXML (line 48) | class ValidatingXML(object):
    method validate (line 49) | def validate(self):
  class ValidatingXML (line 57) | class ValidatingXML(object):
    method validate (line 58) | def validate(self): pass
  class ExternalQuestion (line 61) | class ExternalQuestion(ValidatingXML):
    method __init__ (line 68) | def __init__(self, external_url, frame_height):
    method get_as_params (line 72) | def get_as_params(self, label='ExternalQuestion'):
    method get_as_xml (line 75) | def get_as_xml(self):
  class XMLTemplate (line 78) | class XMLTemplate:
    method get_as_xml (line 79) | def get_as_xml(self):
  class SimpleField (line 82) | class SimpleField(object, XMLTemplate):
    method __init__ (line 91) | def __init__(self, field, value):
  class Binary (line 95) | class Binary(object, XMLTemplate):
    method __init__ (line 97) | def __init__(self, type, subtype, url, alt_text):
  class List (line 101) | class List(list):
    method get_as_xml (line 103) | def get_as_xml(self):
  class Application (line 107) | class Application(object):
    method __init__ (line 111) | def __init__(self, width, height, **parameters):
    method get_inner_content (line 116) | def get_inner_content(self, content):
    method get_as_xml (line 123) | def get_as_xml(self):
  class JavaApplet (line 130) | class JavaApplet(Application):
    method __init__ (line 131) | def __init__(self, path, filename, *args, **kwargs):
    method get_inner_content (line 136) | def get_inner_content(self, content):
  class Flash (line 142) | class Flash(Application):
    method __init__ (line 143) | def __init__(self, url, *args, **kwargs):
    method get_inner_content (line 147) | def get_inner_content(self, content):
  class FormattedContent (line 152) | class FormattedContent(object, XMLTemplate):
    method __init__ (line 155) | def __init__(self, content):
  class OrderedContent (line 158) | class OrderedContent(list):
    method append_field (line 160) | def append_field(self, field, value):
    method get_as_xml (line 163) | def get_as_xml(self):
  class Overview (line 166) | class Overview(OrderedContent):
    method get_as_params (line 169) | def get_as_params(self, label='Overview'):
    method get_as_xml (line 172) | def get_as_xml(self):
  class QuestionForm (line 176) | class QuestionForm(ValidatingXML, list):
    method is_valid (line 213) | def is_valid(self):
    method get_as_xml (line 220) | def get_as_xml(self):
  class QuestionContent (line 225) | class QuestionContent(OrderedContent):
    method get_as_xml (line 228) | def get_as_xml(self):
  class AnswerSpecification (line 232) | class AnswerSpecification(object):
    method __init__ (line 235) | def __init__(self, spec):
    method get_as_xml (line 238) | def get_as_xml(self):
  class Constraints (line 242) | class Constraints(OrderedContent):
    method get_as_xml (line 245) | def get_as_xml(self):
  class Constraint (line 249) | class Constraint(object):
    method get_attributes (line 250) | def get_attributes(self):
    method get_as_xml (line 259) | def get_as_xml(self):
  class NumericConstraint (line 263) | class NumericConstraint(Constraint):
    method __init__ (line 267) | def __init__(self, min_value=None, max_value=None):
  class LengthConstraint (line 270) | class LengthConstraint(Constraint):
    method __init__ (line 274) | def __init__(self, min_length=None, max_length=None):
  class RegExConstraint (line 277) | class RegExConstraint(Constraint):
    method __init__ (line 281) | def __init__(self, pattern, error_text=None, flags=None):
  class NumberOfLinesSuggestion (line 284) | class NumberOfLinesSuggestion(object):
    method __init__ (line 287) | def __init__(self, num_lines=1):
    method get_as_xml (line 290) | def get_as_xml(self):
  class FreeTextAnswer (line 294) | class FreeTextAnswer(object):
    method __init__ (line 297) | def __init__(self, default=None, constraints=None, num_lines=None):
    method get_as_xml (line 305) | def get_as_xml(self):
  class FileUploadAnswer (line 314) | class FileUploadAnswer(object):
    method __init__ (line 317) | def __init__(self, min_bytes, max_bytes):
    method get_as_xml (line 322) | def get_as_xml(self):
  class SelectionAnswer (line 325) | class SelectionAnswer(object):
    method __init__ (line 339) | def __init__(self, min=1, max=1, style=None, selections=None, type='te...
    method get_as_xml (line 364) | def get_as_xml(self):
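
Usage sketch (not part of the repository): building a free-text QuestionForm and posting it as a HIT. The reward (USD), duration (seconds), and other HIT parameters are illustrative; the sandbox host and credentials are assumptions.

  from boto.mturk.connection import MTurkConnection
  from boto.mturk.question import (QuestionContent, Question, QuestionForm,
                                   AnswerSpecification, FreeTextAnswer)

  content = QuestionContent()
  content.append_field('Title', 'Describe this photo in one sentence')
  question = Question(identifier='description',
                      content=content,
                      answer_spec=AnswerSpecification(FreeTextAnswer()))
  form = QuestionForm()
  form.append(question)

  mtc = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com')
  mtc.create_hit(question=form, max_assignments=3, title='Photo description',
                 description='Write a short caption for the image shown.',
                 keywords='image, caption', reward=0.05, duration=600)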

FILE: deploy/third_party/boto-2.1.1/boto/plugin.py
  class Plugin (line 43) | class Plugin(object):
    method is_capable (line 49) | def is_capable(cls, requested_capability):
  function get_plugin (line 57) | def get_plugin(cls, requested_capability=None):
  function _import_module (line 66) | def _import_module(filename):
  function load_plugins (line 79) | def load_plugins(config):

FILE: deploy/third_party/boto-2.1.1/boto/provider.py
  class Provider (line 64) | class Provider(object):
    method __init__ (line 154) | def __init__(self, name, access_key=None, secret_key=None,
    method get_credentials (line 171) | def get_credentials(self, access_key=None, secret_key=None):
    method configure_headers (line 191) | def configure_headers(self):
    method configure_errors (line 212) | def configure_errors(self):
    method get_provider_name (line 220) | def get_provider_name(self):
    method supports_chunked_transfer (line 223) | def supports_chunked_transfer(self):
  function get_default (line 227) | def get_default():

FILE: deploy/third_party/boto-2.1.1/boto/pyami/bootstrap.py
  class Bootstrap (line 29) | class Bootstrap(ScriptBase):
    method __init__ (line 40) | def __init__(self):
    method write_metadata (line 45) | def write_metadata(self):
    method create_working_dir (line 62) | def create_working_dir(self):
    method load_boto (line 67) | def load_boto(self):
    method fetch_s3_file (line 98) | def fetch_s3_file(self, s3_file):
    method load_packages (line 109) | def load_packages(self):
    method main (line 123) | def main(self):

FILE: deploy/third_party/boto-2.1.1/boto/pyami/config.py
  class Config (line 58) | class Config(ConfigParser.SafeConfigParser):
    method __init__ (line 60) | def __init__(self, path=None, fp=None, do_load=True):
    method load_credential_file (line 77) | def load_credential_file(self, path):
    method load_from_path (line 86) | def load_from_path(self, path):
    method save_option (line 96) | def save_option(self, path, section, option, value):
    method save_user_option (line 114) | def save_user_option(self, section, option, value):
    method save_system_option (line 117) | def save_system_option(self, section, option, value):
    method get_instance (line 120) | def get_instance(self, name, default=None):
    method get_user (line 127) | def get_user(self, name, default=None):
    method getint_user (line 134) | def getint_user(self, name, default=0):
    method get_value (line 141) | def get_value(self, section, name, default=None):
    method get (line 144) | def get(self, section, name, default=None):
    method getint (line 151) | def getint(self, section, name, default=0):
    method getfloat (line 158) | def getfloat(self, section, name, default=0.0):
    method getbool (line 165) | def getbool(self, section, name, default=False):
    method setbool (line 176) | def setbool(self, section, name, value):
    method dump (line 182) | def dump(self):
    method dump_safe (line 187) | def dump_safe(self, fp=None):
    method dump_to_sdb (line 198) | def dump_to_sdb(self, domain_name, item_name):
    method load_from_sdb (line 217) | def load_from_sdb(self, domain_name, item_name):
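
Usage sketch (not part of the repository): reading typed values from the boto configuration. The path is a placeholder; with do_load=True the standard boto config locations are also consulted.

  from boto.pyami.config import Config

  cfg = Config(path='/etc/boto.cfg')
  access_key = cfg.get('Credentials', 'aws_access_key_id')
  retries = cfg.getint('Boto', 'num_retries', 3)
  debug = cfg.getbool('Boto', 'debug', False)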

FILE: deploy/third_party/boto-2.1.1/boto/pyami/copybot.py
  class CopyBot (line 26) | class CopyBot(ScriptBase):
    method __init__ (line 28) | def __init__(self):
    method copy_bucket_acl (line 49) | def copy_bucket_acl(self):
    method copy_key_acl (line 54) | def copy_key_acl(self, src, dst):
    method copy_keys (line 59) | def copy_keys(self):
    method copy_log (line 80) | def copy_log(self):
    method main (line 84) | def main(self):

FILE: deploy/third_party/boto-2.1.1/boto/pyami/helloworld.py
  class HelloWorld (line 24) | class HelloWorld(ScriptBase):
    method main (line 26) | def main(self):

FILE: deploy/third_party/boto-2.1.1/boto/pyami/launch_ami.py
  function usage (line 70) | def usage():
  function main (line 74) | def main():

FILE: deploy/third_party/boto-2.1.1/boto/pyami/scriptbase.py
  class ScriptBase (line 7) | class ScriptBase:
    method __init__ (line 9) | def __init__(self, config_file=None):
    method notify (line 16) | def notify(self, subject, body=''):
    method mkdir (line 19) | def mkdir(self, path):
    method umount (line 26) | def umount(self, path):
    method run (line 30) | def run(self, command, notify=True, exit_on_error=False, cwd=None):
    method main (line 42) | def main(self):

FILE: deploy/third_party/boto-2.1.1/boto/pyami/startup.py
  class Startup (line 29) | class Startup(ScriptBase):
    method run_scripts (line 31) | def run_scripts(self):
    method main (line 51) | def main(self):

FILE: deploy/third_party/boto-2.1.1/boto/rds/__init__.py
  function regions (line 33) | def regions():
  function connect_to_region (line 52) | def connect_to_region(region_name, **kw_params):
  class RDSConnection (line 73) | class RDSConnection(AWSQueryConnection):
    method __init__ (line 79) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 94) | def _required_auth_capability(self):
    method get_all_dbinstances (line 99) | def get_all_dbinstances(self, instance_id=None, max_records=None,
    method create_dbinstance (line 132) | def create_dbinstance(self, id, allocated_storage, instance_class,
    method create_dbinstance_read_replica (line 269) | def create_dbinstance_read_replica(self, id, source_id,
    method modify_dbinstance (line 338) | def modify_dbinstance(self, id, param_group=None, security_groups=None,
    method delete_dbinstance (line 438) | def delete_dbinstance(self, id, skip_final_snapshot=False,
    method reboot_dbinstance (line 469) | def reboot_dbinstance(self, id):
    method get_all_dbparameter_groups (line 484) | def get_all_dbparameter_groups(self, groupname=None, max_records=None,
    method get_all_dbparameters (line 515) | def get_all_dbparameters(self, groupname, source=None,
    method create_parameter_group (line 551) | def create_parameter_group(self, name, engine='MySQL5.1', description=...
    method modify_parameter_group (line 572) | def modify_parameter_group(self, name, parameters=None):
    method reset_parameter_group (line 592) | def reset_parameter_group(self, name, reset_all_params=False,
    method delete_parameter_group (line 615) | def delete_parameter_group(self, name):
    method get_all_dbsecurity_groups (line 627) | def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
    method create_dbsecurity_group (line 659) | def create_dbsecurity_group(self, name, description=None):
    method delete_dbsecurity_group (line 683) | def delete_dbsecurity_group(self, name):
    method authorize_dbsecurity_group (line 693) | def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
    method revoke_dbsecurity_group (line 731) | def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=...
    method get_all_dbsnapshots (line 774) | def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
    method create_dbsnapshot (line 813) | def create_dbsnapshot(self, snapshot_id, dbinstance_id):
    method delete_dbsnapshot (line 831) | def delete_dbsnapshot(self, identifier):
    method restore_dbinstance_from_dbsnapshot (line 841) | def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
    method restore_dbinstance_from_point_in_time (line 882) | def restore_dbinstance_from_point_in_time(self, source_instance_id,
    method get_all_events (line 941) | def get_all_events(self, source_identifier=None, source_type=None,
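
Usage sketch (not part of the repository): creating and listing RDS instances. Region, identifiers, and credentials are placeholders; the master username/password positional arguments to create_dbinstance are assumed from this boto version's API.

  import boto.rds

  rds = boto.rds.connect_to_region('us-east-1')
  db = rds.create_dbinstance('mydb', 10, 'db.m1.small', 'admin', 's3cr3tpass')
  for instance in rds.get_all_dbinstances():
      print('%s %s' % (instance.id, instance.status))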

FILE: deploy/third_party/boto-2.1.1/boto/rds/dbinstance.py
  class DBInstance (line 25) | class DBInstance(object):
    method __init__ (line 30) | def __init__(self, connection=None, id=None):
    method __repr__ (line 53) | def __repr__(self):
    method startElement (line 56) | def startElement(self, name, attrs, connection):
    method endElement (line 70) | def endElement(self, name, value, connection):
    method snapshot (line 112) | def snapshot(self, snapshot_id):
    method reboot (line 124) | def reboot(self):
    method update (line 133) | def update(self, validate=False):
    method stop (line 155) | def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
    method modify (line 177) | def modify(self, param_group=None, security_groups=None,
  class PendingModifiedValues (line 256) | class PendingModifiedValues(dict):
    method startElement (line 258) | def startElement(self, name, attrs, connection):
    method endElement (line 261) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/rds/dbsecuritygroup.py
  class DBSecurityGroup (line 27) | class DBSecurityGroup(object):
    method __init__ (line 29) | def __init__(self, connection=None, owner_id=None,
    method __repr__ (line 38) | def __repr__(self):
    method startElement (line 41) | def startElement(self, name, attrs, connection):
    method endElement (line 53) | def endElement(self, name, value, connection):
    method delete (line 65) | def delete(self):
    method authorize (line 68) | def authorize(self, cidr_ip=None, ec2_group=None):
    method revoke (line 93) | def revoke(self, cidr_ip=None, ec2_group=None):
  class IPRange (line 119) | class IPRange(object):
    method __init__ (line 121) | def __init__(self, parent=None):
    method __repr__ (line 126) | def __repr__(self):
    method startElement (line 129) | def startElement(self, name, attrs, connection):
    method endElement (line 132) | def endElement(self, name, value, connection):
  class EC2SecurityGroup (line 140) | class EC2SecurityGroup(object):
    method __init__ (line 142) | def __init__(self, parent=None):
    method __repr__ (line 147) | def __repr__(self):
    method startElement (line 150) | def startElement(self, name, attrs, connection):
    method endElement (line 153) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/rds/dbsnapshot.py
  class DBSnapshot (line 22) | class DBSnapshot(object):
    method __init__ (line 27) | def __init__(self, connection=None, id=None):
    method __repr__ (line 41) | def __repr__(self):
    method startElement (line 44) | def startElement(self, name, attrs, connection):
    method endElement (line 47) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/rds/event.py
  class Event (line 22) | class Event(object):
    method __init__ (line 24) | def __init__(self, connection=None):
    method __repr__ (line 32) | def __repr__(self):
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 38) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/rds/parametergroup.py
  class ParameterGroup (line 22) | class ParameterGroup(dict):
    method __init__ (line 24) | def __init__(self, connection=None):
    method __repr__ (line 32) | def __repr__(self):
    method startElement (line 35) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
    method modifiable (line 52) | def modifiable(self):
    method get_params (line 60) | def get_params(self):
    method add_param (line 64) | def add_param(self, name, value, apply_method):
  class Parameter (line 71) | class Parameter(object):
    method __init__ (line 83) | def __init__(self, group=None, name=None):
    method __repr__ (line 94) | def __repr__(self):
    method startElement (line 97) | def startElement(self, name, attrs, connection):
    method endElement (line 100) | def endElement(self, name, value, connection):
    method merge (line 126) | def merge(self, d, i):
    method _set_string_value (line 135) | def _set_string_value(self, value):
    method _set_integer_value (line 144) | def _set_integer_value(self, value):
    method _set_boolean_value (line 156) | def _set_boolean_value(self, value):
    method set_value (line 167) | def set_value(self, value):
    method get_value (line 177) | def get_value(self):
    method apply (line 195) | def apply(self, immediate=False):

FILE: deploy/third_party/boto-2.1.1/boto/rds/regioninfo.py
  class RDSRegionInfo (line 27) | class RDSRegionInfo(RegionInfo):
    method __init__ (line 29) | def __init__(self, connection=None, name=None, endpoint=None):

FILE: deploy/third_party/boto-2.1.1/boto/regioninfo.py
  class RegionInfo (line 24) | class RegionInfo(object):
    method __init__ (line 29) | def __init__(self, connection=None, name=None, endpoint=None,
    method __repr__ (line 36) | def __repr__(self):
    method startElement (line 39) | def startElement(self, name, attrs, connection):
    method endElement (line 42) | def endElement(self, name, value, connection):
    method connect (line 50) | def connect(self, **kw_params):

FILE: deploy/third_party/boto-2.1.1/boto/resultset.py
  class ResultSet (line 22) | class ResultSet(list):
    method __init__ (line 43) | def __init__(self, marker_elem=None):
    method startElement (line 58) | def startElement(self, name, attrs, connection):
    method to_boolean (line 66) | def to_boolean(self, value, true_value='true'):
    method endElement (line 72) | def endElement(self, name, value, connection):
  class BooleanResult (line 113) | class BooleanResult(object):
    method __init__ (line 115) | def __init__(self, marker_elem=None):
    method __repr__ (line 120) | def __repr__(self):
    method __nonzero__ (line 126) | def __nonzero__(self):
    method startElement (line 129) | def startElement(self, name, attrs, connection):
    method to_boolean (line 132) | def to_boolean(self, value, true_value='true'):
    method endElement (line 138) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/roboto/awsqueryrequest.py
  function boto_except_hook (line 38) | def boto_except_hook(debugger_flag, debug_flag):
  class Line (line 58) | class Line(object):
    method __init__ (line 60) | def __init__(self, fmt, data, label):
    method append (line 67) | def append(self, datum):
    method print_it (line 70) | def print_it(self):
  class RequiredParamError (line 75) | class RequiredParamError(boto.exception.BotoClientError):
    method __init__ (line 77) | def __init__(self, required):
  class EncoderError (line 82) | class EncoderError(boto.exception.BotoClientError):
    method __init__ (line 84) | def __init__(self, error_msg):
  class FilterError (line 88) | class FilterError(boto.exception.BotoClientError):
    method __init__ (line 90) | def __init__(self, filters):
  class Encoder (line 95) | class Encoder:
    method encode (line 98) | def encode(cls, p, rp, v, label=None):
    method encode_string (line 108) | def encode_string(cls, p, rp, v, l):
    method encode_integer (line 119) | def encode_integer(cls, p, rp, v, l):
    method encode_boolean (line 127) | def encode_boolean(cls, p, rp, v, l):
    method encode_datetime (line 139) | def encode_datetime(cls, p, rp, v, l):
    method encode_array (line 147) | def encode_array(cls, p, rp, v, l):
  class AWSQueryRequest (line 157) | class AWSQueryRequest(object):
    method name (line 177) | def name(cls):
    method __init__ (line 180) | def __init__(self, **args):
    method __repr__ (line 192) | def __repr__(self):
    method get_connection (line 195) | def get_connection(self, **args):
    method status (line 201) | def status(self):
    method reason (line 208) | def reason(self):
    method request_id (line 215) | def request_id(self):
    method process_filters (line 221) | def process_filters(self):
    method process_args (line 235) | def process_args(self, **args):
    method process_markers (line 286) | def process_markers(self, fmt, prev_name=None):
    method send (line 294) | def send(self, verb='GET', **args):
    method add_standard_options (line 317) | def add_standard_options(self):
    method process_standard_options (line 343) | def process_standard_options(self, options, args, d):
    method get_usage (line 366) | def get_usage(self):
    method build_cli_parser (line 375) | def build_cli_parser(self):
    method do_cli (line 405) | def do_cli(self):
    method _generic_cli_formatter (line 468) | def _generic_cli_formatter(self, fmt, data, label=''):
    method cli_formatter (line 490) | def cli_formatter(self, data):

FILE: deploy/third_party/boto-2.1.1/boto/roboto/awsqueryservice.py
  class NoCredentialsError (line 9) | class NoCredentialsError(boto.exception.BotoClientError):
    method __init__ (line 11) | def __init__(self):
  class AWSQueryService (line 15) | class AWSQueryService(boto.connection.AWSQueryConnection):
    method __init__ (line 28) | def __init__(self, **args):
    method check_for_credential_file (line 49) | def check_for_credential_file(self):
    method check_for_env_url (line 82) | def check_for_env_url(self):
    method _required_auth_capability (line 119) | def _required_auth_capability(self):

FILE: deploy/third_party/boto-2.1.1/boto/roboto/param.py
  class Converter (line 25) | class Converter(object):
    method convert_string (line 28) | def convert_string(cls, param, value):
    method convert_integer (line 35) | def convert_integer(cls, param, value):
    method convert_boolean (line 40) | def convert_boolean(cls, param, value):
    method convert_file (line 48) | def convert_file(cls, param, value):
    method convert_dir (line 54) | def convert_dir(cls, param, value):
    method convert (line 60) | def convert(cls, param, value):
  class Param (line 70) | class Param(object):
    method __init__ (line 72) | def __init__(self, name=None, ptype='string', optional=True,
    method optparse_long_name (line 90) | def optparse_long_name(self):
    method synopsis_long_name (line 97) | def synopsis_long_name(self):
    method getopt_long_name (line 104) | def getopt_long_name(self):
    method optparse_short_name (line 113) | def optparse_short_name(self):
    method synopsis_short_name (line 120) | def synopsis_short_name(self):
    method getopt_short_name (line 127) | def getopt_short_name(self):
    method convert (line 135) | def convert(self, value):

FILE: deploy/third_party/boto-2.1.1/boto/route53/connection.py
  class Route53Connection (line 47) | class Route53Connection(AWSAuthConnection):
    method __init__ (line 57) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 64) | def _required_auth_capability(self):
    method make_request (line 67) | def make_request(self, action, path, headers=None, data='', params=None):
    method get_all_hosted_zones (line 78) | def get_all_hosted_zones(self, start_marker=None, zone_list=None):
    method get_hosted_zone (line 110) | def get_hosted_zone(self, hosted_zone_id):
    method create_hosted_zone (line 132) | def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
    method delete_hosted_zone (line 183) | def delete_hosted_zone(self, hosted_zone_id):
    method get_all_rrsets (line 199) | def get_all_rrsets(self, hosted_zone_id, type=None,
    method change_rrsets (line 267) | def change_rrsets(self, hosted_zone_id, xml_body):
    method get_change (line 297) | def get_change(self, change_id):

FILE: deploy/third_party/boto-2.1.1/boto/route53/exception.py
  class DNSServerError (line 25) | class DNSServerError(BotoServerError):

FILE: deploy/third_party/boto-2.1.1/boto/route53/hostedzone.py
  class HostedZone (line 25) | class HostedZone(object):
    method __init__ (line 27) | def __init__(self, id=None, name=None, owner=None, version=None,
    method startElement (line 36) | def startElement(self, name, attrs, connection):
    method endElement (line 43) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/route53/record.py
  class ResourceRecordSets (line 26) | class ResourceRecordSets(ResultSet):
    method __init__ (line 42) | def __init__(self, connection=None, hosted_zone_id=None, comment=None):
    method __repr__ (line 51) | def __repr__(self):
    method add_change (line 54) | def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id...
    method to_xml (line 60) | def to_xml(self):
    method commit (line 70) | def commit(self):
    method endElement (line 77) | def endElement(self, name, value, connection):
    method __iter__ (line 87) | def __iter__(self):
  class Record (line 101) | class Record(object):
    method __init__ (line 125) | def __init__(self, name=None, type=None, ttl=600, resource_records=Non...
    method add_value (line 135) | def add_value(self, value):
    method set_alias (line 139) | def set_alias(self, alias_hosted_zone_id, alias_dns_name):
    method to_xml (line 144) | def to_xml(self):
    method to_print (line 165) | def to_print(self):
    method endElement (line 173) | def endElement(self, name, value, connection):
    method startElement (line 187) | def startElement(self, name, attrs, connection):
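
Usage sketch (not part of the repository): adding an A record via ResourceRecordSets. The hosted-zone id and addresses are placeholders; add_change is assumed to return the new Record so values can be appended before commit().

  from boto.route53.connection import Route53Connection
  from boto.route53.record import ResourceRecordSets

  conn = Route53Connection()
  changes = ResourceRecordSets(conn, 'Z123EXAMPLE', comment='add web host')
  record = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
  record.add_value('192.0.2.10')
  changes.commit()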

FILE: deploy/third_party/boto-2.1.1/boto/s3/acl.py
  class Policy (line 30) | class Policy:
    method __init__ (line 32) | def __init__(self, parent=None):
    method __repr__ (line 36) | def __repr__(self):
    method startElement (line 51) | def startElement(self, name, attrs, connection):
    method endElement (line 61) | def endElement(self, name, value, connection):
    method to_xml (line 69) | def to_xml(self):
  class ACL (line 76) | class ACL:
    method __init__ (line 78) | def __init__(self, policy=None):
    method add_grant (line 82) | def add_grant(self, grant):
    method add_email_grant (line 85) | def add_email_grant(self, permission, email_address):
    method add_user_grant (line 90) | def add_user_grant(self, permission, user_id, display_name=None):
    method startElement (line 94) | def startElement(self, name, attrs, connection):
    method endElement (line 101) | def endElement(self, name, value, connection):
    method to_xml (line 107) | def to_xml(self):
  class Grant (line 114) | class Grant:
    method __init__ (line 118) | def __init__(self, permission=None, type=None, id=None,
    method startElement (line 127) | def startElement(self, name, attrs, connection):
    method endElement (line 132) | def endElement(self, name, value, connection):
    method to_xml (line 148) | def to_xml(self):

FILE: deploy/third_party/boto-2.1.1/boto/s3/bucket.py
  class S3WebsiteEndpointTranslate (line 44) | class S3WebsiteEndpointTranslate:
    method translate_region (line 53) | def translate_region(self, reg):
  class Bucket (line 58) | class Bucket(object):
    method __init__ (line 96) | def __init__(self, connection=None, name=None, key_class=Key):
    method __repr__ (line 101) | def __repr__(self):
    method __iter__ (line 104) | def __iter__(self):
    method __contains__ (line 107) | def __contains__(self, key_name):
    method startElement (line 110) | def startElement(self, name, attrs, connection):
    method endElement (line 113) | def endElement(self, name, value, connection):
    method set_key_class (line 121) | def set_key_class(self, key_class):
    method lookup (line 135) | def lookup(self, key_name, headers=None):
    method get_key (line 147) | def get_key(self, key_name, headers=None, version_id=None):
    method list (line 199) | def list(self, prefix='', delimiter='', marker='', headers=None):
    method list_versions (line 238) | def list_versions(self, prefix='', delimiter='', key_marker='',
    method list_multipart_uploads (line 270) | def list_multipart_uploads(self, key_marker='',
    method _get_all (line 289) | def _get_all(self, element_map, initial_query_string='',
    method get_all_keys (line 318) | def get_all_keys(self, headers=None, **params):
    method get_all_versions (line 351) | def get_all_versions(self, headers=None, **params):
    method get_all_multipart_uploads (line 390) | def get_all_multipart_uploads(self, headers=None, **params):
    method new_key (line 434) | def new_key(self, key_name=None):
    method generate_url (line 446) | def generate_url(self, expires_in, method='GET', headers=None,
    method delete_key (line 453) | def delete_key(self, key_name, headers=None,
    method copy_key (line 490) | def copy_key(self, new_key_name, src_bucket_name,
    method set_canned_acl (line 585) | def set_canned_acl(self, acl_str, key_name='', headers=None,
    method get_xml_acl (line 604) | def get_xml_acl(self, key_name='', headers=None, version_id=None):
    method set_xml_acl (line 617) | def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=N...
    method set_acl (line 630) | def set_acl(self, acl_or_str, key_name='', headers=None, version_id=No...
    method get_acl (line 638) | def get_acl(self, key_name='', headers=None, version_id=None):
    method set_subresource (line 655) | def set_subresource(self, subresource, value, key_name = '', headers=N...
    method get_subresource (line 692) | def get_subresource(self, subresource, key_name='', headers=None,
    method make_public (line 729) | def make_public(self, recursive=False, headers=None):
    method add_email_grant (line 735) | def add_email_grant(self, permission, email_address,
    method add_user_grant (line 770) | def add_user_grant(self, permission, user_id, recursive=False,
    method list_grants (line 811) | def list_grants(self, headers=None):
    method get_location (line 815) | def get_location(self):
    method enable_logging (line 836) | def enable_logging(self, target_bucket, target_prefix='', headers=None):
    method disable_logging (line 849) | def disable_logging(self, headers=None):
    method get_logging_status (line 860) | def get_logging_status(self, headers=None):
    method set_as_logging_target (line 870) | def set_as_logging_target(self, headers=None):
    method get_request_payment (line 878) | def get_request_payment(self, headers=None):
    method set_request_payment (line 888) | def set_request_payment(self, payer='BucketOwner', headers=None):
    method configure_versioning (line 899) | def configure_versioning(self, versioning, mfa_delete=False,
    method get_versioning_status (line 949) | def get_versioning_status(self, headers=None):
    method configure_website (line 978) | def configure_website(self, suffix, error_key='', headers=None):
    method get_website_configuration (line 1011) | def get_website_configuration(self, headers=None):
    method delete_website_configuration (line 1042) | def delete_website_configuration(self, headers=None):
    method get_website_endpoint (line 1056) | def get_website_endpoint(self):
    method get_policy (line 1067) | def get_policy(self, headers=None):
    method set_policy (line 1077) | def set_policy(self, policy, headers=None):
    method delete_policy (line 1089) | def delete_policy(self, headers=None):
    method initiate_multipart_upload (line 1102) | def initiate_multipart_upload(self, key_name, headers=None,
    method complete_multipart_upload (line 1167) | def complete_multipart_upload(self, key_name, upload_id,
    method cancel_multipart_upload (line 1196) | def cancel_multipart_upload(self, key_name, upload_id, headers=None):
    method delete (line 1207) | def delete(self, headers=None):

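The Bucket listing above enumerates the per-bucket S3 operations. A minimal Python sketch of the common calls, assuming an existing S3Connection named `conn` and a bucket called 'my-bucket' (both placeholders, not taken from the repository):

    bucket = conn.get_bucket('my-bucket')                    # validates that the bucket exists
    names = [k.name for k in bucket.list(prefix='logs/')]    # keys under a prefix, fetched lazily
    bucket.delete_key('logs/old.gz')                         # remove a single key
    acl = bucket.get_acl()                                   # current access-control policy
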
FILE: deploy/third_party/boto-2.1.1/boto/s3/bucketlistresultset.py
  function bucket_lister (line 22) | def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=No...
  class BucketListResultSet (line 37) | class BucketListResultSet:
    method __init__ (line 46) | def __init__(self, bucket=None, prefix='', delimiter='', marker='', he...
    method __iter__ (line 53) | def __iter__(self):
  function versioned_bucket_lister (line 58) | def versioned_bucket_lister(bucket, prefix='', delimiter='',
  class VersionedBucketListResultSet (line 76) | class VersionedBucketListResultSet:
    method __init__ (line 85) | def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
    method __iter__ (line 94) | def __iter__(self):
  function multipart_upload_lister (line 101) | def multipart_upload_lister(bucket, key_marker='',
  class MultiPartUploadListResultSet (line 119) | class MultiPartUploadListResultSet:
    method __init__ (line 128) | def __init__(self, bucket=None, key_marker='',
    method __iter__ (line 135) | def __iter__(self):

FILE: deploy/third_party/boto-2.1.1/boto/s3/connection.py
  function check_lowercase_bucketname (line 35) | def check_lowercase_bucketname(n):
  function assert_case_insensitive (line 59) | def assert_case_insensitive(f):
  class _CallingFormat (line 66) | class _CallingFormat(object):
    method get_bucket_server (line 68) | def get_bucket_server(self, server, bucket):
    method build_url_base (line 71) | def build_url_base(self, connection, protocol, server, bucket, key=''):
    method build_host (line 77) | def build_host(self, server, bucket):
    method build_auth_path (line 83) | def build_auth_path(self, bucket, key=''):
    method build_path_base (line 90) | def build_path_base(self, bucket, key=''):
  class SubdomainCallingFormat (line 94) | class SubdomainCallingFormat(_CallingFormat):
    method get_bucket_server (line 97) | def get_bucket_server(self, server, bucket):
  class VHostCallingFormat (line 100) | class VHostCallingFormat(_CallingFormat):
    method get_bucket_server (line 103) | def get_bucket_server(self, server, bucket):
  class OrdinaryCallingFormat (line 106) | class OrdinaryCallingFormat(_CallingFormat):
    method get_bucket_server (line 108) | def get_bucket_server(self, server, bucket):
    method build_path_base (line 111) | def build_path_base(self, bucket, key=''):
  class ProtocolIndependentOrdinaryCallingFormat (line 118) | class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
    method build_url_base (line 120) | def build_url_base(self, connection, protocol, server, bucket, key=''):
  class Location (line 126) | class Location:
  class S3Connection (line 133) | class S3Connection(AWSAuthConnection):
    method __init__ (line 138) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 152) | def _required_auth_capability(self):
    method __iter__ (line 155) | def __iter__(self):
    method __contains__ (line 159) | def __contains__(self, bucket_name):
    method set_bucket_class (line 162) | def set_bucket_class(self, bucket_class):
    method build_post_policy (line 173) | def build_post_policy(self, expiration_time, conditions):
    method build_post_form_args (line 186) | def build_post_form_args(self, bucket_name, key, expires_in = 6000,
    method generate_url (line 280) | def generate_url(self, expires_in, method, bucket='', key='', headers=...
    method get_all_buckets (line 321) | def get_all_buckets(self, headers=None):
    method get_canonical_user_id (line 332) | def get_canonical_user_id(self, headers=None):
    method get_bucket (line 348) | def get_bucket(self, bucket_name, validate=True, headers=None):
    method lookup (line 354) | def lookup(self, bucket_name, validate=True, headers=None):
    method create_bucket (line 361) | def create_bucket(self, bucket_name, headers=None,
    method delete_bucket (line 404) | def delete_bucket(self, bucket, headers=None):
    method make_request (line 411) | def make_request(self, method, bucket='', key='', headers=None, data='',

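connection.py defines the S3 entry point. A short sketch of creating a connection, a bucket, and a pre-signed URL using the methods listed above (credentials and names are placeholders):

    from boto.s3.connection import S3Connection, Location

    conn = S3Connection('ACCESS_KEY_ID', 'SECRET_ACCESS_KEY')
    bucket = conn.create_bucket('example-bucket', location=Location.DEFAULT)
    # Pre-signed GET URL valid for one hour, built with generate_url() from the listing.
    url = conn.generate_url(3600, 'GET', bucket='example-bucket', key='report.csv')
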
FILE: deploy/third_party/boto-2.1.1/boto/s3/deletemarker.py
  class DeleteMarker (line 24) | class DeleteMarker:
    method __init__ (line 25) | def __init__(self, bucket=None, name=None):
    method startElement (line 32) | def startElement(self, name, attrs, connection):
    method endElement (line 39) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/s3/key.py
  class Key (line 40) | class Key(object):
    method __init__ (line 46) | def __init__(self, bucket=None, name=None):
    method __repr__ (line 69) | def __repr__(self):
    method __getattr__ (line 75) | def __getattr__(self, name):
    method __setattr__ (line 81) | def __setattr__(self, name, value):
    method __iter__ (line 87) | def __iter__(self):
    method provider (line 91) | def provider(self):
    method get_md5_from_hexdigest (line 98) | def get_md5_from_hexdigest(self, md5_hexdigest):
    method handle_encryption_headers (line 110) | def handle_encryption_headers(self, resp):
    method handle_version_headers (line 117) | def handle_version_headers(self, resp, force=False):
    method open_read (line 133) | def open_read(self, headers=None, query_args=None,
    method open_write (line 192) | def open_write(self, headers=None, override_num_retries=None):
    method open (line 206) | def open(self, mode='r', headers=None, query_args=None,
    method close (line 220) | def close(self):
    method next (line 227) | def next(self):
    method read (line 244) | def read(self, size=0):
    method change_storage_class (line 254) | def change_storage_class(self, new_storage_class, dst_bucket=None):
    method copy (line 285) | def copy(self, dst_bucket, dst_key, metadata=None,
    method startElement (line 347) | def startElement(self, name, attrs, connection):
    method endElement (line 354) | def endElement(self, name, value, connection):
    method exists (line 372) | def exists(self):
    method delete (line 381) | def delete(self):
    method get_metadata (line 387) | def get_metadata(self, name):
    method set_metadata (line 390) | def set_metadata(self, name, value):
    method update_metadata (line 393) | def update_metadata(self, d):
    method set_acl (line 397) | def set_acl(self, acl_str, headers=None):
    method get_acl (line 401) | def get_acl(self, headers=None):
    method get_xml_acl (line 405) | def get_xml_acl(self, headers=None):
    method set_xml_acl (line 409) | def set_xml_acl(self, acl_str, headers=None):
    method set_canned_acl (line 413) | def set_canned_acl(self, acl_str, headers=None):
    method make_public (line 416) | def make_public(self, headers=None):
    method generate_url (line 419) | def generate_url(self, expires_in, method='GET', headers=None,
    method send_file (line 446) | def send_file(self, fp, headers=None, cb=None, num_cb=10,
    method compute_md5 (line 586) | def compute_md5(self, fp):
    method set_contents_from_stream (line 612) | def set_contents_from_stream(self, fp, headers=None, replace=True,
    method set_contents_from_file (line 693) | def set_contents_from_file(self, fp, headers=None, replace=True,
    method set_contents_from_filename (line 792) | def set_contents_from_filename(self, filename, headers=None, replace=T...
    method set_contents_from_string (line 860) | def set_contents_from_string(self, s, headers=None, replace=True,
    method get_file (line 928) | def get_file(self, fp, headers=None, cb=None, num_cb=10,
    method get_torrent_file (line 1010) | def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
    method get_contents_to_file (line 1038) | def get_contents_to_file(self, fp, headers=None,
    method get_contents_to_filename (line 1095) | def get_contents_to_filename(self, filename, headers=None,
    method get_contents_as_string (line 1156) | def get_contents_as_string(self, headers=None,
    method add_email_grant (line 1204) | def add_email_grant(self, permission, email_address, headers=None):
    method add_user_grant (line 1232) | def add_user_grant(self, permission, user_id, headers=None,

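key.py is the per-object API. A hedged sketch of an upload/download round trip with the methods listed, reusing the placeholder `bucket` object from the previous sketch:

    k = bucket.new_key('data/report.csv')
    k.set_metadata('source', 'nightly-job')            # stored as x-amz-meta-source
    k.set_contents_from_filename('/tmp/report.csv')    # uploads the local file
    k.make_public()                                     # canned 'public-read' ACL
    body = bucket.get_key('data/report.csv').get_contents_as_string()
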
FILE: deploy/third_party/boto-2.1.1/boto/s3/multipart.py
  class CompleteMultiPartUpload (line 28) | class CompleteMultiPartUpload(object):
    method __init__ (line 40) | def __init__(self, bucket=None):
    method __repr__ (line 47) | def __repr__(self):
    method startElement (line 51) | def startElement(self, name, attrs, connection):
    method endElement (line 54) | def endElement(self, name, value, connection):
  class Part (line 66) | class Part(object):
    method __init__ (line 77) | def __init__(self, bucket=None):
    method __repr__ (line 84) | def __repr__(self):
    method startElement (line 90) | def startElement(self, name, attrs, connection):
    method endElement (line 93) | def endElement(self, name, value, connection):
  function part_lister (line 105) | def part_lister(mpupload, part_number_marker=None):
  class MultiPartUpload (line 118) | class MultiPartUpload(object):
    method __init__ (line 123) | def __init__(self, bucket=None):
    method __repr__ (line 138) | def __repr__(self):
    method __iter__ (line 141) | def __iter__(self):
    method to_xml (line 144) | def to_xml(self):
    method startElement (line 155) | def startElement(self, name, attrs, connection):
    method endElement (line 168) | def endElement(self, name, value, connection):
    method get_all_parts (line 191) | def get_all_parts(self, max_parts=None, part_number_marker=None):
    method upload_part_from_file (line 214) | def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
    method complete_upload (line 236) | def complete_upload(self):
    method cancel_upload (line 249) | def cancel_upload(self):

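multipart.py backs the multipart methods on Bucket. A sketch of a chunked upload written against the method names listed above (the 5 MB minimum part size is an S3 requirement; file paths are placeholders):

    from io import BytesIO

    mp = bucket.initiate_multipart_upload('backups/big.tar')
    with open('/tmp/big.tar', 'rb') as fp:
        part_num = 1
        while True:
            chunk = fp.read(5 * 1024 * 1024)
            if not chunk:
                break
            mp.upload_part_from_file(BytesIO(chunk), part_num)
            part_num += 1
    mp.complete_upload()        # or mp.cancel_upload() to abandon the uploaded parts
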
FILE: deploy/third_party/boto-2.1.1/boto/s3/prefix.py
  class Prefix (line 22) | class Prefix(object):
    method __init__ (line 23) | def __init__(self, bucket=None, name=None):
    method startElement (line 27) | def startElement(self, name, attrs, connection):
    method endElement (line 30) | def endElement(self, name, value, connection):

FILE: deploy/third_party/boto-2.1.1/boto/s3/resumable_download_handler.py
  class ByteTranslatingCallbackHandler (line 56) | class ByteTranslatingCallbackHandler(object):
    method __init__ (line 62) | def __init__(self, proxied_cb, download_start_point):
    method call (line 66) | def call(self, total_bytes_uploaded, total_size):
  function get_cur_file_size (line 71) | def get_cur_file_size(fp, position_to_eof=False):
  class ResumableDownloadHandler (line 84) | class ResumableDownloadHandler(object):
    method __init__ (line 94) | def __init__(self, tracker_file_name=None, num_retries=None):
    method _load_tracker_file_etag (line 122) | def _load_tracker_file_etag(self):
    method _save_tracker_info (line 146) | def _save_tracker_info(self, key):
    method _remove_tracker_file (line 166) | def _remove_tracker_file(self):
    method _attempt_resumable_download (line 171) | def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
    method _check_final_md5 (line 215) | def _check_final_md5(self, key, file_name):
    method get_file (line 236) | def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,

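resumable_download_handler.py retries interrupted downloads using a tracker file. A sketch based on the get_file() signature listed above (`key` is assumed to be an existing boto Key; paths are placeholders):

    from boto.s3.resumable_download_handler import ResumableDownloadHandler

    handler = ResumableDownloadHandler(tracker_file_name='/tmp/big.tar.tracker',
                                       num_retries=5)
    with open('/tmp/big.tar', 'ab') as fp:       # append mode keeps any partial download
        handler.get_file(key, fp, headers={})    # resumes from the tracker file if restarted
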
FILE: deploy/third_party/boto-2.1.1/boto/s3/user.py
  class User (line 22) | class User:
    method __init__ (line 23) | def __init__(self, parent=None, id='', display_name=''):
    method startElement (line 30) | def startElement(self, name, attrs, connection):
    method endElement (line 33) | def endElement(self, name, value, connection):
    method to_xml (line 41) | def to_xml(self, element_name='Owner'):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/__init__.py
  function regions (line 25) | def regions():
  function connect_to_region (line 44) | def connect_to_region(region_name, **kw_params):
  function get_region (line 61) | def get_region(region_name, **kw_params):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/connection.py
  class ItemThread (line 32) | class ItemThread(threading.Thread):
    method __init__ (line 41) | def __init__(self, name, domain_name, item_names):
    method run (line 58) | def run(self):
  class SDBConnection (line 69) | class SDBConnection(AWSQueryConnection):
    method __init__ (line 85) | def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
    method _required_auth_capability (line 125) | def _required_auth_capability(self):
    method set_item_cls (line 128) | def set_item_cls(self, cls):
    method _build_name_value_list (line 140) | def _build_name_value_list(self, params, attributes, replace=False,
    method _build_expected_value (line 165) | def _build_expected_value(self, params, expected_value):
    method _build_batch_list (line 174) | def _build_batch_list(self, params, items, replace=False):
    method _build_name_list (line 204) | def _build_name_list(self, params, attribute_names):
    method get_usage (line 211) | def get_usage(self):
    method print_usage (line 226) | def print_usage(self):
    method get_domain (line 240) | def get_domain(self, domain_name, validate=True):
    method lookup (line 263) | def lookup(self, domain_name, validate=True):
    method get_all_domains (line 285) | def get_all_domains(self, max_domains=None, next_token=None):
    method create_domain (line 309) | def create_domain(self, domain_name):
    method get_domain_and_name (line 324) | def get_domain_and_name(self, domain_or_name):
    method delete_domain (line 348) | def delete_domain(self, domain_or_name):
    method domain_metadata (line 365) | def domain_metadata(self, domain_or_name):
    method put_attributes (line 381) | def put_attributes(self, domain_or_name, item_name, attributes,
    method batch_put_attributes (line 429) | def batch_put_attributes(self, domain_or_name, items, replace=True):
    method get_attributes (line 456) | def get_attributes(self, domain_or_name, item_name, attribute_names=None,
    method delete_attributes (line 504) | def delete_attributes(self, domain_or_name, item_name, attr_names=None,
    method batch_delete_attributes (line 557) | def batch_delete_attributes(self, domain_or_name, items):
    method select (line 583) | def select(self, domain_or_name, query='', next_token=None,

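sdb/connection.py is the SimpleDB client. A sketch of the domain and attribute calls listed above (region, domain, and item names are placeholders):

    import boto.sdb

    conn = boto.sdb.connect_to_region('us-east-1')
    conn.create_domain('users')
    conn.put_attributes('users', 'user-123', {'name': 'Ada', 'logins': '3'})
    attrs = conn.get_attributes('users', 'user-123')
    rows = conn.select('users', "select * from `users` where name = 'Ada'")
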
FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/blob.py
  class Blob (line 23) | class Blob(object):
    method __init__ (line 25) | def __init__(self, value=None, file=None, id=None):
    method file (line 31) | def file(self):
    method __str__ (line 39) | def __str__(self):
    method __unicode__ (line 42) | def __unicode__(self):
    method read (line 53) | def read(self):
    method readline (line 59) | def readline(self):
    method next (line 62) | def next(self):
    method __iter__ (line 65) | def __iter__(self):
    method size (line 69) | def size(self):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/key.py
  class Key (line 22) | class Key(object):
    method from_path (line 25) | def from_path(cls, *args, **kwds):
    method __init__ (line 28) | def __init__(self, encoded=None, obj=None):
    method app (line 37) | def app(self):
    method kind (line 40) | def kind(self):
    method id (line 43) | def id(self):
    method name (line 46) | def name(self):
    method id_or_name (line 49) | def id_or_name(self):
    method has_id_or_name (line 52) | def has_id_or_name(self):
    method parent (line 55) | def parent(self):
    method __str__ (line 58) | def __str__(self):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/manager/__init__.py
  function get_manager (line 23) | def get_manager(cls):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/manager/pgmanager.py
  class PGConverter (line 32) | class PGConverter:
    method __init__ (line 34) | def __init__(self, manager):
    method encode (line 39) | def encode(self, type, value):
    method decode (line 45) | def decode(self, type, value):
    method encode_prop (line 51) | def encode_prop(self, prop, value):
    method decode_prop (line 68) | def decode_prop(self, prop, value):
    method encode_reference (line 94) | def encode_reference(self, value):
    method decode_reference (line 102) | def decode_reference(self, value):
  class PGManager (line 110) | class PGManager(object):
    method __init__ (line 112) | def __init__(self, cls, db_name, db_user, db_passwd,
    method _build_connect_string (line 126) | def _build_connect_string(self):
    method _connect (line 131) | def _connect(self):
    method _object_lister (line 136) | def _object_lister(self, cursor):
    method _dict_from_row (line 144) | def _dict_from_row(self, row, description):
    method _object_from_row (line 150) | def _object_from_row(self, row, description=None):
    method _build_insert_qs (line 169) | def _build_insert_qs(self, obj, calculated):
    method _build_update_qs (line 198) | def _build_update_qs(self, obj, calculated):
    method _get_sql (line 220) | def _get_sql(self, mapping=None):
    method start_transaction (line 234) | def start_transaction(self):
    method end_transaction (line 238) | def end_transaction(self):
    method commit (line 243) | def commit(self):
    method rollback (line 253) | def rollback(self):
    method delete_table (line 257) | def delete_table(self):
    method create_table (line 261) | def create_table(self, mapping=None):
    method encode_value (line 265) | def encode_value(self, prop, value):
    method decode_value (line 268) | def decode_value(self, prop, value):
    method execute_sql (line 271) | def execute_sql(self, query):
    method query_sql (line 275) | def query_sql(self, query, vars=None):
    method lookup (line 279) | def lookup(self, cls, name, value):
    method query (line 303) | def query(self, cls, filters, limit=None, order_by=None):
    method get_property (line 326) | def get_property(self, prop, obj, name):
    method set_property (line 338) | def set_property(self, prop, obj, name, value):
    method get_object (line 349) | def get_object(self, cls, id):
    method get_object_from_id (line 358) | def get_object_from_id(self, id):
    method _find_calculated_props (line 361) | def _find_calculated_props(self, obj):
    method save_object (line 364) | def save_object(self, obj, expected_value=None):
    method delete_object (line 383) | def delete_object(self, obj):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/manager/sdbmanager.py
  class TimeDecodeError (line 35) | class TimeDecodeError(Exception):
  class SDBConverter (line 38) | class SDBConverter(object):
    method __init__ (line 50) | def __init__(self, manager):
    method encode (line 65) | def encode(self, item_type, value):
    method decode (line 76) | def decode(self, item_type, value):
    method encode_list (line 82) | def encode_list(self, prop, value):
    method encode_map (line 99) | def encode_map(self, prop, value):
    method encode_prop (line 115) | def encode_prop(self, prop, value):
    method decode_list (line 123) | def decode_list(self, prop, value):
    method decode_map (line 140) | def decode_map(self, prop, value):
    method decode_map_element (line 150) | def decode_map_element(self, item_type, value):
    method decode_prop (line 163) | def decode_prop(self, prop, value):
    method encode_int (line 171) | def encode_int(self, value):
    method decode_int (line 176) | def decode_int(self, value):
    method encode_long (line 186) | def encode_long(self, value):
    method decode_long (line 191) | def decode_long(self, value):
    method encode_bool (line 196) | def encode_bool(self, value):
    method decode_bool (line 202) | def decode_bool(self, value):
    method encode_float (line 208) | def encode_float(self, value):
    method decode_float (line 239) | def decode_float(self, value):
    method encode_datetime (line 257) | def encode_datetime(self, value):
    method decode_datetime (line 262) | def decode_datetime(self, value):
    method encode_date (line 268) | def encode_date(self, value):
    method decode_date (line 273) | def decode_date(self, value):
    method decode_time (line 282) | def decode_time(self, value):
    method encode_reference (line 299) | def encode_reference(self, value):
    method decode_reference (line 307) | def decode_reference(self, value):
    method encode_blob (line 312) | def encode_blob(self, value):
    method decode_blob (line 336) | def decode_blob(self, value):
    method encode_string (line 356) | def encode_string(self, value):
    method decode_string (line 369) | def decode_string(self, value):
  class SDBManager (line 374) | class SDBManager(object):
    method __init__ (line 376) | def __init__(self, cls, db_name, db_user, db_passwd,
    method sdb (line 397) | def sdb(self):
    method domain (line 403) | def domain(self):
    method _connect (line 408) | def _connect(self):
    method _object_lister (line 426) | def _object_lister(self, cls, query_lister):
    method encode_value (line 432) | def encode_value(self, prop, value):
    method decode_value (line 439) | def decode_value(self, prop, value):
    method get_s3_connection (line 442) | def get_s3_connection(self):
    method get_blob_bucket (line 447) | def get_blob_bucket(self, bucket_name=None):
    method load_object (line 457) | def load_object(self, obj):
    method get_object (line 471) | def get_object(self, cls, id, a=None):
    method get_object_from_id (line 492) | def get_object_from_id(self, id):
    method query (line 495) | def query(self, query):
    method count (line 503) | def count(self, cls, filters, quick=True, sort_by=None, select=None):
    method _build_filter (line 517) | def _build_filter(self, property, name, op, val):
    method _build_filter_part (line 538) | def _build_filter_part(self, cls, filters, order_by=None, select=None):
    method _get_all_decendents (line 620) | def _get_all_decendents(self, cls):
    method query_gql (line 628) | def query_gql(self, query_string, *args, **kwds):
    method save_object (line 631) | def save_object(self, obj, expected_value=None):
    method delete_object (line 669) | def delete_object(self, obj):
    method set_property (line 672) | def set_property(self, prop, obj, name, value):
    method get_property (line 686) | def get_property(self, prop, obj, name):
    method set_key_value (line 697) | def set_key_value(self, obj, name, value):
    method delete_key_value (line 700) | def delete_key_value(self, obj, name):
    method get_key_value (line 703) | def get_key_value(self, obj, name):
    method get_raw_item (line 710) | def get_raw_item(self, obj):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/manager/xmlmanager.py
  class XMLConverter (line 30) | class XMLConverter:
    method __init__ (line 42) | def __init__(self, manager):
    method get_text_value (line 52) | def get_text_value(self, parent_node):
    method encode (line 59) | def encode(self, item_type, value):
    method decode (line 65) | def decode(self, item_type, value):
    method encode_prop (line 73) | def encode_prop(self, prop, value):
    method decode_prop (line 88) | def decode_prop(self, prop, value):
    method encode_int (line 104) | def encode_int(self, value):
    method decode_int (line 108) | def decode_int(self, value):
    method encode_long (line 116) | def encode_long(self, value):
    method decode_long (line 120) | def decode_long(self, value):
    method encode_bool (line 124) | def encode_bool(self, value):
    method decode_bool (line 130) | def decode_bool(self, value):
    method encode_datetime (line 137) | def encode_datetime(self, value):
    method decode_datetime (line 140) | def decode_datetime(self, value):
    method encode_reference (line 147) | def encode_reference(self, value):
    method decode_reference (line 158) | def decode_reference(self, value):
    method encode_password (line 170) | def encode_password(self, value):
    method decode_password (line 176) | def decode_password(self, value):
  class XMLManager (line 181) | class XMLManager(object):
    method __init__ (line 183) | def __init__(self, cls, db_name, db_user, db_passwd,
    method _connect (line 209) | def _connect(self):
    method _make_request (line 218) | def _make_request(self, method, url, post_data=None, body=None):
    method new_doc (line 236) | def new_doc(self):
    method _object_lister (line 239) | def _object_lister(self, cls, doc):
    method reset (line 258) | def reset(self):
    method get_doc (line 261) | def get_doc(self):
    method encode_value (line 264) | def encode_value(self, prop, value):
    method decode_value (line 267) | def decode_value(self, prop, value):
    method get_s3_connection (line 270) | def get_s3_connection(self):
    method get_list (line 275) | def get_list(self, prop_node, item_type):
    method get_object_from_doc (line 286) | def get_object_from_doc(self, cls, id, doc):
    method get_props_from_doc (line 306) | def get_props_from_doc(self, cls, id, doc):
    method get_object (line 329) | def get_object(self, cls, id):
    method query (line 343) | def query(self, cls, filters, limit=None, order_by=None):
    method _build_query (line 364) | def _build_query(self, cls, filters, limit, order_by):
    method query_gql (line 397) | def query_gql(self, query_string, *args, **kwds):
    method save_list (line 400) | def save_list(self, doc, items, prop_node):
    method save_object (line 412) | def save_object(self, obj, expected_value=None):
    method marshal_object (line 436) | def marshal_object(self, obj, doc=None):
    method unmarshal_object (line 468) | def unmarshal_object(self, fp, cls=None, id=None):
    method unmarshal_props (line 475) | def unmarshal_props(self, fp, cls=None, id=None):
    method delete_object (line 486) | def delete_object(self, obj):
    method set_key_value (line 490) | def set_key_value(self, obj, name, value):
    method delete_key_value (line 493) | def delete_key_value(self, obj, name):
    method get_key_value (line 496) | def get_key_value(self, obj, name):
    method get_raw_item (line 503) | def get_raw_item(self, obj):
    method set_property (line 506) | def set_property(self, prop, obj, name, value):
    method get_property (line 509) | def get_property(self, prop, obj, name):
    method load_object (line 512) | def load_object(self, obj):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/model.py
  class ModelMeta (line 28) | class ModelMeta(type):
    method __init__ (line 31) | def __init__(cls, name, bases, dict):
  class Model (line 56) | class Model(object):
    method get_lineage (line 62) | def get_lineage(cls):
    method kind (line 68) | def kind(cls):
    method _get_by_id (line 72) | def _get_by_id(cls, id, manager=None):
    method get_by_id (line 78) | def get_by_id(cls, ids=None, parent=None):
    method get_by_key_name (line 88) | def get_by_key_name(cls, key_names, parent=None):
    method find (line 92) | def find(cls, limit=None, next_token=None, **params):
    method all (line 99) | def all(cls, limit=None, next_token=None):
    method get_or_insert (line 103) | def get_or_insert(key_name, **kw):
    method properties (line 107) | def properties(cls, hidden=True):
    method find_property (line 122) | def find_property(cls, prop_name):
    method get_xmlmanager (line 137) | def get_xmlmanager(cls):
    method from_xml (line 145) | def from_xml(cls, fp):
    method __init__ (line 149) | def __init__(self, id=None, **kw):
    method __repr__ (line 169) | def __repr__(self):
    method __str__ (line 172) | def __str__(self):
    method __eq__ (line 175) | def __eq__(self, other):
    method _get_raw_item (line 178) | def _get_raw_item(self):
    method load (line 181) | def load(self):
    method reload (line 185) | def reload(self):
    method put (line 190) | def put(self, expected_value=None):
    method put_attributes (line 207) | def put_attributes(self, attrs):
    method delete_attributes (line 225) | def delete_attributes(self, attrs):
    method delete (line 241) | def delete(self):
    method key (line 244) | def key(self):
    method set_manager (line 247) | def set_manager(self, manager):
    method to_dict (line 250) | def to_dict(self):
    method to_xml (line 258) | def to_xml(self, doc=None):
    method find_subclass (line 264) | def find_subclass(cls, name):
  class Expando (line 273) | class Expando(Model):
    method __setattr__ (line 275) | def __setattr__(self, name, value):
    method __getattr__ (line 286) | def __getattr__(self, name):

FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/property.py
  class Property (line 31) | class Property(object):
    method __init__ (line 38) | def __init__(self, verbose_name=None, name=None, default=None, require...
    method __get__ (line 52) | def __get__(self, obj, objtype):
    method __set__ (line 59) | def __set__(self, obj, value):
    method __property_config__ (line 72) | def __property_config__(self, model_class, property_name):
    method default_validator (line 77) | def default_validator(self, value):
    method default_value (line 83) | def default_value(self):
    method validate (line 86) | def validate(self, value):
    method empty (line 97) | def empty(self, value):
    method get_value_for_datastore (line 100) | def get_value_for_datastore(self, model_instance):
    method make_value_from_datastore (line 103) | def make_value_from_datastore(self, value):
    method get_choices (line 106) | def get_choices(self):
  function validate_string (line 111) | def validate_string(value):
  class StringProperty (line 120) | class StringProperty(Property):
    method __init__ (line 124) | def __init__(self, verbose_name=None, name=None, default='', required=...
  class TextProperty (line 128) | class TextProperty(Property):
    method __init__ (line 132) | def __init__(self, verbose_name=None, name=None, default='', required=...
    method validate (line 137) | def validate(self, value):
  class PasswordProperty (line 144) | class PasswordProperty(StringProperty):
    method __init__ (line 193) | def __init__(self, verbose_name=None, name=None, default='', required=...
    method make_value_from_datastore (line 205) | def make_value_from_datastore(self, value):
    method get_value_for_datastore (line 209) | def get_value_for_datastore(self, model_instance):
    method __set__ (line 216) | def __set__(self, obj, value):
    method __get__ (line 223) | def __get__(self, obj, objtype):
    method validate (line 226) | def validate(self, value):
  class BlobProperty (line 234) | class BlobProperty(Property):
    method __set__ (line 238) | def __set__(self, obj, value):
  class S3KeyProperty (line 249) | class S3KeyProperty(Property):
    method __init__ (line 255) | def __init__(self, verbose_name=None, name=None, default=None,
    method validate (line 260) | def validate(self, value):
    method __get__ (line 271) | def __get__(self, obj, objtype):
    method get_value_for_datastore (line 288) | def get_value_for_datastore(self, model_instance):
  class IntegerProperty (line 295) | class IntegerProperty(Property):
    method __init__ (line 300) | def __init__(self, verbose_name=None, name=None, default=0, required=F...
    method validate (line 306) | def validate(self, value):
    method empty (line 315) | def empty(self, value):
    method __set__ (line 318) | def __set__(self, obj, value):
  class LongProperty (line 325) | class LongProperty(Property):
    method __init__ (line 330) | def __init__(self, verbose_name=None, name=None, default=0, required=F...
    method validate (line 334) | def validate(self, value):
    method empty (line 345) | def empty(self, value):
  class BooleanProperty (line 348) | class BooleanProperty(Property):
    method __init__ (line 353) | def __init__(self, verbose_name=None, name=None, default=False, requir...
    method empty (line 357) | def empty(self, value):
  class FloatProperty (line 360) | class FloatProperty(Property):
    method __init__ (line 365) | def __init__(self, verbose_name=None, name=None, default=0.0, required...
    method validate (line 369) | def validate(self, value):
    method empty (line 374) | def empty(self, value):
  class DateTimeProperty (line 377) | class DateTimeProperty(Property):
    method __init__ (line 382) | def __init__(self, verbose_name=None, auto_now=False, auto_now_add=Fal...
    method default_value (line 388) | def default_value(self):
    method validate (line 393) | def validate(self, value):
    method get_value_for_datastore (line 400) | def get_value_for_datastore(self, model_instance):
    method now (line 405) | def now(self):
  class DateProperty (line 408) | class DateProperty(Property):
    method __init__ (line 413) | def __init__(self, verbose_name=None, auto_now=False, auto_now_add=Fal...
    method default_value (line 419) | def default_value(self):
    method validate (line 424) | def validate(self, value):
    method get_value_for_datastore (line 431) | def get_value_for_datastore(self, model_instance):
    method now (line 439) | def now(self):
  class TimeProperty (line 443) | class TimeProperty(Property):
    method __init__ (line 447) | def __init__(self, verbose_name=None, name=None,
    method validate (line 451) | def validate(self, value):
  class ReferenceProperty (line 459) | class ReferenceProperty(Property):
    method __init__ (line 464) | def __init__(self, reference_class=None, collection_name=None,
    method __get__ (line 470) | def __get__(self, obj, objtype):
    method __set__ (line 483) | def __set__(self, obj, value):
    method __property_config__ (line 490) | def __property_config__(self, model_class, property_name):
    method check_uuid (line 499) | def check_uuid(self, value):
    method check_instance (line 505) | def check_instance(self, value):
    method validate (line 515) | def validate(self, value):
  class _ReverseReferenceProperty (line 525) | class _ReverseReferenceProperty(Property):
    method __init__ (line 529) | def __init__(self, model, prop, name):
    method __get__ (line 536) | def __get__(self, model_instance, model_class):
    method __set__ (line 550) | def __set__(self, model_instance, value):
  class CalculatedProperty (line 555) | class CalculatedProperty(Property):
    method __init__ (line 557) | def __init__(self, verbose_name=None, name=None, default=None,
    method __get__ (line 565) | def __get__(self, obj, objtype):
    method __set__ (line 576) | def __set__(self, obj, value):
    method _set_direct (line 580) | def _set_direct(self, obj, value):
    method get_value_for_datastore (line 584) | def get_value_for_datastore(self, model_instance):
  class ListProperty (line 591) | class ListProperty(Property):
    method __init__ (line 596) | def __init__(self, item_type, verbose_name=None, name=None, default=No...
    method validate (line 602) | def validate(self, value):
    method empty (line 625) | def empty(self, value):
    method default_value (line 628) | def default_value(self):
    method __set__ (line 631) | def __set__(self, obj, value):
  class MapProperty (line 646) | class MapProperty(Property):
    method __init__ (line 651) | def __init__(self, item_type=str, verbose_name=None, name=None, defaul...
    method validate (line 657) | def validate(self, value):
    method empty (line 679) | def empty(self, value):
    method default_value (line 682) | def default_value(self):

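model.py and property.py together form boto's small object-persistence layer (backed by SDBManager by default). A hedged sketch of declaring and saving a model with the property types listed above; the Employee class is purely illustrative:

    from boto.sdb.db.model import Model
    from boto.sdb.db.property import StringProperty, IntegerProperty, DateTimeProperty

    class Employee(Model):
        name = StringProperty(required=True)
        age = IntegerProperty(default=0)
        hired = DateTimeProperty(auto_now_add=True)

    e = Employee(name='Ada', age=36)
    e.put()                              # persists through the configured manager
    same = Employee.get_by_id(e.id)
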
FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/query.py
  class Query (line 22) | class Query(object):
    method __init__ (line 24) | def __init__(self, model_class, limit=None, next_token=None, manager=N...
    method __iter__ (line 38) | def __iter__(self):
    method next (line 41) | def next(self):
    method filter (line 46) | def filter(self, property_operator, value):
    method fetch (line 50) | def fetch(self, limit, offset=0):
    method count (line 57) | def count(self, quick=True):
    method get_query (line 60) | def get_query(self):
    method order (line 63) | def order(self, key):
    method to_xml (line 67) | def to_xml(self, doc=None):
    method get_next_token (line 75) | def get_next_token(self):
    method set_next_token (line 82) | def set_next_token(self, token):

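query.py provides the lazy result set returned by Model.all() and Model.find(). A sketch continuing the hypothetical Employee model from the previous example:

    q = Employee.all()           # boto.sdb.db.query.Query
    q.filter('age >', 30)
    q.order('-hired')            # newest first
    first_ten = list(q.fetch(10))
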
FILE: deploy/third_party/boto-2.1.1/boto/sdb/db/sequence.py
  class SequenceGenerator (line 24) | class SequenceGenerator(object):
    method __init__ (line 39) | def __init__(self, sequence_string, rollover=False):
    method __call__ (line 57) | def __call__(self, val, last=None):
    method _inc (line 71) | def _inc(self, val):
  function increment_by_one (line 81) | def increment_by_one(cv=None, lv=None):
  function double (line 86) | def double(cv=None, lv=None):
  function fib (line 91) | de
Condensed preview — 407 files, each showing path, character count, and a content snippet (full structured content: 14,007K chars).
[
  {
    "path": ".gitignore",
    "chars": 48,
    "preview": "*.class\n.classpath\n.project\ntarget/\n*.swp\n*.pyc\n"
  },
  {
    "path": "LICENSE",
    "chars": 11357,
    "preview": "\n                                 Apache License\n                           Version 2.0, January 2004\n                  "
  },
  {
    "path": "deploy/README",
    "chars": 2280,
    "preview": "Files related to deploying Sparrow.\n\nThe ec2 directory contains files for deploying Sparrow on ec2. The Sparrow\nAMI is n"
  },
  {
    "path": "deploy/ec2/README",
    "chars": 1743,
    "preview": "This script automates the deployment of a Sparrow cluster on ec2. To use it:\n\n1) Get an ec2 account.\n\n2) Get your access"
  },
  {
    "path": "deploy/ec2/ec2-exp.sh",
    "chars": 710,
    "preview": "#!/bin/sh\n\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2."
  },
  {
    "path": "deploy/ec2/ec2_exp.py",
    "chars": 28141,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/fairness.py",
    "chars": 4556,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/fairness.sh",
    "chars": 711,
    "preview": "#!/bin/sh\n\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2."
  },
  {
    "path": "deploy/ec2/isolation.py",
    "chars": 5033,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/isolation.sh",
    "chars": 711,
    "preview": "#!/bin/sh\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2.0"
  },
  {
    "path": "deploy/ec2/osdi.py",
    "chars": 1925,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/prepare_tpch_experiments.py",
    "chars": 2945,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lic"
  },
  {
    "path": "deploy/ec2/prepare_tpch_experiments.sh",
    "chars": 727,
    "preview": "#!/bin/sh\n\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2."
  },
  {
    "path": "deploy/ec2/probe-ratio-het.sh",
    "chars": 717,
    "preview": "#!/bin/sh\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2.0"
  },
  {
    "path": "deploy/ec2/probe-ratio.sh",
    "chars": 713,
    "preview": "#!/bin/sh\n#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2.0"
  },
  {
    "path": "deploy/ec2/probe_ratio.py",
    "chars": 4553,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/probe_ratio_het.py",
    "chars": 4979,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/spark_v_mesos.py",
    "chars": 2048,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n# \n# Licensed under the Apache License, Version 2.0 (the \"Li"
  },
  {
    "path": "deploy/ec2/template/README",
    "chars": 121,
    "preview": "All files in this folder are copied to each remote Sparrow node, with any\nvariables filled in by the EC2 install script."
  },
  {
    "path": "deploy/ec2/template/backends.txt",
    "chars": 17,
    "preview": "{{backend_list}}\n"
  },
  {
    "path": "deploy/ec2/template/build_sparrow.sh",
    "chars": 1916,
    "preview": "#!/bin/bash\n# Build Sparrow locally\n\nSPARROW_INSTALL_DIR=~/sparrow/\nSPARK_INSTALL_DIR=~/spark/\nSHARK_INSTALL_DIR=~/shark"
  },
  {
    "path": "deploy/ec2/template/clean_logs.sh",
    "chars": 106,
    "preview": "#!/bin/bash\nrm -f /tmp/*.log\nrm -f /tmp/*.log.gz\nrm -f ~/*.log\nrm -f ~/*.log.gz\nrm /tmp/spark-local-* -rf\n"
  },
  {
    "path": "deploy/ec2/template/configure_node.sh",
    "chars": 1016,
    "preview": "#!/bin/bash\n\n/etc/init.d/ntpd stop\nntpdate ntp.ubuntu.com\n\nrm -rf /tmp/spark-local*\n\nif [ -d \"/mnt\" ]; then\n  umount /mn"
  },
  {
    "path": "deploy/ec2/template/create_database.sh",
    "chars": 1455,
    "preview": "#!/bin/bash\n# Generate databases\n\nSCALE=$1\nFRONTENDS=`cat frontends.txt`\n\nif [ \"$SCALE\" = \"\" ];\nthen\n  echo \"Scale facto"
  },
  {
    "path": "deploy/ec2/template/create_tpch_tables_primary.sh",
    "chars": 346,
    "preview": "#!/bin/bash\n# Create shark tables for tpc-h benchmark\n\nLOG=/disk1/sparrow/createTPCH.log\n\nSHARK=/root/shark/bin/shark-wi"
  },
  {
    "path": "deploy/ec2/template/create_tpch_tables_secondary.sh",
    "chars": 348,
    "preview": "#!/bin/bash\n# Create shark tables for tpc-h benchmark\n\nLOG=/disk1/sparrow/createTPCH.log\n\nSHARK=/root/shark/bin/shark-wi"
  },
  {
    "path": "deploy/ec2/template/deploy_sparrow.sh",
    "chars": 758,
    "preview": "#!/bin/bash\n# Deploy built version of Sparrow on frontends and backends.\n\nFRONTENDS=`cat frontends.txt`\nBACKENDS=`cat ba"
  },
  {
    "path": "deploy/ec2/template/find_bugs.sh",
    "chars": 136,
    "preview": "#!/bin/bash\nhost=`ec2metadata  |grep public-hostname | cut -d \" \" -f 2`\necho $host\ncat /disk1/sparrow/* |egrep \"ERROR|Ex"
  },
  {
    "path": "deploy/ec2/template/find_bugs_filtered.sh",
    "chars": 200,
    "preview": "#!/bin/bash\n#Filters out some known issues\nhost=`ec2metadata  |grep public-hostname | cut -d \" \" -f 2`\necho $host\ncat /d"
  },
  {
    "path": "deploy/ec2/template/find_bugs_verbose.sh",
    "chars": 194,
    "preview": "#!/bin/bash\n#Filters out some known issues\nhost=`ec2metadata  |grep public-hostname | cut -d \" \" -f 2`\necho $host\ncat /d"
  },
  {
    "path": "deploy/ec2/template/find_cache_partitions.sh",
    "chars": 131,
    "preview": "#!/bin/bash\nhost=`ec2metadata  |grep public-hostname | cut -d \" \" -f 2`\necho $host\ncat /disk1/sparrow/* |egrep \"ensureFr"
  },
  {
    "path": "deploy/ec2/template/frontend.conf",
    "chars": 375,
    "preview": "job_arrival_rate_s = {{arrival_lambda}}\ntasks_per_job = {{tasks_per_job}}\nbenchmark.id = {{benchmark_id}}\nbenchmark.iter"
  },
  {
    "path": "deploy/ec2/template/frontends.txt",
    "chars": 18,
    "preview": "{{frontend_list}}\n"
  },
  {
    "path": "deploy/ec2/template/hadoop-env.sh",
    "chars": 2286,
    "preview": "# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configura"
  },
  {
    "path": "deploy/ec2/template/hdfs-site.xml",
    "chars": 640,
    "preview": "<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n\n<!-- Put site-specific property overr"
  },
  {
    "path": "deploy/ec2/template/hive-site.xml",
    "chars": 39682,
    "preview": "<?xml version=\"1.0\"?>\n<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n<!--\n   Licensed to the Apache Softwar"
  },
  {
    "path": "deploy/ec2/template/prepare_logs.sh",
    "chars": 76,
    "preview": "#!/bin/bash\ncp /disk1/sparrow/shark_* /root/\ntouch foo.log && gzip -f *.log\n"
  },
  {
    "path": "deploy/ec2/template/shark-env.sh",
    "chars": 2781,
    "preview": "#!/usr/bin/env bash\n\n# Copyright (C) 2012 The Regents of The University California.\n# All rights reserved.\n#\n# Licensed "
  },
  {
    "path": "deploy/ec2/template/spark-env.sh",
    "chars": 1369,
    "preview": "#!/usr/bin/env bash\n\n# Set Spark environment variables for your site in this file. Some useful\n# variables to set are:\n#"
  },
  {
    "path": "deploy/ec2/template/spark-run.sh",
    "chars": 6116,
    "preview": "#!/usr/bin/env bash\n\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreem"
  },
  {
    "path": "deploy/ec2/template/sparrow.conf",
    "chars": 465,
    "preview": "deployment.mode = configbased\nstatic.node_monitors = {{static_backends}}\nsystem.cpus = {{cpus}}\nstatic.app.name = spark\n"
  },
  {
    "path": "deploy/ec2/template/sparrow_schedulers.txt",
    "chars": 23,
    "preview": "{{sparrow_schedulers}}\n"
  },
  {
    "path": "deploy/ec2/template/start_mesos_master.sh",
    "chars": 570,
    "preview": "#!/bin/bash\n# Start mesos master\n\n# Make sure software firewall is stopped (ec2 firewall subsumes)\n/etc/init.d/iptables "
  },
  {
    "path": "deploy/ec2/template/start_mesos_slave.sh",
    "chars": 991,
    "preview": "#!/bin/bash\n# Start mesos slave\n\n# Make sure software firewall is stopped (ec2 firewall subsumes)\n/etc/init.d/iptables s"
  },
  {
    "path": "deploy/ec2/template/start_proto_backend.sh",
    "chars": 521,
    "preview": "#!/bin/bash\n# Start Prototype backend\n\nLOG=/disk1/sparrow/protoBackend\n\nAPPCHK=$(ps aux | grep -v grep | grep -c ProtoBa"
  },
  {
    "path": "deploy/ec2/template/start_proto_frontend.sh",
    "chars": 646,
    "preview": "#!/bin/bash\n# Start Prototype frontend\n\nLOG=/disk1/sparrow/protoFrontend.log\n\nAPPCHK=$(ps aux | grep -v grep | grep -c {"
  },
  {
    "path": "deploy/ec2/template/start_shark_tpch.sh",
    "chars": 774,
    "preview": "#!/bin/bash\n# Start shark tpch workload\nulimit -n 16384\n\nAPPCHK=$(ps aux | grep -v grep |grep -v start| grep java | grep"
  },
  {
    "path": "deploy/ec2/template/start_spark_backend.sh",
    "chars": 1115,
    "preview": "#!/bin/bash\n# Start Spark backend\nulimit -n 16384\nFRONTENDS=`cat frontends.txt`\n\n\nAPPCHK=$(ps aux | grep -v grep | grep "
  },
  {
    "path": "deploy/ec2/template/start_spark_frontend.sh",
    "chars": 1484,
    "preview": "#!/bin/bash\n# Start Prototype frontend\nulimit -n 16384\n\nAPPCHK=$(ps aux | grep -v grep |grep -v start| grep java | grep "
  },
  {
    "path": "deploy/ec2/template/start_sparrow.sh",
    "chars": 996,
    "preview": "#!/bin/bash\n# Start Sparrow locally\nulimit -n 16384\n\nLOG=/disk1/sparrow/sparrowDaemon.log\nIP=`ifconfig eth0 | grep 'inet"
  },
  {
    "path": "deploy/ec2/template/start_throughput_exp_spark.sh",
    "chars": 659,
    "preview": "#!/bin/bash\n# Starts an experiment to test scheduler throughput with spark.\n# Starts the master and slaves, and then the"
  },
  {
    "path": "deploy/ec2/template/start_throughput_exp_sparrow.sh",
    "chars": 444,
    "preview": "#!/bin/bash\n# Starts an experiment to test scheduler throughput.\nmy_ip=`hostname -i`\nlog=\"/disk1/sparrow/spark_$my_ip.lo"
  },
  {
    "path": "deploy/ec2/template/stop_mesos_master.sh",
    "chars": 314,
    "preview": "#!/bin/bash\n# Stop mesos master\n\nAPPCHK=$(ps aux | grep -v grep | grep -c mesos-master)\n\nif [ $APPCHK = '0' ]; then\n  ec"
  },
  {
    "path": "deploy/ec2/template/stop_mesos_slave.sh",
    "chars": 309,
    "preview": "#!/bin/bash\n# Stop mesos slave\n\nAPPCHK=$(ps aux | grep -v grep | grep -c mesos-slave)\n\nif [ $APPCHK = '0' ]; then\n  echo"
  },
  {
    "path": "deploy/ec2/template/stop_proto_backend.sh",
    "chars": 323,
    "preview": "#!/bin/bash\n# Stop proto backend locally\n\nAPPCHK=$(ps aux | grep -v grep | grep -c ProtoBackend)\n\nif [ $APPCHK = '0' ]; "
  },
  {
    "path": "deploy/ec2/template/stop_proto_frontend.sh",
    "chars": 343,
    "preview": "#!/bin/bash\n# Stop proto frontend locally\n\nAPPCHK=$(ps aux | grep -v grep | grep -c {{frontend_type}})\n\nif [ $APPCHK = '"
  },
  {
    "path": "deploy/ec2/template/stop_spark_backend.sh",
    "chars": 325,
    "preview": "#!/bin/bash\n# Stop spark backend locally\n\nAPPCHK=$(ps aux | grep -v grep | grep -v stop | grep -c spark)\n\nif [ $APPCHK ="
  },
  {
    "path": "deploy/ec2/template/stop_spark_frontend.sh",
    "chars": 326,
    "preview": "#!/bin/bash\n# Stop spark frontend locally\n\nAPPCHK=$(ps aux | grep -v grep | grep -v stop | grep -c spark)\n\nif [ $APPCHK "
  },
  {
    "path": "deploy/ec2/template/stop_sparrow.sh",
    "chars": 308,
    "preview": "#!/bin/bash\n# Stop sparrow locally\n\nAPPCHK=$(ps aux | grep -v grep | grep -c SparrowDaemon)\n\nif [ $APPCHK = '0' ]; then\n"
  },
  {
    "path": "deploy/ec2/template/tpch/make_base_tables.hql",
    "chars": 2325,
    "preview": "drop table if exists lineitem;\ndrop table if exists orders;\ndrop table if exists denorm;\ndrop table if exists part;\ndrop"
  },
  {
    "path": "deploy/ec2/template/tpch/make_denorm_cached.hql",
    "chars": 1312,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ncreate table denorm_cached as\nselect r_regionkey, l"
  },
  {
    "path": "deploy/ec2/template/tpch/make_denorm_table_primary.hql",
    "chars": 1375,
    "preview": "SET mapred.reduce.tasks={{reduce_tasks}};\ndrop table if exists denorm;\ncreate table denorm\nrow format delimited fields t"
  },
  {
    "path": "deploy/ec2/template/tpch/make_denorm_table_secondary.hql",
    "chars": 1008,
    "preview": "create external table denorm (r_regionkey int, l_linenumber int, r_name string, n_nationkey int, n_name string, s_suppke"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_1",
    "chars": 674022,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_10",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_11",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_12",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_13",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_14",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_15",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_2",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_3",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_4",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_5",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_6",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_7",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_8",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch/tpch_workload_9",
    "chars": 674016,
    "preview": "set mapred.reduce.tasks={{num_partitions}};\nset hive.map.aggr=false;\ndrop table if exists denorm_cached;\ncreate table de"
  },
  {
    "path": "deploy/ec2/template/tpch_experiment.sh",
    "chars": 304,
    "preview": "FILES=/opt/tpch_hive/*\nOUT_DIR=/tmp\n\nfor f in $FILES\ndo\n  echo \"Processing $f\"\n  qname=`echo $f |  sed \"s/.*\\(q[0-9]*\\)."
  },
  {
    "path": "deploy/ec2/template/wipe_hdfs.sh",
    "chars": 38,
    "preview": "#!/bin/bash\nrm -rf /disk1/hdfs/data/*\n"
  },
  {
    "path": "deploy/ec2/tpch_experiments.py",
    "chars": 2550,
    "preview": "#\n# Copyright 2013 The Regents of The University California\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lic"
  },
  {
    "path": "deploy/example_sparrow_configuration.conf",
    "chars": 681,
    "preview": "# This file gives a basic Sparrow configuration. Refer to\n# daemon/SparrowConf.java for more configuration options (e.g."
  },
  {
    "path": "deploy/third_party/boto-2.1.1/PKG-INFO",
    "chars": 512,
    "preview": "Metadata-Version: 1.0\nName: boto\nVersion: 2.1.1\nSummary: Amazon Web Services Library\nHome-page: http://code.google.com/p"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/bundle_image",
    "chars": 1555,
    "preview": "#!/usr/bin/env python\nfrom boto.manage.server import Server\nif __name__ == \"__main__\":\n    from optparse import OptionPa"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/cfadmin",
    "chars": 2753,
    "preview": "#!/usr/bin/env python\n# Author: Chris Moyer\n#\n# cfadmin is similar to sdbadmin for CloudFront, it's a simple\n# console u"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/cq",
    "chars": 2963,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/cwutil",
    "chars": 5044,
    "preview": "#!/usr/bin/env python\n# Author: Chris Moyer <cmoyer@newstex.com>\n# Description: CloudWatch Utility\n# For listing stats, "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/elbadmin",
    "chars": 7069,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2009 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/fetch_file",
    "chars": 1575,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2009 Chris Moyer http://coredumped.org\n#\n# Permission is hereby granted, free of c"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/kill_instance",
    "chars": 923,
    "preview": "#!/usr/bin/env python\n\nimport sys\nfrom optparse import OptionParser\n\nimport boto\nfrom boto.ec2 import regions\n\n\n\ndef kil"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/launch_instance",
    "chars": 10326,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2009 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/list_instances",
    "chars": 2385,
    "preview": "#!/usr/bin/env python\n\nimport sys\nfrom operator import attrgetter\nfrom optparse import OptionParser\n\nimport boto\nfrom bo"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/lss3",
    "chars": 2266,
    "preview": "#!/usr/bin/env python\nimport boto\nfrom boto.s3.connection import OrdinaryCallingFormat\n\ndef sizeof_fmt(num):\n    for x i"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/pyami_sendmail",
    "chars": 2618,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2010 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/route53",
    "chars": 5228,
    "preview": "#!/usr/bin/env python\n# Author: Chris Moyer\n#\n# route53 is similar to sdbadmin for Route53, it's a simple\n# console util"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/s3multiput",
    "chars": 12460,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted,"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/s3put",
    "chars": 8809,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted,"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/sdbadmin",
    "chars": 5878,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2009 Chris Moyer http://kopertop.blogspot.com/\n#\n# Permission is hereby granted, f"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/bin/taskadmin",
    "chars": 3770,
    "preview": "#!/usr/bin/env python\n# Copyright (c) 2009 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/__init__.py",
    "chars": 21988,
    "preview": "# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.\n# Copyri"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/auth.py",
    "chars": 12595,
    "preview": "# Copyright 2010 Google Inc.\n# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2011, Eucalyptus Sys"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/auth_handler.py",
    "chars": 2059,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cacerts/__init__.py",
    "chars": 1097,
    "preview": "# Copyright 2010 Google Inc.\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obta"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cacerts/cacerts.txt",
    "chars": 33640,
    "preview": "# Certifcate Authority certificates for validating SSL connections.\n#\n# This file contains PEM format certificates gener"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudformation/__init__.py",
    "chars": 1302,
    "preview": "# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.\n#\n# Perm"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudformation/connection.py",
    "chars": 9842,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudformation/stack.py",
    "chars": 9892,
    "preview": "from datetime import datetime\n\nfrom boto.resultset import ResultSet\n\nclass Stack:\n    def __init__(self, connection=None"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudformation/template.py",
    "chars": 1318,
    "preview": "from boto.resultset import ResultSet\n\nclass Template:\n    def __init__(self, connection=None):\n        self.connection ="
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/__init__.py",
    "chars": 11833,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/distribution.py",
    "chars": 31064,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/exception.py",
    "chars": 1205,
    "preview": "# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/identity.py",
    "chars": 4489,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/invalidation.py",
    "chars": 3654,
    "preview": "# Copyright (c) 2006-2010 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of charge, to any pe"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/logging.py",
    "chars": 1557,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/object.py",
    "chars": 1781,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/origin.py",
    "chars": 6044,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/cloudfront/signers.py",
    "chars": 2088,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/connection.py",
    "chars": 37029,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010 Google\n# Copyright (c) 2008 rPath, Inc."
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/contrib/__init__.py",
    "chars": 1107,
    "preview": "# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/contrib/m2helpers.py",
    "chars": 2295,
    "preview": "# Copyright (c) 2006,2007 Jon Colverson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# co"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/contrib/ymlmessage.py",
    "chars": 1869,
    "preview": "# Copyright (c) 2006,2007 Chris Moyer\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/__init__.py",
    "chars": 2841,
    "preview": "# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/address.py",
    "chars": 2401,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/__init__.py",
    "chars": 26643,
    "preview": "# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/\n# Copyright (c) 2011 Jann Kleen\n#\n# Permission is hereby gr"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/activity.py",
    "chars": 3059,
    "preview": "# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any pe"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/group.py",
    "chars": 11445,
    "preview": "# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any pe"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/instance.py",
    "chars": 2429,
    "preview": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/launchconfig.py",
    "chars": 6972,
    "preview": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/policy.py",
    "chars": 5549,
    "preview": "# Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/\n# Copyright (c) 2011 Jann Kleen\n#\n# Permission is hereby gr"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/request.py",
    "chars": 1549,
    "preview": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/autoscale/scheduled.py",
    "chars": 2303,
    "preview": "# Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/\n#\n# Permission is hereby granted, free of charge, to any pe"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/blockdevicemapping.py",
    "chars": 4324,
    "preview": "# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/bundleinstance.py",
    "chars": 2745,
    "preview": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/buyreservation.py",
    "chars": 3813,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/__init__.py",
    "chars": 28485,
    "preview": "# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/alarm.py",
    "chars": 11160,
    "preview": "# Copyright (c) 2010 Reza Lotun http://reza.lotun.name\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/datapoint.py",
    "chars": 1668,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/listelement.py",
    "chars": 1319,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/cloudwatch/metric.py",
    "chars": 4819,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/connection.py",
    "chars": 105157,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/ec2object.py",
    "chars": 4047,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/__init__.py",
    "chars": 19299,
    "preview": "# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/healthcheck.py",
    "chars": 2606,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/instancestate.py",
    "chars": 2077,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/listelement.py",
    "chars": 1319,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/listener.py",
    "chars": 2634,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/loadbalancer.py",
    "chars": 7629,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/policies.py",
    "chars": 3236,
    "preview": "# Copyright (c) 2010 Reza Lotun http://reza.lotun.name\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/elb/securitygroup.py",
    "chars": 1576,
    "preview": "# Copyright (c) 2010 Reza Lotun http://reza.lotun.name\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/image.py",
    "chars": 13056,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/instance.py",
    "chars": 14919,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/instanceinfo.py",
    "chars": 1772,
    "preview": "# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/keypair.py",
    "chars": 4312,
    "preview": "# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/launchspecification.py",
    "chars": 3403,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/placementgroup.py",
    "chars": 1930,
    "preview": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person o"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/regioninfo.py",
    "chars": 1524,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# All rights "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/reservedinstance.py",
    "chars": 4028,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/securitygroup.py",
    "chars": 12124,
    "preview": "# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2011, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/snapshot.py",
    "chars": 5265,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/spotdatafeedsubscription.py",
    "chars": 2371,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/spotinstancerequest.py",
    "chars": 3946,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/spotpricehistory.py",
    "chars": 2085,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/tag.py",
    "chars": 3100,
    "preview": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/volume.py",
    "chars": 7876,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permissio"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ec2/zone.py",
    "chars": 1653,
    "preview": "# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ecs/__init__.py",
    "chars": 3493,
    "preview": "# Copyright (c) 2010 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/ecs/item.py",
    "chars": 5148,
    "preview": "# Copyright (c) 2010 Chris Moyer http://coredumped.org/\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/__init__.py",
    "chars": 1296,
    "preview": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of th"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/bootstrap_action.py",
    "chars": 1795,
    "preview": "# Copyright (c) 2010 Spotify AB\n# Copyright (c) 2010 Yelp\n#\n# Permission is hereby granted, free of charge, to any perso"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/connection.py",
    "chars": 17712,
    "preview": "# Copyright (c) 2010 Spotify AB\n# Copyright (c) 2010-2011 Yelp\n#\n# Permission is hereby granted, free of charge, to any "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/emrobject.py",
    "chars": 4791,
    "preview": "# Copyright (c) 2010 Spotify AB\n# Copyright (c) 2010 Jeremy Thurgood <firxen+boto@gmail.com>\n# Copyright (c) 2010-2011 Y"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/instance_group.py",
    "chars": 2098,
    "preview": "#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated docum"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/emr/step.py",
    "chars": 6366,
    "preview": "# Copyright (c) 2010 Spotify AB\n# Copyright (c) 2010-2011 Yelp\n#\n# Permission is hereby granted, free of charge, to any "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/exception.py",
    "chars": 13412,
    "preview": "# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# All rights "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/file/__init__.py",
    "chars": 1228,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/file/bucket.py",
    "chars": 4075,
    "preview": "# Copyright 2010 Google Inc.\n# Copyright (c) 2011, Nexenta Systems Inc.\n#\n# Permission is hereby granted, free of charge"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/file/connection.py",
    "chars": 1480,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/file/key.py",
    "chars": 5475,
    "preview": "# Copyright 2010 Google Inc.\n# Copyright (c) 2011, Nexenta Systems Inc.\n#\n# Permission is hereby granted, free of charge"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/file/simpleresultset.py",
    "chars": 1321,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/fps/__init__.py",
    "chars": 1104,
    "preview": "# Copyright (c) 2008, Chris Moyer http://coredumped.org\n#\n# Permission is hereby granted, free of charge, to any person "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/fps/connection.py",
    "chars": 16647,
    "preview": "# Copyright (c) 2008 Chris Moyer http://coredumped.org/\n# Copyringt (c) 2010 Jason R. Coombs http://www.jaraco.com/\n#\n# "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/__init__.py",
    "chars": 1076,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/acl.py",
    "chars": 9529,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/bucket.py",
    "chars": 10089,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/connection.py",
    "chars": 4028,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/key.py",
    "chars": 13732,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/resumable_upload_handler.py",
    "chars": 26351,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/gs/user.py",
    "chars": 1939,
    "preview": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/handler.py",
    "chars": 1849,
    "preview": "# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/https_connection.py",
    "chars": 4362,
    "preview": "# Copyright 2007,2011 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/iam/__init__.py",
    "chars": 1301,
    "preview": "# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.\n#\n# Perm"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/iam/connection.py",
    "chars": 37598,
    "preview": "# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.\n#\n# Perm"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/iam/summarymap.py",
    "chars": 1660,
    "preview": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/jsonresponse.py",
    "chars": 5809,
    "preview": "# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n#\n# Permission is "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/__init__.py",
    "chars": 1108,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/cmdshell.py",
    "chars": 8477,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/propget.py",
    "chars": 2498,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/server.py",
    "chars": 21995,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010 Chris Moyer http://coredumped.org/\n#\n# "
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/task.py",
    "chars": 6820,
    "preview": "# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any per"
  },
  {
    "path": "deploy/third_party/boto-2.1.1/boto/manage/test_manage.py",
    "chars": 792,
    "preview": "from boto.manage.server import Server\nfrom boto.manage.volume import Volume\nimport time\n\nprint '--> Creating New Volume'"
  }
]

// ... and 207 more files (download for full content)

About this extraction

This page contains the full source code of the radlab/sparrow GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 407 files (12.9 MB), approximately 3.4M tokens, and a symbol index with 6283 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
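Because the manifest shown above is a plain JSON array of objects with `path`, `chars`, and `preview` fields, it is easy to post-process before feeding selected files to a model. The snippet below is a minimal sketch, assuming the array has been saved locally as `sparrow_manifest.json` (a hypothetical filename; the page itself offers the content only as a .txt download). It groups character counts by the first two path components and lists the largest individual files, which helps when trimming the roughly 3.4M-token extraction down to a model's context window.

```python
import json
from collections import defaultdict

# Minimal sketch: "sparrow_manifest.json" is an assumed local copy of the
# JSON array of {"path", "chars", "preview"} entries shown above.
with open("sparrow_manifest.json") as f:
    entries = json.load(f)

# Group character counts by the first two path components, e.g.
# "deploy/ec2" vs. "deploy/third_party" (the vendored boto library).
by_prefix = defaultdict(int)
for entry in entries:
    prefix = "/".join(entry["path"].split("/")[:2])
    by_prefix[prefix] += entry["chars"]

for prefix, chars in sorted(by_prefix.items(), key=lambda kv: -kv[1]):
    print(f"{prefix:30s} {chars:>12,d} chars")

# The ten largest files dominate any token budget; list them explicitly.
largest = sorted(entries, key=lambda e: e["chars"], reverse=True)[:10]
for entry in largest:
    print(f'{entry["chars"]:>12,d}  {entry["path"]}')
```

As a rough sanity check, the common heuristic of about four characters per token is consistent with the 12.9 MB and ~3.4M-token figures quoted above, so the per-file `chars` counts can double as approximate token budgets when deciding which files to include.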

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.
